geo_heatmap_jupyter_Students.ipynb
###Markdown
First, let's install and import all required libraries
###Code
#!pip install -r requirements.txt
import collections #This library adds some extras to the standard python data structures
import folium #Great library for plotting data on maps
import json #This library allows us to load and handle json files with python
import os #This module is useful for handling paths and directories
import webbrowser #This module lets us open and control the web browser from Python
import time
import datetime
import pandas as pd
from folium.plugins import HeatMap
###Output
_____no_output_____
###Markdown
Now, let's load and analyze our location history json file
###Code
data = json.load(open('Location_History.json', encoding="utf8"))
data
type(data)
data["locations"]
df = pd.DataFrame(data["locations"])
df = df.drop(columns=['activity'])
df
###Output
_____no_output_____
###Markdown
Now we can extract and transform the data we need to make our map
###Code
coordinates = collections.defaultdict(int)
coordinates
unique_coordinates = (0, 0)
max_magnitude = 0
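# unique_coordinates will end up holding the most frequently visited location
# (used later to center the map); max_magnitude is that location's visit count.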
"""Here you transfor the coordinates given by google in actual longitude
and latitude coordinates"""
for i, loc in enumerate(data["locations"]):
# print(i)
# print(loc)
if "latitudeE7" not in loc or "longitudeE7" not in loc:
continue
coords = (round(loc["latitudeE7"] / 1e7, 6),
round(loc["longitudeE7"] / 1e7, 6))
# print(coords)
"""Here you calculate the magnitude for all coordinates"""
#print(loc["timestampMs"])
coordinates[coords] += 1 #these are the magnitude values we will need for the coordinates dictionary
#print(coordinates[coords])
if coordinates[coords] > max_magnitude:
unique_coordinates = coords
max_magnitude = coordinates[coords]
#print(unique_coordinates)
#print(max_magnitude)
coordinates
tilesoptions = ["openstreetmap", "StamenTerrain", "stamentoner",
"stamenwatercolor", "cartodbpositron", "cartodbdark_matter"]
tiles = tilesoptions[0]
zoom_start = 10
radius = 7
blur = 4
min_opacity = 0.2
max_zoom = 10
map_data = [(coords[0], coords[1], magnitude)
for coords, magnitude in coordinates.items()]
map_data
# Generate background map
m = folium.Map(location=unique_coordinates,
zoom_start=zoom_start,
tiles=tiles)
# Generate heat map
heatmap = HeatMap(map_data,
max_val=max_magnitude,
min_opacity=min_opacity,
radius=radius,
blur=blur,
max_zoom=max_zoom)
# Combine both maps
m.add_child(heatmap)
###Output
_____no_output_____
###Markdown
We can now save our map as an html file and launch it in the browser
###Code
output_file = tiles + 'heatmapSpicedStudent.html'
m.save(output_file)
webbrowser.open("file://" + os.path.realpath(output_file))
###Output
_____no_output_____
###Markdown
Now let's wrap everything into functions and play with it
###Code
def transformcoordinates(inputdata):
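    # Convert Google's E7 integer coordinates into (latitude, longitude) tuples
    # and count how many times each location appears.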
coordinates = collections.defaultdict(int)
unique_coordinates = (0, 0)
max_magnitude = 0
for i, loc in enumerate(inputdata):
# print(i)
# print(loc)
if "latitudeE7" not in loc or "longitudeE7" not in loc:
continue
coords = (round(loc["latitudeE7"] / 1e7, 6),
round(loc["longitudeE7"] / 1e7, 6))
# print(coords)
"""Here you calculate the magnitude for all coordinates"""
#print(loc["timestampMs"])
coordinates[coords] += 1 #these are the magnitude values we will need for the coordinates dictionary
#print(coordinates[coords])
if coordinates[coords] > max_magnitude:
unique_coordinates = coords
max_magnitude = coordinates[coords]
#print(unique_coordinates)
#print(max_magnitude)
return coordinates
coordinates = transformcoordinates(data["locations"])
coordinates
def plotmymaps(Tiles, Zoom_start=10, Radius=7, Blur=4, Min_opacity=0.2, Max_zoom=10):
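    # NOTE: this relies on the global variables coordinates, unique_coordinates
    # and max_magnitude that were computed in the cells above.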
tilesoptions = ["openstreetmap", "StamenTerrain", "stamentoner",
"stamenwatercolor", "cartodbpositron", "cartodbdark_matter"]
tiles = tilesoptions[Tiles]
zoom_start = Zoom_start
radius = Radius
blur = Blur
min_opacity = Min_opacity
max_zoom = Max_zoom
map_data = [(coords[0], coords[1], magnitude)
for coords, magnitude in coordinates.items()]
# Generate background map
m = folium.Map(location=unique_coordinates,
zoom_start=zoom_start,
tiles=tiles)
# Generate heat map
heatmap = HeatMap(map_data,
max_val=max_magnitude,
min_opacity=min_opacity,
radius=radius,
blur=blur,
max_zoom=max_zoom)
m.add_child(heatmap)
output_file = tiles + 'heatmapSpicedStudent.html'
m.save(output_file)
webbrowser.open("file://" + os.path.realpath(output_file))
plotmymaps(5,3,3,3,3,3)
###Output
_____no_output_____
###Markdown
What if we only want to plot data for a specific time range?
###Code
df
min_date='2020-1-1'
max_date='2020-1-31'
def transformdate(date):
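    # Convert a 'YYYY-MM-DD' date string into a Unix timestamp in seconds (local time).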
element = datetime.datetime.strptime(date,"%Y-%m-%d")
elementtuple = element.timetuple()
timestamp = time.mktime(elementtuple)
return timestamp
min_timestamp=(transformdate(min_date))*1000
min_timestamp
max_timestamp=(transformdate(max_date))*1000
max_timestamp
df["timestampMs"] = pd.to_numeric(df["timestampMs"])
df2 = df[df.timestampMs > min_timestamp]
df2 = df2[df2.timestampMs < max_timestamp]
df2
df2.to_json('tempStudents.json', orient='records', lines=True)
data2 = [json.loads(line) for line in open('tempStudents.json', 'r')]
data2
coordinates = transformcoordinates(data2)
coordinates
plotmymaps(1)
###Output
_____no_output_____
Applied_Data_Science_with_Python/data_science_LA_Sports/code_to_clean_data/web_scraping_rams.ipynb
###Markdown
Web scraping. Load the table from the website. NOTE: Clean data is loaded later in case the website is no longer available. Team Name
###Code
team_name = 'Rams'
#https://www.coursera.org/learn/python-plotting/discussions/weeks/4/threads/yMT7zw2KEeeqzRJFs29uDA
import pandas as pd
from IPython.display import display, HTML
def install_module(module):
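    # Install the missing module with conda, then restart the kernel and
    # re-run the first cells via the injected JavaScript below.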
! conda install "$module" -y
js_cmd = ['IPython.notebook.kernel.restart();',
'IPython.notebook.select(1);',
'IPython.notebook.execute_cell();'
]
js = "<script>{0}</script>".format(' '.join(js_cmd))
display(HTML(js))
#url = 'https://simple.wikipedia.org/wiki/List_of_U.S._states'
url = 'https://en.wikipedia.org/wiki/List_of_Los_Angeles_Rams_seasons'
try:
df_list = pd.read_html(url)
except Exception as e:
print(e)
# #install necessary modules for read_html
module = str(e).split()[0]
install_module(module)
print('Number of Data Frames {}'.format(len(df_list)))
df_list[0].columns = df_list[0].iloc[0]
df = df_list[0].iloc[1:]
df.head()
###Output
Number of Data Frames 10
###Markdown
Multiple tables found. Capture them all in a list of dataframes.
###Code
# https://stackoverflow.com/questions/42225204/use-pandas-to-get-multiple-tables-from-webpage
import urllib.request
from bs4 import BeautifulSoup
url = 'https://en.wikipedia.org/wiki/List_of_Los_Angeles_Rams_seasons'
html_table = urllib.request.urlopen(url).read()
# fix HTML
soup = BeautifulSoup(html_table, "html.parser")
# warn! id ratings-table is your page specific
for table in soup.findChildren(attrs={'id': 'ratings-table'}):
for c in table.children:
if c.name in ['tbody', 'thead']:
c.unwrap()
list_df = pd.read_html(str(soup), flavor="bs4")
len(list_df[0])
###Output
_____no_output_____
###Markdown
This is the table we are interested in.
###Code
#list_df[2][15:].head()
#list_df[0].head()
#list_df[0].tail(10)
df_rams = list_df[2]
df_rams.head()
###Output
_____no_output_____
###Markdown
Save raw data .csv
###Code
file_name = team_name +'_data_raw'
#csv
df_rams.to_csv( file_name +'.csv')
###Output
_____no_output_____
###Markdown
Clean the data. Make the first row the header for the columns.
###Code
# Make the first row the header column
# NOTE: This does not get rid of the row.
df_rams.columns = df_rams.iloc[0]
# Re-index and drop the first row.
df_rams_cleaned = df_rams.reindex(df_rams.index.drop(0))
# Keep the rows we want (i.e. the years they became the team for the city.)
df_rams_cleaned = df_rams_cleaned[37:87]
# Drop the rows we do not need.
df_rams_cleaned.reset_index(drop = True,
inplace = True)
df_rams_cleaned = df_rams_cleaned.drop(df_rams_cleaned.index[[25,47,49]])
# Keep columns of interest.
# https://stackoverflow.com/questions/14940743/selecting-excluding-sets-of-columns-in-pandas
columns_to_keep = ['Season', 'Postseason results', 'Awards']
df_rams_cleaned = df_rams_cleaned[columns_to_keep]
# Rename the columns
df_rams_cleaned.columns = ['Season', 'Wins', 'Losses']
# Change the year to have only the first year and not a range.
# i.e. 1960 and not 1961-62
df_rams_cleaned['Season'] = df_rams_cleaned['Season'].apply(lambda x: x[:4])
# # # Check type
# # #df_rams_cleaned.dtypes
# # # NOTE:
# # # Year 2004-05, Season cancelled due to 2004–05 NHL lockout
# # #
# # # https://stackoverflow.com/questions/18434208/pandas-converting-to-numeric-creating-nans-when-necessary
df_rams_cleaned = df_rams_cleaned.apply(pd.to_numeric, errors='coerce')
#df_rams_cleaned['Wins'] = df_rams_cleaned['Wins'].apply(pd.to_numeric, errors='coerce')
# # df_rams_cleaned['Losses'] = df_rams_cleaned['Losses'].apply(pd.to_numeric, errors='coerce')
# # # Convert to date float because int64 can't use NaN (not a number)
# # # https://stackoverflow.com/questions/41550746/error-using-astype-when-nan-exists-in-a-dataframe
# # #
# # #df_rams_cleaned['Season'] = df_rams_cleaned['Season'].astype('int64')
df_rams_cleaned = df_rams_cleaned.astype('float')
# # #df_rams_cleaned = df_rams_cleaned.astype('int64')
# # # Front fill and NaN value.
# # # Front fill means use the last known value
# # # Back fill means use the next known value
df_rams_cleaned = df_rams_cleaned.ffill()
# # # Check type
# # # df_rams_cleaned.dtypes
# # # Make column of win percent
df_rams_cleaned['Win_Percent'] = df_rams_cleaned['Wins'] / (df_rams_cleaned['Wins'] + df_rams_cleaned['Losses'])
# Make column for moving average (rolling mean)
#
# NOTE: You will not see the rolling mean for the first few rows because you need
# the minimum window size before it calculates.
# This will be deprecated in the future
# df_rams_cleaned['Rolling Mean'] = pd.rolling_mean( df_rams_cleaned['Wins'],
#
# # window to calculate
# 10)
df_rams_cleaned['Rolling_Mean'] = df_rams_cleaned['Win_Percent'].rolling( window = 10, center = False).mean()
# Only save from 1980 because that is the only
# overlap from all the data.
#
# NOTE: Not including 2017 to line up with the other data.
df_rams_cleaned = df_rams_cleaned[10:]
# Reset index.
# NOTE: drop = True means do not make a new index and keep old.
# inplace = True means update this variable and not return a copy
# leaving original intact.
df_rams_cleaned.reset_index(drop = True,
inplace = True)
# # #df_rams_cleaned.head(20)
df_rams_cleaned
###Output
_____no_output_____
###Markdown
Save clean data to .csv
###Code
file_name = team_name +'_data_cleaned'
#csv
df_rams_cleaned.to_csv( file_name +'.csv')
###Output
_____no_output_____
###Markdown
Load clean data from file instead of web scraping.
###Code
file_name = team_name +'_data_cleaned'
df_rams_cleaned = pd.read_csv( file_name +'.csv',
# Use the first column as the index
index_col = 0)
df_rams_cleaned.tail()
###Output
_____no_output_____
###Markdown
Plot moving average
###Code
import matplotlib.pyplot as plt
import numpy as np
#----------------
# Variables
# (Start)
#----------------
graph_color = 'orange'
#----------------
# Variables
# (End)
#----------------
# Create new figure
fig_lakers = plt.figure(figsize = (16,8))
ax = fig_lakers.add_subplot(111)
# TODO
#
# -Set title
# -Set x label
# -Set y label
# -Set x-axis to be the whole data but only show 10 year intervals
# -Set y-axis for 0.0 to 1.0 but have dotted lines from 0.0, 0.25, 0.75, 1.0 BUT only use the highest that contain data.
# -Set thick lines.
# -Set dotted lines at y-axis intervals
# -Set annotations for names of team next to plot lines
# -Set annotations for win%
# Remove plot box
# Change the name of the figure to be generic for all teams and then save the image.
# Title
plt.title('Los Angeles Sports Teams Win %'
'\n(10 Year Moving Average)',
fontsize=20 )
# Labels for x and y axes
plt.xlabel( 'Season',
fontsize=15 )
plt.ylabel( '10 Year Moving Average Win %',
fontsize=15 )
# Create graph
plot_lakers = plt.plot( df_rams_cleaned['Season'],
df_rams_cleaned['Rolling_Mean'],
c=graph_color,
                        label=team_name)
# Set limit on x-axis
#ax.set_xlim([datetime.date(2016, 1, 1), datetime.date(2016, 12, 31)])
ax.set_ylim(0.0, 0.85)
# Set line thickness and style (like dotted)
# https://matplotlib.org/examples/pylab_examples/set_and_get.html
# plt.setp(plot_lakers,
# linestyle='--')
plt.setp(plot_lakers,
linewidth=4)
# https://stackoverflow.com/questions/24943991/matplotlib-change-grid-interval-and-specify-tick-labels
#
# Set x-axis to be the whole data but only show 10 year intervals
x_major_ticks = np.arange(1980, 2020, 10)
#x_minor_ticks = np.arange(1980, 2020, 1)
#
ax.set_xticks(x_major_ticks)
# ax.set_xticks(x_minor_ticks, minor=True)
#
# Set y-axis for 0.0 to 1.0 but have dotted lines from 0.0, 0.25, 0.75, 1.0 BUT only use the highest that contain data.
y_major_ticks = np.arange(0.0, 1.1, 0.25)
#
# Slice to exclude the first and last entry.
#y_major_ticks = y_major_ticks[:-1]
ax.set_yticks(y_major_ticks)
# Draw horizontal lines
for num in y_major_ticks:
plt.axhline(y = num,
linestyle = '--',
color = 'grey',
alpha = 0.2 )
# Text for team names.
# Setting up equations in matplotlib text.
# https://matplotlib.org/users/mathtext.html
#team_name = 'Lakers'
x_pos = 2017
y_pos = df_rams_cleaned['Rolling_Mean'].iloc[-1]
team_color = graph_color
font_size = 10
# # Drop Shadow
# plt.text(x_pos + 0.0005,
# y_pos - 0.0005,
# team_name,
# color = 'black',
# fontsize = font_size)
plt.text(x_pos,
y_pos,
team_name,
color = team_color,
fontsize = font_size)
# Legend
plt.text(1980,
0.1,
#'Win % = Games Won/(Games Won + Games Lost)',
#r'$\frac{5 - \frac{1}{x}}{4}$',
r'Win % = $\frac{\mathrm{Games\ Won}}{\mathrm{Games\ Won} + \mathrm{Games\ Lost}}$',
fontsize = 15,
bbox={'facecolor':'lightgrey', 'alpha':0.5, 'pad':5})
# Remove axis
#plt.axis('off')
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Show the graph.
plt.show()
###Output
_____no_output_____
###Markdown
Save graph to .png
###Code
# https://stackoverflow.com/questions/9012487/matplotlib-pyplot-savefig-outputs-blank-image
# plt.show() creates a new figure.
# Use the figure captured when created.
#plt.savefig('Los_Angeles_Sports_Teams_Win_Percentage.png')
file_name = team_name +'_win_percentage'
# The saved file name starts with an upper-case first letter simply because
# team_name ('Rams') is capitalized.
fig_lakers.savefig( file_name +'.png' )
###Output
_____no_output_____
Backend/.ipynb_checkpoints/PennGraderBackend-checkpoint.ipynb
###Markdown
PennGrader: Teacher Backend. This notebook lets you quickly create homeworks and analyze students' submissions. Step 1: Configuration. Fill in the cell below with the HOMEWORK_ID and SECRET_KEY obtained when you first created your homework.
###Code
HOMEWORK_ID = 'test_homework_id'
SECRET_KEY = 'test_secret_key'
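# NOTE: assumes the PennGraderBackend class has been imported or defined in an earlier cell (not shown here).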
backend = PennGraderBackend(homework_id = HOMEWORK_ID, secret_key = SECRET_KEY)
###Output
###Markdown
Step 2: Import Libraries. In the cell below, import all libraries needed for grading.
###Code
import numpy as np
import sklearn
from sklearn.metrics import accuracy_score, auc
# from sklearn import svm
import pandas as pd
# #import tensorflow
# import sklearn
accuracy_score([1,2],[3,1])
auc([1,2],[3,1])
get_imported_libraries()
###Output
_____no_output_____
###Markdown
Step 3: Write Test Cases. Define a test case function for each question. A test case function takes in a single input containing the student's answer and returns a tuple `(student score:int, maximum score:int)`. See the example below:
###Code
def test_case_0_1(answer): # [answer] can be of any type (i.e a function, a string, a dataframe, a class, etc) #
student_score = 0
max_score = 2
if answer == 'Correct answer':
student_score = 2
elif answer == 'Not so correct answer':
student_score = 1
else:
student_score = 0
return (student_score, max_score)
###Output
_____no_output_____
###Markdown
Below, fill in the `test_cases` dictionary to map each test case's name (a homework section, for example) to the corresponding function you defined above:
###Code
test_cases = {
'0.1' : test_case_0_1 # Test cases for section 0.1
}
serialize(test_cases)
###Output
_____no_output_____
###Markdown
Step 4: Upload Test Cases. Run the cell below to upload or update the homework's test cases.
###Code
backend.upload_test_cases(test_cases)
###Output
Success: Test cases updated successfully.
Supervised Learning/i) Model Evaluation Metrics/Regression Matrix/Solution Regression Metrics.ipynb
###Markdown
Boston Housing Data. In order to gain a better understanding of the metrics used in regression settings, we will be looking at the Boston Housing dataset. First use the cell below to read in the dataset and set up the training and testing data that will be used for the rest of this problem.
###Code
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
import numpy as np
import tests2 as t
boston = load_boston()
y = boston.target
X = boston.data
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42)
###Output
_____no_output_____
###Markdown
> **Step 1:** Before we get too far, let's do a quick check of the models that you can use in this situation given that you are working on a regression problem. Use the dictionary and corresponding letters below to provide all the possible models you might choose to use.
###Code
# When can you use the model - use each option as many times as necessary
a = 'regression'
b = 'classification'
c = 'both regression and classification'
models = {
'decision trees': c,# Letter here,
'random forest': c,# Letter here,
'adaptive boosting':c, # Letter here,
'logistic regression':b, # Letter here,
'linear regression':a # Letter here
}
#checks your answer, no need to change this code
t.q1_check(models)
###Output
That's right! All but logistic regression can be used for predicting numeric values. And linear regression is the only one of these that you should not use for predicting categories. Technically sklearn won't stop you from doing most of anything you want, but you probably want to treat cases in the way you found by answering this question!
###Markdown
> **Step 2:** Now for each of the models you found in the previous question that can be used for regression problems, import them using sklearn.
###Code
# Import models from sklearn - notice you will want to use
# the regressor version (not classifier) - googling to find
# each of these is what we all do!
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
###Output
_____no_output_____
###Markdown
> **Step 3:** Now that you have imported the 4 models that can be used for regression problems, instantiate each below.
###Code
# Instantiate each of the models you imported
# For now use the defaults for all the hyperparameters
tree_mod = DecisionTreeRegressor()
rf_mod = RandomForestRegressor()
ada_mod = AdaBoostRegressor()
reg_mod = LinearRegression()
###Output
_____no_output_____
###Markdown
> **Step 4:** Fit each of your instantiated models on the training data.
###Code
# Fit each of your models using the training data
tree_mod.fit(X_train, y_train)
rf_mod.fit(X_train, y_train)
ada_mod.fit(X_train, y_train)
reg_mod.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
> **Step 5:** Use each of your models to predict on the test data.
###Code
# Predict on the test values for each model
preds_tree = tree_mod.predict(X_test)
preds_rf = rf_mod.predict(X_test)
preds_ada = ada_mod.predict(X_test)
preds_reg = reg_mod.predict(X_test)
###Output
_____no_output_____
###Markdown
> **Step 6:** Now for the information related to this lesson. Use the dictionary to match the metrics that are used for regression and those that are for classification.
###Code
# potential model options
a = 'regression'
b = 'classification'
c = 'both regression and classification'
#
metrics = {
'precision': b,# Letter here,
'recall': b,# Letter here,
'accuracy': b,# Letter here,
'r2_score': a,# Letter here,
'mean_squared_error': a,# Letter here,
'area_under_curve': b,# Letter here,
'mean_absolute_area':a # Letter here
}
#checks your answer, no need to change this code
t.q6_check(metrics)
###Output
That's right! Looks like you know your metrics!
###Markdown
> **Step 6:** Now that you have identified the metrics that can be used for regression problems, use sklearn to import them.
###Code
# Import the metrics from sklearn
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
###Output
_____no_output_____
###Markdown
> **Step 7:** Similar to what you did with classification models, let's make sure you are comfortable with how exactly each of these metrics is being calculated. We can then match the value to what sklearn provides.
###Code
def r2(actual, preds):
'''
INPUT:
actual - numpy array or pd series of actual y values
preds - numpy array or pd series of predicted y values
OUTPUT:
returns the r-squared score as a float
'''
sse = np.sum((actual-preds)**2)
sst = np.sum((actual-np.mean(actual))**2)
return 1 - sse/sst
# Check solution matches sklearn
print(r2(y_test, preds_tree))
print(r2_score(y_test, preds_tree))
print("Since the above match, we can see that we have correctly calculated the r2 value.")
###Output
0.745910665922
0.745910665922
Since the above match, we can see that we have correctly calculated the r2 value.
###Markdown
> **Step 8:** Your turn fill in the function below and see if your result matches the built in for mean_squared_error.
###Code
def mse(actual, preds):
'''
INPUT:
actual - numpy array or pd series of actual y values
preds - numpy array or pd series of predicted y values
OUTPUT:
returns the mean squared error as a float
'''
return np.sum((actual-preds)**2)/len(actual)
# Check your solution matches sklearn
print(mse(y_test, preds_tree))
print(mean_squared_error(y_test, preds_tree))
print("If the above match, you are all set!")
###Output
19.2291017964
19.2291017964
If the above match, you are all set!
###Markdown
> **Step 9:** Now one last time - complete the function related to mean absolute error. Then check your function against the sklearn metric to assure they match.
###Code
def mae(actual, preds):
'''
INPUT:
actual - numpy array or pd series of actual y values
preds - numpy array or pd series of predicted y values
OUTPUT:
returns the mean absolute error as a float
'''
return np.sum(np.abs(actual-preds))/len(actual)
# Check your solution matches sklearn
print(mae(y_test, preds_tree))
print(mean_absolute_error(y_test, preds_tree))
print("If the above match, you are all set!")
###Output
3.0622754491
3.0622754491
If the above match, you are all set!
###Markdown
> **Step 10:** Which model performed the best in terms of each of the metrics? Note that r2 and mse will always match, but the mae may give a different best model. Use the dictionary and space below to match the best model via each metric.
###Code
#match each metric to the model that performed best on it
a = 'decision tree'
b = 'random forest'
c = 'adaptive boosting'
d = 'linear regression'
best_fit = {
'mse':b, # letter here,
'r2': b,# letter here,
'mae':b, # letter here
}
#Tests your answer - don't change this code
t.check_ten(best_fit)
# cells for work
def print_metrics(y_true, preds, model_name=None):
'''
INPUT:
y_true - the y values that are actually true in the dataset (numpy array or pandas series)
preds - the predictions for those values from some model (numpy array or pandas series)
model_name - (str - optional) a name associated with the model if you would like to add it to the print statements
OUTPUT:
None - prints the mse, mae, r2
'''
if model_name == None:
print('Mean Squared Error: ', format(mean_squared_error(y_true, preds)))
print('Mean Absolute Error: ', format(mean_absolute_error(y_true, preds)))
print('R2 Score: ', format(r2_score(y_true, preds)))
print('\n\n')
else:
print('Mean Squared Error ' + model_name + ' :' , format(mean_squared_error(y_true, preds)))
print('Mean Absolute Error ' + model_name + ' :', format(mean_absolute_error(y_true, preds)))
print('R2 Score ' + model_name + ' :', format(r2_score(y_true, preds)))
print('\n\n')
# Print Decision Tree scores
print_metrics(y_test, preds_tree, 'tree')
# Print Random Forest scores
print_metrics(y_test, preds_rf, 'random forest')
# Print AdaBoost scores
print_metrics(y_test, preds_ada, 'adaboost')
# Linear Regression scores
print_metrics(y_test, preds_reg, 'linear reg')
###Output
Mean Squared Error tree : 19.229101796407186
Mean Absolute Error tree : 3.0622754491017963
R2 Score tree : 0.7459106659216266
Mean Squared Error random forest : 11.331308982035926
Mean Absolute Error random forest : 2.275688622754491
R2 Score random forest : 0.8502704502807432
Mean Squared Error adaboost : 12.55590949065612
Mean Absolute Error adaboost : 2.5139163622137564
R2 Score adaboost : 0.8340888349852498
Mean Squared Error linear reg : 20.747143360309067
Mean Absolute Error linear reg : 3.1512878365884154
R2 Score linear reg : 0.7258515818230032
notebooks/lists_more.ipynb
###Markdown
Lists and Tuples===In this notebook, you will learn about lists, a super important data structure, that allows you to store more than one value in a single variable. This is one of the most powerful ideas in programming and introduces a number of other central concepts such as loops. [Previous: Variables, Strings, and Numbers](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/var_string_num.ipynb) | [Home](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/index.ipynb) | [Next: Introducing Functions](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/introducing_functions.ipynb) Contents===- [Lists](Lists) - [Introducing Lists](Introducing-Lists) - [Example](Example) - [Naming and defining a list](Naming-and-defining-a-list) - [Accessing one item in a list](Accessing-one-item-in-a-list) - [Exercises](Exercises-lists) - [Lists and Looping](Lists-and-Looping) - [Accessing all elements in a list](Accessing-all-elements-in-a-list) - [Enumerating a list](Enumerating-a-list) - [Exercises](Exercises-loops) - [Common List Operations](Common-List-Operations) - [Modifying elements in a list](Modifying-elements-in-a-list) - [Finding an element in a list](Finding-an-element-in-a-list) - [Testing whether an element is in a list](Testing-whether-an-element-is-in-a-list) - [Adding items to a list](Adding-items-to-a-list) - [Creating an empty list](Creating-an-empty-list) - [Sorting a list](Sorting-a-list) - [Finding the length of a list](Finding-the-length-of-a-list) - [Exercises](Exercises-operations) - [Removing Items from a List](Removing-Items-from-a-List) - [Removing items by position](Removing-items-by-position) - [Removing items by value](Removing-items-by-value) - [Popping items](Popping-items) - [Exercises](Exercises-removing) - [Want to see what functions are?](Want-to-see-what-functions-are?) - [Slicing a List](Slicing-a-list) - [Copying a list](Copying-a-list) - [Exercises](Exercises_slicing) - [Numerical Lists](Numerical-Lists) - [The *range()* function](The-*range()*-function) - [The *min()*, *max()*, *sum()* functions](min_max_sum) - [Exercises](Exercises_numerical) - [List Comprehensions](List-Comprehensions) - [Numerical comprehensions](Numerical-comprehensions) - [Non-numerical comprehensions](Non-numerical-comprehensions) - [Exercises](Exercises_comprehensions) - [Strings as Lists](Strings-as-lists) - [Strings as a list of characters](Strings-as-a-list-of-characters) - [Slicing strings](Slicing-strings) - [Finding substrings](Finding-substrings) - [Replacing substrings](Replacing-substrings) - [Counting substrings](Counting-substrings) - [Splitting strings](Splitting-strings) - [Other string methods](Other-string-methods) - [Exercises](Exercises-strings-as-lists) - [Challenges](Challenges-strings-as-lists) - [Tuples](Tuples) - [Defining tuples, and accessing elements](Defining-tuples,-and-accessing-elements) - [Using tuples to make strings](Using-tuples-to-make-strings) - [Exercises](Exercises_tuples) - [Coding Style: PEP 8](Coging-Style:-PEP-8) - [Why have style conventions?](Why-have-style-conventions?) - [What is a PEP?](What-is-a-PEP?) - [Basic Python style guidelines](Basic-Python-style-guidelines) - [Exercises](Exercises-pep8) - [Overall Challenges](Overall-Challenges) Lists=== Introducing Lists===Example---A list is a collection of items that is stored in a variable. 
The items should be related in some way, but there are no restrictions on what can be stored in a list. Here is a simple example of a list, and how we can quickly access each item in the list.Lists are called "arrays" in many languages. Python has a related data-structure called an array that is part of the `numpy` (numerical python) package. We will talk about differences between lists and arrays later on. Naming and defining a list---Since lists are collection of objects, it is good practice to give them a plural name. If each item in your list is an image, call the list `images`. If each item is a trial, call it `trials`. This gives you a straightforward way to refer to the entire list ('images'), and to a single item in the list ('image').In Python, lists are designated by square brackets. You can define an empty list like this:
###Code
images = []
###Output
_____no_output_____
###Markdown
To define a list with some initial values, you include the values within the square brackets
###Code
images = ['dog', 'cat', 'panda']
###Output
_____no_output_____
###Markdown
Accessing one item in a list---Items in a list are identified by their position in the list, starting with zero. This sometimes trips people up. To access the first element in a list, you give the name of the list, followed by a zero in parentheses.
###Code
images = ['dog', 'cat', 'panda']
print images[0]
###Output
dog
###Markdown
The number in parentheses is called the **index** of the item. Because lists start at zero, the index of an item is always one less than its position in the list. So to get the second item in the list, we need to use an index of 1.
###Code
images = ['dog', 'cat', 'panda']
print images[1]
###Output
cat
###Markdown
Accessing the last items in a listYou can probably see that to get the last item in this list, we would use an index of 2. This works, but it would only work because our list has exactly three items. Because it is so common for us to need the *last* value of the list, Python provides a simple way of doing it without needing to know how long the list is. To get the last item of the list, we use -1.
###Code
###highlight=[4]
images = ['dog', 'cat', 'panda']
print images[-1]
###Output
panda
###Markdown
This syntax also works for the second to last item, the third to last, and so forth.
###Code
###highlight=[4]
images = ['dog', 'cat', 'panda']
print images[-2]
###Output
cat
###Markdown
If you attempt to use a negative index whose magnitude is larger than the length of the list, you will get an IndexError:
###Code
###highlight=[4]
images = ['dog', 'cat', 'panda']
print images[-4]
###Output
_____no_output_____
###Markdown
If you are used to the syntax of some other languages, you may be tempted to get the last element in a list using syntax like `images[len(images)]`. This syntax will give you the same output as `images[-1]` but is more verbose, less clear, and thus dispreferred. [top]() Exercises--- First List- Store the values 'python', 'c', and 'java' in a list. Print each of these values out, using their position in the list. First Neat List- Store the values 'python', 'c', and 'java' in a list. Print a statement about each of these values, using their position in the list.- Your statement could simply be, 'A nice programming language is *value*.' Your First List- Think of something you can store in a list. Make a list with three or four items, and then print a message that includes at least one item from your list. Your sentence could be as simple as, "One item in my list is a ____." [top]() Lists and Looping=== Accessing all elements in a list---This is one of the most important concepts related to lists. You can have a list with a million items in it, and in three lines of code you can write a sentence for each of those million items. If you want to understand lists, and become a competent programmer, make sure you take the time to understand this section.We use a loop to access all the elements in a list. A loop is a block of code that repeats itself until it runs out of items to work with, or until a certain condition is met. In this case, our loop will run once for every item in our list. With a list that is three items long, our loop will run three times.Let's take a look at how we access all the items in a list, and then try to understand how it works.
###Code
images = ['dog', 'cat', 'red tailed raccoon']
for image in images:
print image
###Output
dog
cat
red tailed raccoon
###Markdown
If you want to see all the values in a list, e.g., for purposes of debugging, you can simply print the list itself: `print images` shows all the values of the list.
###Code
print images
###Output
['dog', 'cat', 'red tailed raccoon']
###Markdown
We have already seen how to create a list, so we are really just trying to understand how the last two lines work. These last two lines make up a loop, and the language here can help us see what is happening: for image in images:- The keyword "for" tells Python to get ready to use a loop.- The variable "image", with no "s" on it, is a temporary placeholder variable. This is the variable that Python will place each item in the list into, one at a time. This variable can be given any name, e.g., cur_image, or image_to__show but using a convention like image/images makes your code more understandable. - The first time through the loop, the value of "image" will be 'dog'.- The second time through the loop, the value of "image" will be 'cat'.- The third time through, "name" will be 'red tailed raccoon'.- After this, there are no more items in the list, and the loop will end.Notice that the last element in the list has several words. Despite containing multiple words, it is a single string. List values need not be strings. They can be any data-type including other lists, files, and functions. See [https://swcarpentry.github.io/python-novice-inflammation/03-lists/](these examples) for slightly more involved usages of lists.The site pythontutor.com allows you to run Python code one line at a time. As you run the code, there is also a visualization on the screen that shows you how the variable "dog" holds different values as the loop progresses. There is also an arrow that moves around your code, showing you how some lines are run just once, while other lines are run multiple tiimes. If you would like to see this in action, click the Forward button and watch the visualization, and the output as it is printed to the screen. Tools like this are incredibly valuable for seeing what Python is doing with your code. Doing more with each itemWe can do whatever we want with the value of "dog" inside the loop. In this case, we just print the name of the dog. print dog We are not limited to just printing the word dog. We can do whatever we want with this value, and this action will be carried out for every item in the list. Let's say something about each dog in our list.
###Code
###highlight=[5]
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
for dog in dogs:
print('I like ' + dog + 's.')
###Output
I like border collies.
I like australian cattle dogs.
I like labrador retrievers.
###Markdown
Visualize this on pythontutor. Inside and outside the loop: Python uses indentation to decide what is inside the loop and what is outside the loop. Code that is inside the loop will be run for every item in the list. Code that is not indented, which comes after the loop, will be run once just like regular code.
###Code
###highlight=[6,7,8]
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
for dog in dogs:
print('I like ' + dog + 's.')
print('No, I really really like ' + dog +'s!\n')
print("\nThat's just how I feel about dogs.")
###Output
I like border collies.
No, I really really like border collies!
I like australian cattle dogs.
No, I really really like australian cattle dogs!
I like labrador retrievers.
No, I really really like labrador retrievers!
That's just how I feel about dogs.
###Markdown
Notice that the last line only runs once, after the loop is completed. Also notice the use of newlines ("\n") to make the output easier to read. Run this code on pythontutor. [top]() Enumerating a list---When you are looping through a list, you may sometimes not only want to access the current list element, but also want to know the index of the current item. The preferred (*Pythonic*) way of doing this is to use the `enumerate()` function which conveniently tracks the index of each item for you, as you loop through the list: To enumerate a list, you need to add an *index* variable to hold the current index. So instead of for dog in dogs: You have for index, dog in enumerate(dogs) The value in the variable *index* is always an integer. If you want to print it in a string, you have to turn the integer into a string: str(index) The index always starts at 0, so in this example the value of *place* should actually be the current index, plus one:
###Code
people = ['Desia', 'Pablo', 'Matt', 'Vincent', 'Tamara', 'Mengguo', 'Ian', 'Rui', 'Yuvraj', 'Steven', 'Katharine', 'Sasha', 'Nathan', 'Kristina', 'Olivia']
for i, person in enumerate(sorted(people)):
print "Person number " + str(i) + " in the class is " + person
###Output
_____no_output_____
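###Markdown
A minimal sketch of the "plus one" point above: to report a 1-based place for each item, add one to the index that *enumerate()* provides.
###Code
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
places = []
for index, dog in enumerate(dogs):
    place = index + 1  # enumerate() starts counting at 0, so the first dog is in place 1
    places.append('Place ' + str(place) + ': ' + dog)
# places is now ['Place 1: border collie', 'Place 2: australian cattle dog', 'Place 3: labrador retriever']
###Output
_____no_output_____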
###Markdown
A common looping errorOne common looping error occurs when instead of using the single variable *dog* inside the loop, we accidentally use the variable that holds the entire list:
###Code
###highlight=[5]
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
for dog in dogs:
print(dogs)
###Output
['border collie', 'australian cattle dog', 'labrador retriever']
['border collie', 'australian cattle dog', 'labrador retriever']
['border collie', 'australian cattle dog', 'labrador retriever']
###Markdown
In this example, instead of printing each dog in the list, we print the entire list every time we go through the loop. Python puts each individual item in the list into the variable *dog*, but we never use that variable. Sometimes you will just get an error if you try to do this:
###Code
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
for dog in dogs:
print('I like ' + dogs + 's.')
###Output
_____no_output_____
###Markdown
Exercises--- First List - Loop- Repeat *First List*, but this time use a loop to print out each value in the list. First Neat List - Loop- Repeat *First Neat List*, but this time use a loop to print out your statements. Make sure you are writing the same sentence for all values in your list. Loops are not effective when you are trying to generate different output for each value in your list. Your First List - Loop- Repeat *Your First List*, but this time use a loop to print out your message for each item in your list. Again, if you came up with different messages for each value in your list, decide on one message to repeat for each value in your list. [top]() Common List Operations=== Modifying elements in a list---You can change the value of any element in a list if you know the position of that item.
###Code
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
dogs[0] = 'australian shepherd'
print(dogs)
###Output
['australian shepherd', 'australian cattle dog', 'labrador retriever']
###Markdown
Finding an element in a list---If you want to find out the position of an element in a list, you can use the index() function.
###Code
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
print(dogs.index('australian cattle dog'))
###Output
1
###Markdown
This method raises a ValueError if the requested item is not in the list.
###Code
###highlight=[4]
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
print(dogs.index('poodle'))
###Output
_____no_output_____
###Markdown
Testing whether an item is in a list---You can test whether an item is in a list using the "in" keyword. This will become more useful after learning how to use if-else statements.
###Code
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
print('australian cattle dog' in dogs)
print('poodle' in dogs)
###Output
True
False
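###Markdown
As a quick preview (a minimal sketch; if-else statements are covered later), the *in* keyword is most often used as a condition:
###Code
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
requested_dog = 'poodle'
if requested_dog in dogs:
    message = 'Yes, we have a ' + requested_dog + '.'
else:
    message = 'Sorry, we do not have a ' + requested_dog + '.'
# message is now 'Sorry, we do not have a poodle.'
###Output
_____no_output_____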
###Markdown
Adding items to a list--- Appending items to the end of a list: We can add an item to a list using the *append()* method. This method adds the new item to the end of the list.
###Code
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
dogs.append('poodle')
for dog in dogs:
print(dog.title() + "s are cool.")
###Output
Border Collies are cool.
Australian Cattle Dogs are cool.
Labrador Retrievers are cool.
Poodles are cool.
###Markdown
Inserting items into a list: We can also insert items anywhere we want in a list, using the **insert()** function. We specify the position we want the item to have, and everything from that point on is shifted one position to the right. In other words, the index of every item after the new item is increased by one.
###Code
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
dogs.insert(1, 'poodle')
print(dogs)
###Output
['border collie', 'poodle', 'australian cattle dog', 'labrador retriever']
###Markdown
Note that you have to give the position of the new item first, and then the value of the new item. If you do it in the reverse order, you will get an error. Creating an empty list---Now that we know how to add items to a list after it is created, we can use lists more dynamically. We are no longer stuck defining our entire list at once.A common approach with lists is to define an empty list, and then let your program add items to the list as necessary. This approach works, for example, when starting to build an interactive web site. Your list of users might start out empty, and then as people register for the site it will grow. This is a simplified approach to how web sites actually work, but the idea is realistic.Here is a brief example of how to start with an empty list, start to fill it up, and work with the items in the list. The only new thing here is the way we define an empty list, which is just an empty set of square brackets.
###Code
# Create an empty list to hold our users.
names = []
# Add some users.
names.append('Desia')
names.append('Pablo')
names.append('Matt')
# Greet everyone.
for name in names:
print "Welcome, " + name + '!'
###Output
Welcome, Desia!
Welcome, Pablo!
Welcome, Matt!
###Markdown
If we don't change the order in our list, we can use the list to figure out who our oldest and newest users are.
###Code
###highlight=[10,11,12]
# Create an empty list to hold our users.
names = []
# Add some users.
names.append('Desia')
names.append('Pablo')
names.append('Matt')
# Greet everyone.
for name in names:
print "Welcome, " + name + '!'
# Recognize our first user, and welcome our newest user.
print("\nThank you for being our very first user, " + names[0].title() + '!')
print("And a warm welcome to our newest user, " + names[-1].title() + '!')
###Output
Welcome, Desia!
Welcome, Pablo!
Welcome, Matt!
Thank you for being our very first user, Desia!
And a warm welcome to our newest user, Matt!
###Markdown
Note that the code welcoming our newest user will always work, because we have used the index -1. If we had used the index 2 we would always get the third user, even as our list of users grows and grows. Sorting a List---We can sort a list alphabetically, in either order.
###Code
students = ['bernice', 'aaron', 'cody']
# Put students in alphabetical order.
students.sort()
# Display the list in its current order.
print("Our students are currently in alphabetical order.")
for student in students:
print(student.title())
#Put students in reverse alphabetical order.
students.sort(reverse=True)
# Display the list in its current order.
print("\nOur students are now in reverse alphabetical order.")
for student in students:
print(student.title())
###Output
Our students are currently in alphabetical order.
Aaron
Bernice
Cody
Our students are now in reverse alphabetical order.
Cody
Bernice
Aaron
###Markdown
*sorted()* vs. *sort()*Whenever you consider sorting a list, keep in mind that you can not recover the original order. If you want to display a list in sorted order, but preserve the original order, you can use the *sorted()* function. The *sorted()* function also accepts the optional *reverse=True* argument.
###Code
students = ['bernice', 'aaron', 'cody']
# Display students in alphabetical order, but keep the original order.
print("Here is the list in alphabetical order:")
for student in sorted(students):
print(student.title())
# Display students in reverse alphabetical order, but keep the original order.
print("\nHere is the list in reverse alphabetical order:")
for student in sorted(students, reverse=True):
print(student.title())
print("\nHere is the list in its original order:")
# Show that the list is still in its original order.
for student in students:
print(student.title())
###Output
Here is the list in alphabetical order:
Aaron
Bernice
Cody
Here is the list in reverse alphabetical order:
Cody
Bernice
Aaron
Here is the list in its original order:
Bernice
Aaron
Cody
###Markdown
Reversing a listWe have seen three possible orders for a list:- The original order in which the list was created- Alphabetical order- Reverse alphabetical orderThere is one more order we can use, and that is the reverse of the original order of the list. The *reverse()* function gives us this order.
###Code
students = ['bernice', 'aaron', 'cody']
students.reverse()
print(students)
###Output
['cody', 'aaron', 'bernice']
###Markdown
Note that reverse is permanent, although you could follow up with another call to *reverse()* and get back the original order of the list. Sorting a numerical listAll of the sorting functions work for numerical lists as well.
###Code
numbers = [1, 3, 4, 2]
# sort() puts numbers in increasing order.
numbers.sort()
print(numbers)
# sort(reverse=True) puts numbers in decreasing order.
numbers.sort(reverse=True)
print(numbers)
numbers = [1, 3, 4, 2]
# sorted() preserves the original order of the list:
print(sorted(numbers))
print(numbers)
numbers = [1, 3, 4, 2]
# The reverse() function also works for numerical lists.
numbers.reverse()
print(numbers)
###Output
[2, 4, 3, 1]
###Markdown
Finding the length of a list---You can find the length of a list using the *len()* function.
###Code
usernames = ['bernice', 'cody', 'aaron']
user_count = len(usernames)
print(user_count)
###Output
3
###Markdown
There are many situations where you might want to know how many items are in a list. If you have a list that stores your users, you can find the length of your list at any time, and know how many users you have.
###Code
# Create an empty list to hold our users.
usernames = []
# Add some users, and report on how many users we have.
usernames.append('bernice')
user_count = len(usernames)
print("We have " + str(user_count) + " user!")
usernames.append('cody')
usernames.append('aaron')
user_count = len(usernames)
print("We have " + str(user_count) + " users!")
###Output
We have 1 user!
We have 3 users!
###Markdown
On a technical note, the *len()* function returns an integer, which can't be concatenated directly with strings. We use the *str()* function to turn the integer into a string so that it prints nicely:
###Code
usernames = ['bernice', 'cody', 'aaron']
user_count = len(usernames)
print("This will cause an error: " + user_count)
###highlight=[5]
usernames = ['bernice', 'cody', 'aaron']
user_count = len(usernames)
print("This will work: " + str(user_count))
###Output
This will work: 3
###Markdown
Exercises--- Working List- Make a list that includes four careers, such as 'programmer' and 'truck driver'.- Use the *list.index()* function to find the index of one career in your list.- Use the *in* function to show that this career is in your list.- Use the *append()* function to add a new career to your list.- Use the *insert()* function to add a new career at the beginning of the list.- Use a loop to show all the careers in your list. Starting From Empty- Create the list you ended up with in *Working List*, but this time start your file with an empty list and fill it up using *append()* statements.- Print a statement that tells us what the first career you thought of was.- Print a statement that tells us what the last career you thought of was. Ordered Working List- Start with the list you created in *Working List*.- You are going to print out the list in a number of different orders.- Each time you print the list, use a for loop rather than printing the raw list.- Print a message each time telling us what order we should see the list in. - Print the list in its original order. - Print the list in alphabetical order. - Print the list in its original order. - Print the list in reverse alphabetical order. - Print the list in its original order. - Print the list in the reverse order from what it started. - Print the list in its original order - Permanently sort the list in alphabetical order, and then print it out. - Permanently sort the list in reverse alphabetical order, and then print it out. Ordered Numbers- Make a list of 5 numbers, in a random order.- You are going to print out the list in a number of different orders.- Each time you print the list, use a for loop rather than printing the raw list.- Print a message each time telling us what order we should see the list in. - Print the numbers in the original order. - Print the numbers in increasing order. - Print the numbers in the original order. - Print the numbers in decreasing order. - Print the numbers in their original order. - Print the numbers in the reverse order from how they started. - Print the numbers in the original order. - Permanently sort the numbers in increasing order, and then print them out. - Permanently sort the numbers in descreasing order, and then print them out. List Lengths- Copy two or three of the lists you made from the previous exercises, or make up two or three new lists.- Print out a series of statements that tell us how long each list is. [top]() Removing Items from a List===Hopefully you can see by now that lists are a dynamic structure. We can define an empty list and then fill it up as information comes into our program. To become really dynamic, we need some ways to remove items from a list when we no longer need them. You can remove items from a list through their position, or through their value. Removing items by position---If you know the position of an item in a list, you can remove that item using the *del* command. To use this approach, give the command *del* and the name of your list, with the index of the item you want to move in square brackets:
###Code
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
# Remove the first dog from the list.
del dogs[0]
print(dogs)
###Output
['australian cattle dog', 'labrador retriever']
###Markdown
Removing items by value---You can also remove an item from a list if you know its value. To do this, we use the *remove()* function. Give the name of the list, followed by the word remove with the value of the item you want to remove in parentheses. Python looks through your list, finds the first item with this value, and removes it.
###Code
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
# Remove australian cattle dog from the list.
dogs.remove('australian cattle dog')
print(dogs)
###Output
['border collie', 'labrador retriever']
###Markdown
Be careful to note, however, that *only* the first item with this value is removed. If you have multiple items with the same value, you will have some items with this value left in your list.
###Code
letters = ['a', 'b', 'c', 'a', 'b', 'c']
# Remove the letter a from the list.
letters.remove('a')
print(letters)
###Output
['b', 'c', 'a', 'b', 'c']
###Markdown
Popping items from a list---There is a cool concept in programming called "popping" items from a collection. Every programming language has some sort of data structure similar to Python's lists. All of these structures can be used as queues, and there are various ways of processing the items in a queue.One simple approach is to start with an empty list, and then add items to that list. When you want to work with the items in the list, you always take the last item from the list, do something with it, and then remove that item. The *pop()* function makes this easy. It removes the last item from the list, and gives it to us so we can work with it. This is easier to show with an example:
###Code
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
last_dog = dogs.pop()
print(last_dog)
print(dogs)
###Output
labrador retriever
['border collie', 'australian cattle dog']
###Markdown
This is an example of a first-in, last-out approach. The first item in the list would be the last item processed if you kept using this approach. We will see a full implementation of this approach later on, when we learn about *while* loops.You can actually pop any item you want from a list, by giving the index of the item you want to pop. So we could do a first-in, first-out approach by popping the first iem in the list:
###Code
###highlight=[3]
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
first_dog = dogs.pop(0)
print(first_dog)
print(dogs)
###Output
border collie
['australian cattle dog', 'labrador retriever']
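###Markdown
As a quick preview of the queue idea described above (a minimal sketch; *while* loops are covered later), popping can be used to process a list until it is empty:
###Code
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
walked = []
# Keep popping the first dog until the list is empty (first-in, first-out).
while dogs:
    walked.append(dogs.pop(0))
# walked is now ['border collie', 'australian cattle dog', 'labrador retriever']
# dogs is now []
###Output
_____no_output_____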
###Markdown
Exercises--- Famous People- Make a list that includes the names of four famous people.- Remove each person from the list, one at a time, using each of the four methods we have just seen: - Pop the last item from the list, and pop any item except the last item. - Remove one item by its position, and one item by its value.- Print out a message that there are no famous people left in your list, and print your list to prove that it is empty. [top]() Want to see what functions are?===At this point, you might have noticed we have a fair bit of repetetive code in some of our examples. This repetition will disappear once we learn how to use functions. If this repetition is bothering you already, you might want to go look at [Introducing Functions](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/introducing_functions.ipynb) before you do any more exercises in this section. Slicing a List===Since a list is a collection of items, we should be able to get any subset of those items. For example, if we want to get just the first three items from the list, we should be able to do so easily. The same should be true for any three items in the middle of the list, or the last three items, or any x items from anywhere in the list. These subsets of a list are called *slices*.To get a subset of a list, we give the position of the first item we want, and the position of the first item we do *not* want to include in the subset. So the slice *list[0:3]* will return a list containing items 0, 1, and 2, but not item 3. Here is how you get a batch containing the first three items.
###Code
usernames = ['bernice', 'cody', 'aaron', 'ever', 'dalia']
# Grab the first three users in the list.
first_batch = usernames[0:3]
for user in first_batch:
print(user.title())
###Output
Bernice
Cody
Aaron
###Markdown
If you want to grab everything up to a certain position in the list, you can also leave the first index blank:
###Code
###highlight=[5]
usernames = ['bernice', 'cody', 'aaron', 'ever', 'dalia']
# Grab the first three users in the list.
first_batch = usernames[:3]
for user in first_batch:
print(user.title())
###Output
Bernice
Cody
Aaron
###Markdown
When we grab a slice from a list, the original list is not affected:
###Code
###highlight=[7,8,9]
usernames = ['bernice', 'cody', 'aaron', 'ever', 'dalia']
# Grab the first three users in the list.
first_batch = usernames[0:3]
# The original list is unaffected.
for user in usernames:
print(user.title())
###Output
Bernice
Cody
Aaron
Ever
Dalia
###Markdown
We can get any segment of a list we want, using the slice method:
###Code
usernames = ['bernice', 'cody', 'aaron', 'ever', 'dalia']
# Grab a batch from the middle of the list.
middle_batch = usernames[1:4]
for user in middle_batch:
print(user.title())
###Output
Cody
Aaron
Ever
###Markdown
To get all items from one position in the list to the end of the list, we can leave off the second index:
###Code
usernames = ['bernice', 'cody', 'aaron', 'ever', 'dalia']
# Grab all users from the third to the end.
end_batch = usernames[2:]
for user in end_batch:
print(user.title())
###Output
Aaron
Ever
Dalia
###Markdown
Copying a list---You can use the slice notation to make a copy of a list, by leaving out both the starting and the ending index. This causes the slice to consist of everything from the first item to the last, which is the entire list.
###Code
usernames = ['bernice', 'cody', 'aaron', 'ever', 'dalia']
# Make a copy of the list.
copied_usernames = usernames[:]
print("The full copied list:\n\t", copied_usernames)
# Remove the first two users from the copied list.
del copied_usernames[0]
del copied_usernames[0]
print("\nTwo users removed from copied list:\n\t", copied_usernames)
# The original list is unaffected.
print("\nThe original list:\n\t", usernames)
###Output
The full copied list:
['bernice', 'cody', 'aaron', 'ever', 'dalia']
Two users removed from copied list:
['aaron', 'ever', 'dalia']
The original list:
['bernice', 'cody', 'aaron', 'ever', 'dalia']
###Markdown
Exercises--- Alphabet Slices- Store the first ten letters of the alphabet in a list.- Use a slice to print out the first three letters of the alphabet.- Use a slice to print out any three letters from the middle of your list.- Use a slice to print out the letters from any point in the middle of your list, to the end. Protected List- Your goal in this exercise is to prove that copying a list protects the original list.- Make a list with three people's names in it.- Use a slice to make a copy of the entire list.- Add at least two new names to the new copy of the list.- Make a loop that prints out all of the names in the original list, along with a message that this is the original list.- Make a loop that prints out all of the names in the copied list, along with a message that this is the copied list. [top]() Numerical Lists===There is nothing special about lists of numbers, but there are some functions you can use to make working with numerical lists more efficient. Let's make a list of the first ten numbers, and start working with it to see how we can use numbers in a list.
###Code
# Print out the first ten numbers.
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for number in numbers:
print(number)
###Output
1
2
3
4
5
6
7
8
9
10
###Markdown
The *range()* function---This works, but it is not very efficient if we want to work with a large set of numbers. The *range()* function helps us generate long lists of numbers. Here are two ways to do the same thing, using the *range* function.
###Code
# Print the first ten numbers.
for number in range(1,11):
print(number)
###Output
1
2
3
4
5
6
7
8
9
10
###Markdown
The range function takes in a starting number, and an end number. You get all integers, up to but not including the end number. You can also add a *step* value, which tells the *range* function how big of a step to take between numbers:
###Code
# Print the first ten odd numbers.
for number in range(1,21,2):
print(number)
###Output
1
3
5
7
9
11
13
15
17
19
###Markdown
If we want to store these numbers in a list, we can use the *list()* function. This function takes in a range, and turns it into a list:
###Code
# Create a list of the first ten numbers.
numbers = list(range(1,11))
print(numbers)
###Output
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
###Markdown
This is incredibly powerful; we can now create a list of the first million numbers, just as easily as we made a list of the first ten numbers. It doesn't really make sense to print the million numbers here, but we can show that the list really does have one million items in it, and we can print the last ten items to show that the list is correct.
###Code
# Store the first million numbers in a list.
numbers = list(range(1,1000001))
# Show the length of the list:
print("The list 'numbers' has " + str(len(numbers)) + " numbers in it.")
# Show the last ten numbers:
print("\nThe last ten numbers in the list are:")
for number in numbers[-10:]:
print(number)
###Output
The list 'numbers' has 1000000 numbers in it.
The last ten numbers in the list are:
999991
999992
999993
999994
999995
999996
999997
999998
999999
1000000
###Markdown
There are two things here that might be a little unclear. The expression `str(len(numbers))` takes the length of the *numbers* list, and turns it into a string that can be printed. The expression `numbers[-10:]` gives us a *slice* of the list. The index `-1` is the last item in the list, and the index `-10` is the item ten places from the end of the list. So the slice `numbers[-10:]` gives us everything from that item to the end of the list. The *min()*, *max()*, and *sum()* functions---There are three functions you can easily use with numerical lists. As you might expect, the *min()* function returns the smallest number in the list, the *max()* function returns the largest number in the list, and the *sum()* function returns the total of all numbers in the list.
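Before trying those functions, here is a small sketch of the negative-index slicing described above; the list values are made up purely for illustration:
```python
letters = ['a', 'b', 'c', 'd', 'e']

print(letters[-1])    # the last item: 'e'
print(letters[-3:])   # the last three items: ['c', 'd', 'e']
```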
###Code
ages = [23, 16, 14, 28, 19, 11, 38]
youngest = min(ages)
oldest = max(ages)
total_years = sum(ages)
print("Our youngest reader is " + str(youngest) + " years old.")
print("Our oldest reader is " + str(oldest) + " years old.")
print("Together, we have " + str(total_years) + " years worth of life experience.")
###Output
Our youngest reader is 11 years old.
Our oldest reader is 38 years old.
Together, we have 149 years worth of life experience.
###Markdown
Exercises--- First Twenty- Use the *range()* function to store the first twenty numbers (1-20) in a list, and print them out. Larger Sets- Take the *first\_twenty.py* program you just wrote. Change your end number to a much larger number. How long does it take your computer to print out the first million numbers? (Most people will never see a million numbers scroll before their eyes. You can now see this!) Five Wallets- Imagine five wallets with different amounts of cash in them. Store these five values in a list, and print out the following sentences: - "The fattest wallet has $ *value* in it." - "The skinniest wallet has $ *value* in it." - "All together, these wallets have $ *value* in them." [top]() List Comprehensions===I thought carefully before including this section. If you are brand new to programming, list comprehensions may look confusing at first. They are a shorthand way of creating and working with lists. It is good to be aware of list comprehensions, because you will see them in other people's code, and they are really useful when you understand how to use them. That said, if they don't make sense to you yet, don't worry about using them right away. When you have worked with enough lists, you will want to use comprehensions. For now, it is good enough to know they exist, and to recognize them when you see them. If you like them, go ahead and start trying to use them now. Numerical Comprehensions---Let's consider how we might make a list of the first ten square numbers. We could do it like this:
###Code
# Store the first ten square numbers in a list.
# Make an empty list that will hold our square numbers.
squares = []
# Go through the first ten numbers, square them, and add them to our list.
for number in range(1,11):
new_square = number**2
squares.append(new_square)
# Show that our list is correct.
for square in squares:
print(square)
###Output
1
4
9
16
25
36
49
64
81
100
###Markdown
This should make sense at this point. If it doesn't, go over the code with these thoughts in mind:- We make an empty list called *squares* that will hold the values we are interested in.- Using the *range()* function, we start a loop that will go through the numbers 1-10.- Each time we pass through the loop, we find the square of the current number by raising it to the second power.- We add this new value to our list *squares*.- We go through our newly-defined list and print out each square. Now let's make this code more efficient. We don't really need to store the new square in its own variable *new_square*; we can just add it directly to the list of squares. The line `new_square = number**2` is taken out, and the next line takes care of the squaring:
###Code
###highlight=[8]
# Store the first ten square numbers in a list.
# Make an empty list that will hold our square numbers.
squares = []
# Go through the first ten numbers, square them, and add them to our list.
for number in range(1,11):
squares.append(number**2)
# Show that our list is correct.
for square in squares:
print(square)
###Output
1
4
9
16
25
36
49
64
81
100
###Markdown
List comprehensions allow us to collapse the first three lines of code into one line. Here's what it looks like:
###Code
###highlight=[2,3]
# Store the first ten square numbers in a list.
squares = [number**2 for number in range(1,11)]
# Show that our list is correct.
for square in squares:
print(square)
###Output
1
4
9
16
25
36
49
64
81
100
###Markdown
It should be pretty clear that this code is more efficient than our previous approach, but it may not be clear what is happening. Let's take a look at everything that is happening in that first line: We define a list called *squares*. Look at the second part of what's in square brackets: `for number in range(1,11)`. This sets up a loop that goes through the numbers 1-10, storing each value in the variable *number*. Now we can see what happens to each *number* in the loop: `number**2`. Each number is raised to the second power, and this is the value that is stored in the list we defined. We might read this line in the following way: squares = [raise *number* to the second power, for each *number* in the range 1-10] Another example---It is probably helpful to see a few more examples of how comprehensions can be used. Let's try to make the first ten even numbers, the longer way:
###Code
# Make an empty list that will hold the even numbers.
evens = []
# Loop through the numbers 1-10, double each one, and add it to our list.
for number in range(1,11):
evens.append(number*2)
# Show that our list is correct:
for even in evens:
print(even)
###Output
2
4
6
8
10
12
14
16
18
20
###Markdown
Here's how we might think of doing the same thing, using a list comprehension: evens = [multiply each *number* by 2, for each *number* in the range 1-10]. Here is the same line in code:
###Code
###highlight=[2,3]
# Make a list of the first ten even numbers.
evens = [number*2 for number in range(1,11)]
for even in evens:
print(even)
###Output
2
4
6
8
10
12
14
16
18
20
###Markdown
Non-numerical comprehensions---We can use comprehensions with non-numerical lists as well. In this case, we will create an initial list, and then use a comprehension to make a second list from the first one. Here is a simple example, without using comprehensions:
###Code
# Consider some students.
students = ['bernice', 'aaron', 'cody']
# Let's turn them into great students.
great_students = []
for student in students:
great_students.append(student.title() + " the great!")
# Let's greet each great student.
for great_student in great_students:
print("Hello, " + great_student)
###Output
Hello, Bernice the great!
Hello, Aaron the great!
Hello, Cody the great!
###Markdown
To use a comprehension in this code, we want to write something like this: great_students = [add 'the great' to each *student*, for each *student* in the list of *students*]. Here's what it looks like:
###Code
###highlight=[5,6]
# Consider some students.
students = ['bernice', 'aaron', 'cody']
# Let's turn them into great students.
great_students = [student.title() + " the great!" for student in students]
# Let's greet each great student.
for great_student in great_students:
print("Hello, " + great_student)
###Output
Hello, Bernice the great!
Hello, Aaron the great!
Hello, Cody the great!
###Markdown
Exercises---If these examples are making sense, go ahead and try to do the following exercises using comprehensions. If not, try the exercises without comprehensions. You may figure out how to use comprehensions after you have solved each exercise the longer way. Multiples of Ten- Make a list of the first ten multiples of ten (10, 20, 30... 90, 100). There are a number of ways to do this, but try to do it using a list comprehension. Print out your list. Cubes- We saw how to make a list of the first ten squares. Make a list of the first ten cubes (1, 8, 27... 1000) using a list comprehension, and print them out. Awesomeness- Store five names in a list. Make a second list that adds the phrase "is awesome!" to each name, using a list comprehension. Print out the awesome version of the names. Working Backwards- Write out the following code without using a list comprehension: plus_thirteen = [number + 13 for number in range(1,11)] [top]() Strings as Lists===Now that you have some familiarity with lists, we can take a second look at strings. A string is really a list of characters, so many of the concepts from working with lists behave the same with strings. Strings as a list of characters---We can loop through a string using a *for* loop, just like we loop through a list:
###Code
message = "Hello!"
for letter in message:
print(letter)
###Output
H
e
l
l
o
!
###Markdown
We can create a list from a string. The list will have one element for each character in the string:
###Code
message = "Hello world!"
message_list = list(message)
print(message_list)
###Output
['H', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', '!']
###Markdown
Slicing strings---We can access any character in a string by its position, just as we access individual items in a list:
###Code
message = "Hello World!"
first_char = message[0]
last_char = message[-1]
print(first_char, last_char)
###Output
H !
###Markdown
We can extend this to take slices of a string:
###Code
message = "Hello World!"
first_three = message[:3]
last_three = message[-3:]
print(first_three, last_three)
###Output
Hel ld!
###Markdown
Finding substrings---Now that you have seen what indexes mean for strings, we can search for *substrings*. A substring is a series of characters that appears in a string. You can use the *in* keyword to find out whether a particular substring appears in a string:
###Code
message = "I like cats and dogs."
dog_present = 'dog' in message
print(dog_present)
###Output
True
###Markdown
If you want to know where a substring appears in a string, you can use the *find()* method. The *find()* method tells you the index at which the substring begins.
###Code
message = "I like cats and dogs."
dog_index = message.find('dog')
print(dog_index)
###Output
16
###Markdown
Note, however, that this function only returns the index of the first appearance of the substring you are looking for. If the substring appears more than once, you will miss the other substrings.
###Code
###highlight=[2]
message = "I like cats and dogs, but I'd much rather own a dog."
dog_index = message.find('dog')
print(dog_index)
###Output
16
###Markdown
If you want to find the last appearance of a substring, you can use the *rfind()* function:
###Code
###highlight=[3,4]
message = "I like cats and dogs, but I'd much rather own a dog."
last_dog_index = message.rfind('dog')
print(last_dog_index)
###Output
48
###Markdown
Replacing substrings---You can use the *replace()* function to replace any substring with another substring. To use the *replace()* function, give the substring you want to replace, and then the substring you want to replace it with. You also need to store the new string, either in the same string variable or in a new variable.
###Code
message = "I like cats and dogs, but I'd much rather own a dog."
message = message.replace('dog', 'snake')
print(message)
###Output
I like cats and snakes, but I'd much rather own a snake.
###Markdown
Counting substrings---If you want to know how many times a substring appears within a string, you can use the *count()* method.
###Code
message = "I like cats and dogs, but I'd much rather own a dog."
number_dogs = message.count('dog')
print(number_dogs)
###Output
2
###Markdown
Splitting strings---Strings can be split into a set of substrings when they are separated by a repeated character. If a string consists of a simple sentence, the string can be split based on spaces. The *split()* function returns a list of substrings. The *split()* function takes one argument, the character that separates the parts of the string.
###Code
message = "I like cats and dogs, but I'd much rather own a dog."
words = message.split(' ')
print(words)
###Output
['I', 'like', 'cats', 'and', 'dogs,', 'but', "I'd", 'much', 'rather', 'own', 'a', 'dog.']
###Markdown
Notice that the punctuation is left in the substrings. It is more common to split strings that are really lists, separated by something like a comma. The *split()* function gives you an easy way to turn comma-separated strings, which you can't do much with in Python, into lists. Once you have your data in a list, you can work with it in much more powerful ways.
###Code
animals = "dog, cat, tiger, mouse, liger, bear"
# Rewrite the string as a list, and store it in the same variable
animals = animals.split(',')
print(animals)
###Output
['dog', ' cat', ' tiger', ' mouse', ' liger', ' bear']
###Markdown
Notice that in this case, the spaces that followed the commas are kept in the substrings, because the split was only on the comma character. It is a good idea to test the output of the *split()* function and make sure it is doing what you want with the data you are interested in. One use of this is to work with spreadsheet data in your Python programs. Most spreadsheet applications allow you to dump your data into a comma-separated text file. You can read this file into your Python program, or even copy and paste from the text file into your program file, and then turn the data into a list. You can then process your spreadsheet data using a *for* loop. Other string methods---There are a number of [other string methods](http://docs.python.org/3.3/library/stdtypes.html#string-methods) that we won't go into right here, but you might want to take a look at them. Most of these methods should make sense to you at this point. You might not have use for any of them right now, but it is good to know what you can do with strings. This way you will have a sense of how to solve certain problems, even if it means referring back to the list of methods to remind yourself how to write the correct syntax when you need it. Exercises--- Listing a Sentence- Store a single sentence in a variable. Use a for loop to print each character from your sentence on a separate line. Sentence List- Store a single sentence in a variable. Create a list from your sentence. Print your raw list (don't use a loop, just print the list). Sentence Slices- Store a sentence in a variable. Using slices, print out the first five characters, any five consecutive characters from the middle of the sentence, and the last five characters of the sentence. Finding Python- Store a sentence in a variable, making sure you use the word *Python* at least twice in the sentence.- Use the *in* keyword to prove that the word *Python* is actually in the sentence.- Use the *find()* function to show where the word *Python* first appears in the sentence.- Use the *rfind()* function to show the last place *Python* appears in the sentence.- Use the *count()* function to show how many times the word *Python* appears in your sentence.- Use the *split()* function to break your sentence into a list of words. Print the raw list, and use a loop to print each word on its own line.- Use the *replace()* function to change *Python* to *Ruby* in your sentence. Challenges--- Counting DNA Nucleotides- [Project Rosalind](http://rosalind.info/problems/locations/) is a [problem set](http://rosalind.info/problems/list-view/) based on biotechnology concepts. It is meant to show how programming skills can help solve problems in genetics and biology.- If you have understood this section on strings, you have enough information to solve the first problem in Project Rosalind, [Counting DNA Nucleotides](http://rosalind.info/problems/dna/). Give the sample problem a try.- If you get the sample problem correct, log in and try the full version of the problem! Transcribing DNA into RNA- You also have enough information to try the second problem, [Transcribing DNA into RNA](http://rosalind.info/problems/rna/). Solve the sample problem.- If you solved the sample problem, log in and try the full version! Complementing a Strand of DNA- You guessed it, you can now try the third problem as well: [Complementing a Strand of DNA](http://rosalind.info/problems/revc/). Try the sample problem, and then try the full version if you are successful. [top]() Tuples===Tuples are basically lists that can never be changed.
Lists are dynamic; they grow as you append and insert items and they can shrink as you remove items. You can modify any element you want to in a list. Sometimes we like this behavior, but other times we may want to ensure that no user or no part of a program can change a list. That's what tuples are for. Technically, lists are *mutable* objects and tuples are *immutable* objects. Mutable objects can change (think of *mutations*), and immutable objects can not change. Defining tuples, and accessing elements---You define a tuple just like you define a list, except you use parentheses instead of square brackets. Once you have a tuple, you can access individual elements just like you can with a list, and you can loop through the tuple with a *for* loop:
###Code
colors = ('red', 'green', 'blue')
print("The first color is: " + colors[0])
print("\nThe available colors are:")
for color in colors:
print("- " + color)
###Output
The first color is: red
The available colors are:
- red
- green
- blue
###Markdown
If you try to add something to a tuple, you will get an error:
###Code
colors = ('red', 'green', 'blue')
colors.append('purple')
###Output
_____no_output_____
###Markdown
The same kind of thing happens when you try to remove something from a tuple, or modify one of its elements. Once you define a tuple, you can be confident that its values will not change. Using tuples to make strings---We have seen that it is pretty useful to be able to mix raw English strings with values that are stored in variables, as in the following:
###Code
animal = 'dog'
print("I have a " + animal + ".")
###Output
I have a dog.
###Markdown
This was especially useful when we had a series of similar statements to make:
###Code
animals = ['dog', 'cat', 'bear']
for animal in animals:
print("I have a " + animal + ".")
###Output
I have a dog.
I have a cat.
I have a bear.
###Markdown
I like this approach of using the plus sign to build strings because it is fairly intuitive. We can see that we are adding several smaller strings together to make one longer string. This is intuitive, but it is a lot of typing. There is a shorter way to do this, using *placeholders*. Python ignores most of the characters we put inside of strings. There are a few characters that Python pays attention to, as we saw with strings such as "\t" and "\n". Python also pays attention to "%s" and "%d". These are placeholders. When Python sees the "%s" placeholder, it looks ahead and pulls in the first argument after the % sign:
###Code
animal = 'dog'
print("I have a %s." % animal)
###Output
I have a dog.
###Markdown
This is a much cleaner way of generating strings that include values. We compose our sentence all in one string, and then tell Python what values to pull into the string, in the appropriate places. This is called *string formatting*, and it looks the same when you use a list:
###Code
animals = ['dog', 'cat', 'bear']
for animal in animals:
print("I have a %s." % animal)
###Output
I have a dog.
I have a cat.
I have a bear.
###Markdown
If you have more than one value to put into the string you are composing, you have to pack the values into a tuple:
###Code
animals = ['dog', 'cat', 'bear']
print("I have a %s, a %s, and a %s." % (animals[0], animals[1], animals[2]))
###Output
I have a dog, a cat, and a bear.
###Markdown
String formatting with numbers---If you recall, printing a number with a string can cause an error:
###Code
number = 23
print("My favorite number is " + number + ".")
###Output
_____no_output_____
###Markdown
Python knows that you could be talking about the value 23, or the characters '23'. So it throws an error, forcing us to clarify that we want Python to treat the number as a string. We do this by *casting* the number into a string using the *str()* function:
###Code
###highlight=[3]
number = 23
print("My favorite number is " + str(number) + ".")
###Output
My favorite number is 23.
###Markdown
The format string "%d" takes care of this for us. Watch how clean this code is:
###Code
###highlight=[3]
number = 23
print("My favorite number is %d." % number)
###Output
My favorite number is 23.
###Markdown
If you want to use a series of numbers, you pack them into a tuple just like we saw with strings:
###Code
numbers = [7, 23, 42]
print("My favorite numbers are %d, %d, and %d." % (numbers[0], numbers[1], numbers[2]))
###Output
My favorite numbers are 7, 23, and 42.
###Markdown
Just for clarification, look at how much longer the code is if you use concatenation instead of string formatting:
###Code
###highlight=[3]
numbers = [7, 23, 42]
print("My favorite numbers are " + str(numbers[0]) + ", " + str(numbers[1]) + ", and " + str(numbers[2]) + ".")
###Output
My favorite numbers are 7, 23, and 42.
###Markdown
You can mix string and numerical placeholders in any order you want.
###Code
names = ['eric', 'ever']
numbers = [23, 2]
print("%s's favorite number is %d, and %s's favorite number is %d." % (names[0].title(), numbers[0], names[1].title(), numbers[1]))
###Output
Eric's favorite number is 23, and Ever's favorite number is 2.
|
BirthWeight/.ipynb_checkpoints/1.1.Birth_weight_prediction-checkpoint.ipynb | ###Markdown
**Birth weight prediction**--- Load Libraries
###Code
import os
import numpy as np
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
from joblib import dump, load
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
###Output
_____no_output_____
###Markdown
Load The data
###Code
data = pd.read_csv('./inputs/baby-weights-dataset2.csv')
data.columns
data.head()
###Output
_____no_output_____
###Markdown
Dropping bias columns
###Code
columns = ['ID', 'MARITAL', 'FEDUC', 'MEDUC', 'HISPMOM', 'HISPDAD']
data.drop(columns, inplace=True, axis=1, errors='ignore')
data.columns
data.head()
###Output
_____no_output_____
###Markdown
Correlation Analysis Standardize the data
###Code
y = data[['BWEIGHT']]
sc_y = StandardScaler()
# fit and transform the data
y_std = sc_y.fit_transform(y)
y_std = pd.DataFrame(y_std, columns = y.columns)
y_std.head()
df = pd.DataFrame(sc_y.inverse_transform(y_std), columns=['y_std'])
#Pounds to kilograms
df['y_kg'] = df['y_std'].apply(lambda x: x * .454)
plt.scatter(df.index, df['y_kg'], color = 'red', label = 'Real data')
plt.legend()
plt.show()
X = data.drop(['BWEIGHT'], axis=1)
sc_X = StandardScaler()
# fit and transform the data
X_std = sc_X.fit_transform(X)
X_std = pd.DataFrame(X_std, columns = X.columns)
X_std.head()
#To save scalers
os.makedirs('outputs', exist_ok=True)
dump(sc_X, './outputs/std_scaler_X.bin', compress=True)
dump(sc_y, './outputs/std_scaler_y.bin', compress=True)
corrMatrix = X_std.corr()
print (corrMatrix)
sn.heatmap(corrMatrix)
plt.show()
###Output
_____no_output_____
###Markdown
Dropping high correlated features
###Code
corr_abs = corrMatrix.abs()
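# Keep only the upper triangle of the correlation matrix (k=1 excludes the diagonal) so each feature pair is checked once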
upper_tri = corr_abs.where(np.triu(np.ones(corr_abs.shape),k=1).astype(np.bool))
to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > 0.7)]
print(to_drop)
#data_std.drop(to_drop, inplace=True, axis=1, errors='ignore')
#data_std.columns
###Output
_____no_output_____
###Markdown
Split the data
###Code
# split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X_std, y_std, test_size=0.3, random_state=42)
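# Split the 30% hold-out again: roughly 20% of the data remains for testing and about 10% becomes the validation set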
X_test, X_val, y_test, y_val = train_test_split(X_test, y_test, test_size=0.33, random_state=42)
X_val.to_json('./inputs/X_validation_data.json', orient="split")
y_val.to_json('./inputs/y_validation_data.json', orient="split")
import json
columns = X_val.columns.to_list()
with open("./outputs/columns.txt", "w") as fp:
json.dump(columns, fp)
###Output
_____no_output_____
###Markdown
Model Training
###Code
from azureml.core import Workspace
ws = Workspace.get(name='demo-aml', subscription_id='YOUR-SUBSCRIPTION-ID', resource_group='demo-aml')
from azureml.core import Experiment
experiment_name = 'BABY-WEIGHT-EXP'
exp = Experiment(workspace=ws, name=experiment_name)
###Output
_____no_output_____
###Markdown
Model1: Xgboost
###Code
run = exp.start_logging()
from xgboost import XGBRegressor
# define model
model = XGBRegressor()
# fit model
model.fit(X_train, y_train)
###Output
[07:58:49] WARNING: src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.
###Markdown
Evaluation
###Code
yhat = model.predict(X_test)
df1 = pd.DataFrame(sc_y.inverse_transform(yhat), columns=['y_hat'])
df2 = pd.DataFrame(sc_y.inverse_transform(y_test), columns=['y_test'])
df = pd.concat([df1, df2], axis=1)
#Pounds to kilograms
df['y_hat_kg'] = df['y_hat'].apply(lambda x: x * .454)
df['y_test_kg'] = df['y_test'].apply(lambda x: x * .454)
df.head()
plt.scatter(df.index, df['y_test_kg'], color = 'red', label = 'Real data')
plt.scatter(df.index, df['y_hat_kg'], color = 'blue', label = 'Predicted data')
plt.title('Prediction')
plt.legend()
run.log_image(name='prediction_model', plot=plt)
plt.show()
# evaluate predictions
mse = mean_squared_error(y_test, yhat)
mae = mean_absolute_error(y_test, yhat)
r2 = r2_score(y_test, yhat)
print('MSE: %.3f' % mse)
run.log('MSE', mse)
print('MAE: %.3f' % mae)
run.log('MAE', mae)
print('R2: %.3f' % r2)
run.log('R2',r2)
#Save model
model.save_model("./outputs/model.bst")
#dump(value=model, filename='./outputs/model.pkl')
tags = { "Model": 'BABY-WEIGHT',
"Type": 'Xgboost',
"User": '[email protected]'}
run.set_tags(tags)
run.complete()
### Model Registry
model_name = 'BABY-WEIGHT'
run.register_model(model_name= model_name,
model_path = './outputs/',
tags = tags,
description="Neural Network for Birth weight prediction")
###Output
_____no_output_____ |
Notebooks/Tixi_Basic/tixi_exercise_solution.ipynb | ###Markdown
Read and write CPACS using TiXI. [TiXI](https://github.com/DLR-SC/tixi): - Binary Downloads: https://github.com/DLR-SC/tixi/wiki/Downloads - API Documentation: http://tixi.sourceforge.net/Doc/index.html - Issue Tracker: https://github.com/DLR-SC/tixi/issues - Wiki: https://github.com/DLR-SC/tixi/wiki
###Code
from tixi3 import tixi3wrapper
tixi_h = tixi3wrapper.Tixi3()
help(tixi_h)
###Output
_____no_output_____
###Markdown
Open a CPACS file with tixi:
###Code
tixi_h.open('../D150_v30.xml')
###Output
_____no_output_____
###Markdown
Get airport information:
###Code
#airport_xpath = '/cpacs/airports/airport[@uID="FRA"]'
airport_xpath = tixi_h.uIDGetXPath('FRA')
airport_name = tixi_h.getTextElement('/'.join((airport_xpath, 'name')))
positionNorth = tixi_h.getDoubleElement('/'.join((airport_xpath, 'positionNorth')))
positionEast = tixi_h.getDoubleElement('/'.join((airport_xpath, 'positionEast')))
print('Name: %s\nPosition North: %.3f\nPosition East: %.3f' % (airport_name, positionNorth, positionEast))
###Output
_____no_output_____
###Markdown
Some information would be very long if stored as an XML sequence. Thus, we introduced vectors:
###Code
root_xpath = '/'.join((tixi_h.uIDGetXPath('FRAtoEWRID'),'flightPath'))
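# The flight path stores waypoint names as a ';'-separated string, with latitude/longitude float vectors of matching length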
vector_size = tixi_h.getVectorSize('/'.join((root_xpath,'waypoints')))
waypoints = tixi_h.getTextElement('/'.join((root_xpath,'waypoints'))).split(';')
latitude = tixi_h.getFloatVector('/'.join((root_xpath,'latitude')), vector_size)
longitude = tixi_h.getFloatVector('/'.join((root_xpath,'longitude')), vector_size)
for i, wp in enumerate(waypoints):
print('%10s %.3f %.3f'%(wp, latitude[i], longitude[i]))
###Output
_____no_output_____
###Markdown
Let's add information about Braunschweig airport: - add uID="BWE" to the new element - add text element "name" with "Braunschweig, Germany" - add Positions: 52.3199° north / 10.556° east - add elevation of 276ft
###Code
apts_xpath = '/cpacs/airports'
idx = tixi_h.getNamedChildrenCount(apts_xpath,'airport') + 1
tixi_h.createElementAtIndex(apts_xpath,'airport',idx)
apt_xpath = '/'.join((apts_xpath,'airport[%i]'%idx))
tixi_h.addTextAttribute(apt_xpath,'uID','BWE')
tixi_h.addTextElement(apt_xpath, 'name', 'Braunschweig, Germany')
tixi_h.addDoubleElement(apt_xpath, 'positionNorth', 52.319, '%.3f')
tixi_h.addDoubleElement(apt_xpath, 'positionEast', 10.556, '%.3f')
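# 84 is the 276 ft elevation mentioned above converted to metres (assuming the elevation element expects metres)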
tixi_h.addIntegerElement(apt_xpath, 'elevation', 84, '%i')
###Output
_____no_output_____
###Markdown
Save the modified CPACS instance as a new file:
###Code
tixi_h.save('D150_v30_modified.xml')
tixi_h.close()
###Output
_____no_output_____ |
src/lab2/030_NamedEntityRecognition.ipynb | ###Markdown
3.0 Build a Named Entity Recognizer (NVIDIA NeMo v1.0) In this notebook, you'll build an NER (named entity recognition) application that finds disease names in medical disease abstracts. The model does not "search" for names from a list, but rather "recognizes" that certain words are disease references from the context of the language. **[3.1 Token Classification from the Command Line](3.1-Token-Classification-from-the-Command-Line)** [3.1.1 Data Input](3.1.1-Data-Input) [3.1.1.1 IOB Tagging](3.1.1.1-IOB-Tagging) [3.1.2 Configuration File](3.1.2-Configuration-File) [3.1.3 Hydra-Enabled Python Scripts](3.1.3-Hydra-Enabled-Python-Scripts) [3.1.4 Exercise: Train the Model](3.1.4-Exercise:-Train-the-Model)**[3.2 Domain-Specific Training](3.2-Domain-Specific-Training)** [3.2.1 Visualize the Results with TensorBoard](3.2.1-Visualize-the-Results-with-TensorBoard)**[3.3 Evaluation](3.3-Evaluation)****[3.4 Inference](3.4-Inference)** For the NER task, you'll follow the same basic steps as in the text classification task to build your project, train it, and test it. This time, however, you'll train a classifier on the *domain-specific* BioMegatron language model. BioMegatron is a [BERT](https://arxiv.org/abs/1810.04805)-like [Megatron-LM](https://arxiv.org/pdf/1909.08053.pdf) model pre-trained on a large biomedical text corpus ([PubMed](https://pubmed.ncbi.nlm.nih.gov/) abstracts and full-text commercial use collection). We can expect better performance compared to the general language models, because our disease dataset is from the same biomedical domain. There are alternatives to BioMegatron, most notably [BioBERT](https://arxiv.org/abs/1901.08746). Compared to BioBERT, BioMegatron is larger in model size and is pre-trained on a larger text corpus.
###Code
!tree nemo/examples/nlp -L 1
###Output
_____no_output_____
###Markdown
We use the [token classification](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/token_classification.html) model for NER because we are classifying at the "token" level, in this case classifying words related to diseases. 3.1.1 Data Input. As we saw in the [1.0 Explore the Data](010_ExploreData.ipynb) notebook, the dataset for the NER project is made up of sentences with IOB tagging for disease names, where each word in a sentence is tagged as inside, outside, or the beginning of a named entity. The training text and label files are `text_train.txt` and `labels_train.txt`, respectively. The validation and test files follow a similar naming pattern. Verify the location of the data files.
###Code
NER3_DATA_DIR = '/dli/task/data/NCBI_ner-3'
!ls -lh $NER3_DATA_DIR
# Take a look at the data
print("*****\ntext_test.txt sample\n*****")
!head -n 3 $NER3_DATA_DIR/text_test.txt
print("\n*****\nlabels_test.txt sample\n*****")
!head -n 3 $NER3_DATA_DIR/labels_test.txt
###Output
_____no_output_____
###Markdown
3.1.1.1 IOB TaggingRecall that the sentences and labels in the NER dataset map to each other with _inside, outside, beginning (IOB)_ tagging.This mechanism can be used in a general way for multiple named entity types:* B-{CHUNK_TYPE} – for the word in the Beginning chunk* I-{CHUNK_TYPE} – for words Inside the chunk* O – Outside any chunkIn our case, we are only looking for "disease" as our entity (or chunk) type, so we don't need to identify beyond the three classes: I, O, and B.**Three classes*** B - Beginning of disease name* I - Inside word of disease name* O - Outside of all disease names```textIdentification of APC2 , a homologue of the adenomatous polyposis coli tumour suppressor .O O O O O O O O B I I I O O ```These are defined in our `labels.csv` file:
###Code
!head $NER3_DATA_DIR/label_ids.csv
###Output
_____no_output_____
###Markdown
If we were looking for two kinds of named entities, such as nouns and verbs in a parts-of-speech analysis, we would use a five-class IOB scheme:**Five classes*** B-N - Beginning of noun word or phrase* I-N - Inside noun word or phrase* B-V - Beginning of verb word or phrase* I-V - Inside verb word or phrase* O - Outside all nouns and verbs. If you are interested in learning more, take a look at [this paper](http://cs229.stanford.edu/proj2005/KrishnanGanapathy-NamedEntityRecognition.pdf) on the subject. The NCBI_ner-3 disease data is in the correct format for token classification as described in the [documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/token_classification.html#data-input-for-token-classification-model), so we are ready to look at the configuration file. 3.1.2 Configuration File. Let's look in more detail at the NeMo token classification directory:
###Code
TC_DIR = "/dli/task/nemo/examples/nlp/token_classification"
!tree $TC_DIR
###Output
_____no_output_____
###Markdown
The config file for NER, `token_classification_config.yaml`, specifies model, training, and experiment management details, such as file locations, pretrained models, and hyperparameters. This is the same general pattern used in the text classification configuration file. We'll take a look at the details of each section using the `OmegaConf` tool introduced in the text classification project.
###Code
from omegaconf import OmegaConf
CONFIG_DIR = "/dli/task/nemo/examples/nlp/token_classification/conf"
CONFIG_FILE = "token_classification_config.yaml"
config = OmegaConf.load(CONFIG_DIR + "/" + CONFIG_FILE)
# print the model section
print(OmegaConf.to_yaml(config.model))
# complete list of supported BERT-like models
from nemo.collections import nlp as nemo_nlp
nemo_nlp.modules.get_pretrained_lm_models_list()
###Output
_____no_output_____
###Markdown
In the `model` section, a path for `dataset.data_dir` that contains all the data files is required. The actual file names we are using already conform to the default values, so we don't need to override those. For our first try, we can override `language_model.pretrained_model_name` to `bert-base-cased`, so we can compare the results to the domain-specific `biomegatron-bert-345m-cased` in another experiment. Since we will need to conserve memory space to run BioMegatron, we will go ahead and reduce `dataset.max_seq_length` and the `batch_size`.
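If you want to experiment inside the notebook rather than from the command line, the same overrides can also be applied directly to the loaded `config` object. A minimal sketch is below; the values are simply the ones discussed above, and the training cells that follow pass them as Hydra command-line overrides instead:
```python
# Sketch only: apply the overrides discussed above to the in-memory config.
config.model.language_model.pretrained_model_name = 'bert-base-cased'
config.model.dataset.data_dir = '/dli/task/data/NCBI_ner-3'
config.model.dataset.max_seq_length = 64
config.model.train_ds.batch_size = 32
config.trainer.amp_level = 'O1'
config.trainer.max_epochs = 3
```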
###Code
# print the trainer section
print(OmegaConf.to_yaml(config.trainer))
###Output
_____no_output_____
###Markdown
For efficiency, we can set the `amp_level` to 'O1'. Since the language models we are going to compare are large and take a long time to run, we will override the `max_epochs` to a small number.
###Code
# print the experiment manager section
print(OmegaConf.to_yaml(config.exp_manager))
###Output
_____no_output_____
###Markdown
There is no need to change the `exp_manager` default settings for now. 3.1.3 Hydra-Enabled Python Scripts. The Python scripts, `token_classification_train.py` and `token_classification_evaluate.py`, encapsulate everything needed to run a token classification experiment defined by the configuration file. Training and evaluation are expected to be run separately in this case. As with text classification, both scripts employ Facebook's [Hydra](https://hydra.cc/) tool for configuration management, which allows the entire experiment to be run from the command line, overriding config file values as needed. To recap, the parameters we need to change or override are:* `model.language_model.pretrained_model_name`: set to 'bert-base-cased'* `model.dataset.data_dir`: set to /dli/task/data/NCBI_ner-3* `model.dataset.max_seq_length`: 64* `model.train_ds.batch_size`: set to 32* `model.validation_ds.batch_size`: set to 32* `model.test_ds.batch_size`: set to 32* `trainer.amp_level`: set to "O1"* `trainer.max_epochs`: set to 3 3.1.4 Exercise: Train the Model. Run the training script, `token_classification_train.py`, just as you ran similar experiments in the text classification notebook. The new values for overrides are provided for you in the cell below. Add the command with appropriate overrides and run the cell. If you get stuck, refer to the [solution](solutions/ex3.1.4.ipynb).
###Code
%%time
# The training takes about 2 minutes to run
TOKEN_DIR = "/dli/task/nemo/examples/nlp/token_classification"
# set the values we want to override
PRETRAINED_MODEL_NAME = 'bert-base-cased'
DATA_DIR = '/dli/task/data/NCBI_ner-3'
MAX_SEQ_LENGTH = 64
BATCH_SIZE = 32
AMP_LEVEL = 'O1'
MAX_EPOCHS = 3
# Override the config values in the command line
# FIXME
###Output
_____no_output_____
###Markdown
How were the results? Your log should have included something like:
```
label              precision  recall  f1     support
O (label_id: 0)        99.34   99.35  99.34    21648
B (label_id: 1)        85.86   89.21  87.50      769
I (label_id: 2)        91.74   89.00  90.35     1073
-------------------
micro avg              98.54   98.54  98.54    23490
macro avg              92.31   92.52  92.40    23490
weighted avg           98.55   98.54  98.55    23490
Epoch 2: 100%|█| 199/199 [00:15<00:00, 12.45it/s, loss=0.0251, v_num=4-43, val_l
```
--- 3.2 Domain-Specific Training Try another experiment, this time overriding the `model.language_model.pretrained_model_name` with `biomegatron-bert-345m-cased`. This is a large model with 345 million parameters, so it takes longer to run.
###Code
%%time
# The training takes about 5-6 minutes to run
TOKEN_DIR = "/dli/task/nemo/examples/nlp/token_classification"
# set the values we want to override
PRETRAINED_MODEL_NAME = 'biomegatron-bert-345m-cased'
DATA_DIR = '/dli/task/data/NCBI_ner-3'
MAX_SEQ_LENGTH = 64
BATCH_SIZE = 32
AMP_LEVEL = 'O1'
MAX_EPOCHS = 3
# Override the config values in the command line
!python $TOKEN_DIR/token_classification_train.py \
model.language_model.pretrained_model_name=$PRETRAINED_MODEL_NAME \
model.dataset.data_dir=$DATA_DIR \
model.dataset.max_seq_length=$MAX_SEQ_LENGTH \
model.train_ds.batch_size=$BATCH_SIZE \
model.validation_ds.batch_size=$BATCH_SIZE \
model.test_ds.batch_size=$BATCH_SIZE \
trainer.amp_level=$AMP_LEVEL \
trainer.max_epochs=$MAX_EPOCHS
###Output
_____no_output_____
###Markdown
3.2.1 Visualize the Results with TensorBoardThe [experiment manager](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/core/core.html?highlight=tensorboardexperiment-manager) saves results for viewing with TensorBoard. Execute the following cell to create a link to TensorBoard for your instance, then click on the link to open Tensorboard in a tab on your browser.
###Code
%%js
const href = window.location.hostname +'/tensorboard/';
let a = document.createElement('a');
let link = document.createTextNode('Open Tensorboard!');
a.appendChild(link);
a.href = "http://" + href;
a.style.color = "navy"
a.target = "_blank"
element.append(a);
###Output
_____no_output_____
###Markdown
To compare the performance of the models you've run, select the "f1" scalar. You can see all the models compared together or select individual models for comparison. In this example comparison, five epochs were run. The orange line shows results from the `bert-base-cased` model and the blue line is the `biomegatron-bert-345m-cased` model. The BioMegatron model does quite well very quickly, as it is better able to discern the disease names. It still has a slightly higher f1 after five epochs. The model you choose for your own project depends on your constraints in memory, time, and performance requirements. Note that your results may vary from the example due to randomness in the learning algorithm. --- 3.3 Evaluation
###Code
# Restart the kernel
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
To evaluate the model over the test set, we must specify the location of the `.nemo` trained model. Each experiment run results in a time-stamped directory under `nemo_experiments`. If we drill down, we can find the `checkpoints` folder where the final `token_classification_model.nemo` resides. In the next cell, a bit of Python logic is used to capture a list of models, and identify the latest one.
###Code
import glob
nemo_model_paths = glob.glob('nemo_experiments/token_classification_model/*/checkpoints/*.nemo')
# Sort newest first
nemo_model_paths.sort(reverse=True)
print("The latest model is \n{}".format(nemo_model_paths[0]))
###Output
_____no_output_____
###Markdown
There are a couple of ways to run an evaluation over the test set:1. Execute `token_classification_evaluate.py` with the same overrides, plus an override for the `pretrained_model`, which must be in `.nemo` format.
```text
!python $TOKEN_DIR/token_classification_evaluate.py \
    model.dataset.data_dir=$DATA_DIR \
    model.dataset.max_seq_length=$MAX_SEQ_LENGTH \
    model.train_ds.batch_size=$BATCH_SIZE \
    model.validation_ds.batch_size=$BATCH_SIZE \
    model.test_ds.batch_size=$BATCH_SIZE \
    model.language_model.pretrained_model_name=$PRETRAINED_MODEL_NAME \
    pretrained_model=$LATEST_MODEL
```
2. Instantiate the model by restoring the trained model checkpoint and execute a NeMo method to evaluate the test set. This is the method we will step through.
###Code
# Instantiate the model by restoring from the .nemo checkpoint
from nemo.collections import nlp as nemo_nlp
LATEST_MODEL = nemo_model_paths[0]
model = nemo_nlp.models.TokenClassificationModel.restore_from(LATEST_MODEL)
###Output
_____no_output_____
###Markdown
Evaluate the model with the test set using the `evaluate_from_file` method. Set the `add_confusion_matrix` to True to get a nice visual representation of how well the model did.
###Code
import os.path
DATA_DIR = '/dli/task/data/NCBI_ner-3'
OUTPUT_DIR = '/dli/task/nemo_experiments/token_classification_model/logs'
model.evaluate_from_file(
text_file=os.path.join(DATA_DIR, 'text_test.txt'),
labels_file=os.path.join(DATA_DIR, 'labels_test.txt'),
output_dir=OUTPUT_DIR,
add_confusion_matrix=True,
normalize_confusion_matrix=True,
batch_size=1
)
###Output
_____no_output_____
###Markdown
The results should look something like:
```
[NeMo I 2021-06-29 00:42:16 token_classification_model:499]
                  precision  recall  f1-score  support
 O (label id: 0)     0.9958  0.9910    0.9934    22450
 B (label id: 1)     0.8886  0.9135    0.9009      960
 I (label id: 2)     0.8724  0.9374    0.9038     1087
        accuracy                       0.9856    24497
       macro avg     0.9189  0.9473    0.9327    24497
    weighted avg     0.9861  0.9856    0.9858    24497
```
The final confusion matrix visualization shows a bright diagonal, indicating that the predicted label matched the true label with high accuracy for all the label types (IOB). --- 3.4 Inference. To run inference on a list of queries, use the same model already loaded with the `add_predictions` method.
###Code
queries = ["Clustering of missense mutations in the ataxia - telangiectasia gene in a sporadic T - cell leukaemia . ",
"Ataxia - telangiectasia ( A - T ) is a recessive multi - system disorder caused by mutations in the ATM gene at 11q22 - q23 ( ref . 3 ) . ",
"The risk of cancer , especially lymphoid neoplasias , is substantially elevated in A - T patients and has long been associated with chromosomal instability . ",
"By analysing tumour DNA from patients with sporadic T - cell prolymphocytic leukaemia ( T - PLL ) , a rare clonal malignancy with similarities to a mature T - cell leukaemia seen in A - T , we demonstrate a high frequency of ATM mutations in T - PLL . ",
"In marked contrast to the ATM mutation pattern in A - T , the most frequent nucleotide changes in this leukaemia were missense mutations . ",
"These clustered in the region corresponding to the kinase domain , which is highly conserved in ATM - related proteins in mouse , yeast and Drosophila . ",
"The resulting amino - acid substitutions are predicted to interfere with ATP binding or substrate recognition . ",
"Two of seventeen mutated T - PLL samples had a previously reported A - T allele . ",
"In contrast , no mutations were detected in the p53 gene , suggesting that this tumour suppressor is not frequently altered in this leukaemia . ",
"Occasional missense mutations in ATM were also found in tumour DNA from patients with B - cell non - Hodgkins lymphomas ( B - NHL ) and a B - NHL cell line . "
]
results = model.add_predictions(queries, output_file='predictions.txt')
!cat predictions.txt
###Output
_____no_output_____ |
tests/integration_tests/FittingData.ipynb | ###Markdown
Fitting There are two built-in fitting engines, `lmfit` and `bumps`.
###Code
# Import all the packages
from easyCore import np
from easyDiffractionLib.sample import Sample
from easyDiffractionLib import Phases
from easyDiffractionLib.interface import InterfaceFactory as Calculator
from easyDiffractionLib.Elements.Experiments.Pattern import Pattern1D
from easyDiffractionLib.Profiles.P1D import Instrument1DCWParameters
from easyCore.Fitting.Fitting import Fitter
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Preparing the sample and data Load structure from a CIF file
###Code
calculator = Calculator()
phase = Phases.from_cif_file('PbSO4.cif')
sample = Sample(phases=phase, parameters=Instrument1DCWParameters.default(), calculator=calculator)
###Output
_____no_output_____
###Markdown
Visualise the structure
###Code
import py3Dmol
viewer = py3Dmol.view()
viewer.addModel(phase[0].to_cif_str(),'cif')
viewer.setStyle({'sphere':{'colorscheme':'Jmol','scale':.2},'stick':{'colorscheme':'Jmol', 'radius': 0.1}})
viewer.addUnitCell()
viewer.replicateUnitCell(2,2,1)
viewer.zoomTo()
###Output
_____no_output_____
###Markdown
Load experimental data from a file
###Code
file_path = 'PbSO4_neutrons_short.xye'
data_x, data_y, data_e = np.loadtxt(file_path, unpack=True)
data_y = data_y/100.0
# Generate the simulation y-data
sim_y_data = calculator.fit_func(data_x)
%matplotlib notebook
plt.plot(data_x, data_y, label='Experimental')
plt.plot(data_x, sim_y_data, label='Starting point')
plt.legend()
###Output
_____no_output_____
###Markdown
The charts do not match very well, because our simulation did not include any parameters related to the experiment. Let's assign some decent values, then.
###Code
sample.parameters.wavelength = 1.912
sample.parameters.u_resolution = 1.4
sample.parameters.v_resolution = -0.42
sample.parameters.w_resolution = 0.38
sample.parameters.x_resolution = 0.0
sample.parameters.y_resolution = 0.0
sim_y_data = calculator.fit_func(data_x)
%matplotlib notebook
plt.plot(data_x, data_y, label='Experimental')
plt.plot(data_x, sim_y_data, label='Starting point')
plt.legend()
###Output
_____no_output_____
###Markdown
This looks much better now - experimental and theoretical peaks seem to be very close but there is no background included in our simulation.
###Code
from easyDiffractionLib.Elements.Backgrounds.Point import PointBackground, BackgroundPoint
bg = PointBackground(linked_experiment='PbSO4')
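# Two background points pinned to the first and last measured x values, both starting at an intensity of 2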
bg.append(BackgroundPoint.from_pars(data_x[0], 2))
bg.append(BackgroundPoint.from_pars(data_x[-1], 2))
sample.set_background(bg)
sim_y_data = calculator.fit_func(data_x)
%matplotlib notebook
plt.plot(data_x, data_y, label='Experimental')
plt.plot(data_x, sim_y_data, label='Starting point')
plt.legend()
###Output
_____no_output_____
###Markdown
These two charts look close enough to attempt fitting. Fitting to the data Initialize the fitting engine and define parameters to optimize
###Code
f = Fitter(sample, calculator.fit_func)
# Vary the scale and the BG points
sample.pattern.scale.fixed = False
sample.parameters.resolution_u.fixed = False
sample.parameters.resolution_v.fixed = False
sample.parameters.resolution_w.fixed = False
sample.backgrounds[0][0].y.fixed = False
sample.backgrounds[0][1].y.fixed = False
###Output
_____no_output_____
###Markdown
Perform the fit
###Code
result = f.fit(data_x, data_y, weights=1/data_e)
if result.success:
print("The fit has been successful: {}".format(result.success))
print("The gooodness of fit is: {}".format(result.goodness_of_fit))
sim_y_data = calculator.fit_func(data_x)
%matplotlib notebook
plt.plot(data_x, data_y, label='Experimental')
plt.plot(data_x, sim_y_data, label='Best Fit')
plt.legend()
###Output
_____no_output_____
###Markdown
Fitted parameters
###Code
print(f'Scale: {sample.pattern.scale}')
print(f'BG 0: {sample.backgrounds[0][0]}')
print(f'BG 1: {sample.backgrounds[0][1]}')
###Output
_____no_output_____
###Markdown
**Parameter object with varying accessors**
###Code
print(f'Scale: {sample.pattern.scale}')
print(f'Scale: {sample.pattern.scale.value}')
print(f'Scale: {sample.pattern.scale.raw_value}')
###Output
_____no_output_____
###Markdown
The fit is quite good, but let's see if we can do better with a different optimizer. Change the optimizer to `bumps`
###Code
print("available minimizers:", f.available_engines)
print()
print("current minimizer:", f.current_engine.name)
print("available methods of current minimizer:", f.available_methods())
print("switch minimizer")
f.switch_engine('bumps')
f_method = 'lm'
print("current minimizer:", f.current_engine.name)
print("available methods of current minimizer:", f.available_methods())
###Output
_____no_output_____
###Markdown
**Rerun fitting** (takes a while!) This seems completely broken and needs fixing; the fitting just hangs.
###Code
result = f.fit(data_x, data_y, weights=1/data_e, method=f_method)
if result.success:
print("The fit has been successful: {}".format(result.success))
print("The gooodness of fit is: {}".format(result.goodness_of_fit))
sim_y_data = calculator.fit_func(data_x)
%matplotlib notebook
plt.plot(data_x, data_y, label='Experimental')
plt.plot(data_x, sim_y_data, label='Best Fit')
plt.legend()
print(f'Scale: {sample.pattern.scale}')
print(f'BG 0: {sample.backgrounds[0][0]}')
print(f'BG 1: {sample.backgrounds[0][1]}')
#print(f'Res U: {sample.parameters.resolution_u}')
#print(f'Res V: {sample.parameters.resolution_v}')
#print(f'Res W: {sample.parameters.resolution_w}')
###Output
_____no_output_____ |
src/trash/Basic.ipynb | ###Markdown
Prediction Results **SETUP**
###Code
import pandas as pd
import numpy as np
from IPython.display import display
BRCA_PREDICTION_PATH = "../data/predictions_datasets/brca_prediction_2021-09-28/acf35ed1/predictions_soft_2021-09-28.csv"
OV_PREDICTION_PATH = "../data/predictions_datasets/ov_prediction_2021-09-28/d872749a/predictions_soft_2021-09-28.csv"
def load_prediction_data(data_path):
prediction = pd.read_csv(data_path)
print(f"Dataframe dimensions: {prediction.shape}")
display(prediction.head())
return prediction
###Output
_____no_output_____
###Markdown
BRCA
###Code
brca_prediction = load_prediction_data(BRCA_PREDICTION_PATH)
###Output
Dataframe dimensions: (3882, 4)
###Markdown
OV
###Code
ov_prediction = load_prediction_data(OV_PREDICTION_PATH)
brca_prediction[brca_prediction["UniProt_ID"] == "P04637"]
ov_prediction[ov_prediction["UniProt_ID"] == "P04637"]
unique_interactions = brca_prediction
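# foo turns a prediction row into a hashable (protein, mutation, interactor) tuple, used for the set comparisons below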
def foo(x):
return x["UniProt_ID"], x["Mutation"], x["Interactor_UniProt_ID"]
brca_unique_interactions = set(brca_prediction[["UniProt_ID", "Mutation", "Interactor_UniProt_ID"]].apply(foo, axis=1))
ov_unique_interactions = set(ov_prediction[["UniProt_ID", "Mutation", "Interactor_UniProt_ID"]].apply(foo, axis=1))
print(f"Number of brca_unique_interactions: {len(brca_unique_interactions)}")
print(f"Number of ov_unique_interactions: {len(ov_unique_interactions)}")
unique_interactions = (
brca_unique_interactions | ov_unique_interactions
)
unique_interactions_intersection = (
brca_unique_interactions & ov_unique_interactions
)
print(f"Number of unique_interactions: {len(unique_interactions)}")
print(f"Number of unique_interactions_intersection: {len(unique_interactions_intersection)}")
brca_unique_interactions
ov_unique_interactions
def get_pred(prediction, interaction):
[pred] = (prediction[
(prediction["UniProt_ID"] == interaction[0]) &
(prediction["Mutation"] == interaction[1]) &
(prediction["Interactor_UniProt_ID"] == interaction[2])
]["Prediction"].values)
return pred
for interaction in unique_interactions_intersection:
pred_brca = get_pred(brca_prediction, interaction)
pred_ov = get_pred(ov_prediction, interaction)
if pred_brca != pred_ov:
print("ERR")
unique_interactions_intersection
('P34932', 'E269D', 'Q99933') in ov_unique_interactions
disruptive_predicted_interactions_data = pd.DataFrame({
"Patient": ["TCGA-AN-A049", "TCGA-AN-A049"],
"Protein": ["P1", "P2"],
"Mutation": ["M1", "M2"],
"Num All Interactions": [9, 4],
"All Interactors": [["I1", "I2", "...", "I9"], ["..."]],
"Num Disruptive Interactions": [7, 3],
"Disruptive Interactors": [["I1", "I3", "...", "I8"], ["..."]],
"Num Incr. + NoEff. Interactions": [2, 1],
"Incr. + NoEff. Interactors": [["I2", "I3", "...", "I8"], ["..."]],
})
disruptive_predicted_interactions_data
###Output
_____no_output_____ |
Note-6 A3CNet/Note-6.3 强化学习选股-SH50指数增强/.ipynb_checkpoints/test_agent-checkpoint.ipynb | ###Markdown
train
###Code
import multiprocessing
import threading
import tensorflow as tf
from agent.access import Access
from agent.main import Agent
NUMS_CPU = multiprocessing.cpu_count()
state_size = 58
batch_size = 50
action_size = 3
max_episodes = 1
GD = {}
class Worker(Agent):
def __init__(self, name, access, batch_size, state_size, action_size):
super().__init__(name, access, batch_size, state_size, action_size)
def run(self, sess, max_episodes, t_max=8):
episode_score_list = []
episode = 0
while episode < max_episodes:
episode += 1
            episode_score, _ = self.run_episode(sess, t_max)
            episode_score_list.append(episode_score)
            GD[str(self.name)] = episode_score_list
            if self.name == 'W0':
                print('Episode: %f, score: %f' % (episode, episode_score))
print('\n')
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# with tf.Session(config=config) as sess:
with tf.Session() as sess:
with tf.device("/cpu:0"):
A = Access(batch_size, state_size, action_size)
F_list = []
for i in range(NUMS_CPU):
F_list.append(Worker('W%i' % i, A, batch_size, state_size, action_size))
COORD = tf.train.Coordinator()
sess.run(tf.global_variables_initializer())
sess.graph.finalize()
threads_list = []
for ac in F_list:
job = lambda: ac.run(sess, max_episodes)
t = threading.Thread(target=job)
t.start()
threads_list.append(t)
COORD.join(threads_list)
A.save(sess, 'model/saver_1.ckpt')
###Output
WARNING:tensorflow:The `skip_connections` argument will be deprecated. Please use snt.SkipConnectionCore instead.
WARNING:tensorflow:The `skip_connections` argument will be deprecated. Please use snt.SkipConnectionCore instead.
WARNING:tensorflow:The `skip_connections` argument will be deprecated. Please use snt.SkipConnectionCore instead.
WARNING:tensorflow:The `skip_connections` argument will be deprecated. Please use snt.SkipConnectionCore instead.
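###Markdown
The shared `GD` dictionary collects one list of episode scores per worker thread. A quick way to inspect it once training finishes is sketched below; this cell is an added illustration, not part of the original notebook.
###Code
import pandas as pd

# GD maps worker name -> list of episode scores recorded during training
worker_scores = pd.DataFrame({name: pd.Series(s) for name, s in GD.items()})
print(worker_scores.describe())
worker_scores.plot(title='Episode score per worker', figsize=(10, 4))
###Output
_____no_output_____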
###Markdown
test
###Code
tf.reset_default_graph()
import tensorflow as tf
from agent.access import Access
from agent.framework import Framework
from emulator.main import Account
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_style('whitegrid')
%matplotlib inline
state_size = 58
batch_size = 50
action_size = 3
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
with tf.device("/cpu:0"):
A = Access(batch_size, state_size, action_size)
W = Framework('W0', A, batch_size, state_size, action_size)
A.restore(sess,'model/saver_1.ckpt')
W.init_or_update_local(sess)
env = Account()
state = env.reset()
for _ in range(200):
action = W.get_deterministic_policy_action(sess, state)
state, reward, done = env.step(action)
value, reward = env.plot_data()
pd.Series(value).plot(figsize=(16,6))
pd.Series(reward).plot(figsize=(16,6))
pd.Series(np.zeros_like(reward)).plot(figsize=(16,6), color='r')
###Output
_____no_output_____ |
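###Markdown
A few summary statistics of the plotted series make different runs easier to compare. This is a minimal added sketch; it assumes `value` and `reward` are the 1-D sequences returned by `env.plot_data()` above.
###Code
value_s = pd.Series(value)
reward_s = pd.Series(reward)
print('final value: %.4f' % value_s.iloc[-1])
print('mean step reward: %.6f' % reward_s.mean())
print('std of step reward: %.6f' % reward_s.std())
print('fraction of positive-reward steps: %.2f' % (reward_s > 0).mean())
###Output
_____no_output_____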
1_data_scraping_places_api.ipynb | ###Markdown
Objective: Data Scraping around a Particular Location with Google Places API. Initial Setup: Library Imports
###Code
import requests
import json
import pandas as pd
import time
###Output
_____no_output_____
###Markdown
Define API Key
###Code
api_key = 'key_to_API'
###Output
_____no_output_____
###Markdown
Define Latitude & Longitude for Dhaka, Bangladesh
###Code
latitude = 23.810331
longitude = 90.412521
###Output
_____no_output_____
###Markdown
Additional Search Paramaters
###Code
radius = 1500 # Radius of Search: 1500 meters
loc_type = 'restaurant'
###Output
_____no_output_____
###Markdown
Sending Request to Google Places API
###Code
# Search URL for API Request
url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={},{}&radius={}&type={}&key={}".format(
latitude, longitude, radius, loc_type, api_key)
payload = {}
headers = {}
# `requests` library used to GET API response
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text)
###Output
{
"html_attributions" : [],
"next_page_token" : "Aap_uEB24X7rlTANxqJNEffKU9qPXXJmPTVUCFc7kEAGowlvDXQD9nIBY8bQiHvQUD6XR0NXuK8JEYwfPH7sILMXo7MjmXx8QUoPAcqk81cwbyls-cWP6ROJCI18cu7WIpH5Vv7jFr4FqTN_F-CyrJo5eG9SkCx9NC27NuMYbU7xMZiB1OBkUXptCNCZxQFnBHPZVYHg4r6yTr0A6ts3NGTxaVNxlNFqEZzvoqn8GZYXUbpCAernBDRH8PyYCgTn33r8MWGfQEnlfIlWQ29Jo5jIn2tUY74CDUxlSD1DRp1BiiEO8zXE9a-tQHiFQgzqclUBgJoO8Sx9y0_I0w_GF1Fp6UN3N-6E99NISlN9bXj5sVNliIaC7E475sR9YD9aAoBSIT65PGvtIB9CchDB8iYgcq-0YfZeXO5BS57oOk25T1uQAvxtqzTv5OYeO_yz",
"results" : [
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8120563,
"lng" : 90.4220469
},
"viewport" : {
"northeast" : {
"lat" : 23.8133625302915,
"lng" : 90.42340033029151
},
"southwest" : {
"lat" : 23.8106645697085,
"lng" : 90.42070236970851
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "Magpie Thai and Chinese Restaurant",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 720,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/105974512057796278586\"\u003emgh reza\u003c/a\u003e"
],
"photo_reference" : "Aap_uEADIB4zHTB_cE-W6BRfp9x-UYtCULV6FhQaSViIq6CgjfZTUoO6jw4F-6oYXXLCPysJRZ4HBaUszKF9YMVKbm_Xx4YCkE1-_V-l1kgrgm_ptCAaWk9SeR3pwNXdptUFUOn61hVuPs3ZQi_3eGgHZ0eySOXwqheXl__1N774KBtva4hL",
"width" : 1280
}
],
"place_id" : "ChIJozRxWq3HVTcRxryJMys-sFs",
"plus_code" : {
"compound_code" : "RC6C+RR Dhaka, Bangladesh",
"global_code" : "7MMGRC6C+RR"
},
"price_level" : 2,
"rating" : 4,
"reference" : "ChIJozRxWq3HVTcRxryJMys-sFs",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 689,
"vicinity" : "Bashundhara Road, Dhaka"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8069044,
"lng" : 90.41645989999999
},
"viewport" : {
"northeast" : {
"lat" : 23.8083137802915,
"lng" : 90.41778038029152
},
"southwest" : {
"lat" : 23.8056158197085,
"lng" : 90.41508241970851
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/lodging-71.png",
"icon_background_color" : "#909CE1",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/hotel_pinlet",
"name" : "Hotel Bon Vivant",
"photos" : [
{
"height" : 1918,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/114653761624844801855\"\u003eHotel Bon Vivant\u003c/a\u003e"
],
"photo_reference" : "Aap_uECmjdGJDsWMHY4tF__mBOCjozGV-NCJjjlVFHKGXP6AZPz0UxYcTcNHNSu1W2miuQhQlNfiAvpfqgPmblWgHNxfrZyzSPinNYIXR37kA2SuRyx4z77UVfj_RINVEEyj9kl0SaEUrKkBOnIvG2hyX5PLeERZn4Hg0RTSmwQI-J-Ifi7A",
"width" : 1200
}
],
"place_id" : "ChIJO5WQm6vHVTcRDu1CXf72vko",
"plus_code" : {
"compound_code" : "RC48+QH Dhaka, Bangladesh",
"global_code" : "7MMGRC48+QH"
},
"rating" : 4,
"reference" : "ChIJO5WQm6vHVTcRDu1CXf72vko",
"scope" : "GOOGLE",
"types" : [ "lodging", "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 291,
"vicinity" : "House 8/A, Road 13, North Baridhara, Kalachandpur Main Road, Dhaka"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8189386,
"lng" : 90.4141559
},
"viewport" : {
"northeast" : {
"lat" : 23.8201255302915,
"lng" : 90.4156312302915
},
"southwest" : {
"lat" : 23.8174275697085,
"lng" : 90.41293326970849
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "Palm View Hotel and Restaurant",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 800,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/102928139062279172353\"\u003eMahmudur Rahman\u003c/a\u003e"
],
"photo_reference" : "Aap_uEA7ngX0jsL4Da_qAd0cBGfzGvkeMX_fKtMnoQzKmR3SLpuQkD7OncDn6EsAWGtzViLhQNNOXgaLTa3IYU-0SiAem8iJ1dOrtTou8rn2VMX6pqbOOveSmUkoeOdlgDJ7NoUTFIxO7y4dmOqPK_jWdJpBNRCJOKnHI5LEkohWsR9hHWFX",
"width" : 1200
}
],
"place_id" : "ChIJBQbGy1fGVTcR3iftX4xYrC4",
"plus_code" : {
"compound_code" : "RC97+HM Dhaka, Bangladesh",
"global_code" : "7MMGRC97+HM"
},
"rating" : 4.2,
"reference" : "ChIJBQbGy1fGVTcR3iftX4xYrC4",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 1819,
"vicinity" : "Dhaka"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8066349,
"lng" : 90.4162212
},
"viewport" : {
"northeast" : {
"lat" : 23.8080939802915,
"lng" : 90.41749043029152
},
"southwest" : {
"lat" : 23.8053960197085,
"lng" : 90.4147924697085
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "Margarita Resturant and Cafe",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 4160,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/110245021993425282046\"\u003erubyat khan\u003c/a\u003e"
],
"photo_reference" : "Aap_uEDB-vv8AHeu_cFgVJijl9gcYH7qn6ek2N2syfr3XXJHxkVeQGwMeJ62uocKeXwz3ZPDBgsmAwHXB9psH5d6X3NL3szvTqXK7c9G7L_n0GK0bbrK8HFGyDLqEsgmlWalMo4IsiOkXYgCY_LQT-mDhjVIhjYp9Clc6niwyCH7kf8j-4Hn",
"width" : 2368
}
],
"place_id" : "ChIJZeTheKzHVTcRwZWaCuoFm_M",
"plus_code" : {
"compound_code" : "RC48+MF Dhaka, Bangladesh",
"global_code" : "7MMGRC48+MF"
},
"rating" : 3.3,
"reference" : "ChIJZeTheKzHVTcRwZWaCuoFm_M",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 4,
"vicinity" : "10/2 Kalachandpur Main Road, Dhaka"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8091434,
"lng" : 90.4135285
},
"viewport" : {
"northeast" : {
"lat" : 23.8104300302915,
"lng" : 90.4147993302915
},
"southwest" : {
"lat" : 23.8077320697085,
"lng" : 90.41210136970849
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "Dallas Cafe & Restaurant",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 1868,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/112222807597190469976\"\u003eM R K Rizvi\u003c/a\u003e"
],
"photo_reference" : "Aap_uED2_GHr00F3n2dC6xHDyCVkj-lKAbmShHsfRMzy2gE32ve31naLHmBVj8yNPcGwuyV1zQPZhq9OVBp6D0iwW4q6m60jLKktZtA-SVIYBJ5pBhUM3OsmSTgbm6uodV17Uq2f0lCa1V1_vB7aXl7hCNpiqEr-fthzytLEOhs3vwV1C_ae",
"width" : 4000
}
],
"place_id" : "ChIJT_WOwKrHVTcR3tjZ5_sorKE",
"plus_code" : {
"compound_code" : "RC57+MC Dhaka, Bangladesh",
"global_code" : "7MMGRC57+MC"
},
"price_level" : 2,
"rating" : 3.9,
"reference" : "ChIJT_WOwKrHVTcR3tjZ5_sorKE",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 347,
"vicinity" : "Annanya Shopping Complex Road13, Dhaka"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.80728299999999,
"lng" : 90.4170972
},
"viewport" : {
"northeast" : {
"lat" : 23.8085984302915,
"lng" : 90.4184559302915
},
"southwest" : {
"lat" : 23.80590046970849,
"lng" : 90.41575796970849
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "CP Five Star - Angel Food",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 3120,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/109233611489745482987\"\u003eMehery Chowdhury\u003c/a\u003e"
],
"photo_reference" : "Aap_uED-4A2HYVTeTawGcXjacqsnI5KdM4t_J9q8HjWStOw5Gj3R0yXWcqHKz1TT0vOSGkve-pbnHY3BXKMcepMTvNGIQFolPN1GOtMAIDi_kLPQnp3xRrPZnmhMlJGUE9KX_4-Y-0CEyQknOuOA2QXlH9NFQtd1Ayz-WRSFn0sNMQr5kzCF",
"width" : 4160
}
],
"place_id" : "ChIJ1TA7gKvHVTcR4VZuQ5KxXAY",
"plus_code" : {
"compound_code" : "RC48+WR Dhaka, Bangladesh",
"global_code" : "7MMGRC48+WR"
},
"rating" : 3.9,
"reference" : "ChIJ1TA7gKvHVTcR4VZuQ5KxXAY",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 214,
"vicinity" : "Kalachandpur Main Road, Dhaka"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8031317,
"lng" : 90.40911009999999
},
"viewport" : {
"northeast" : {
"lat" : 23.8044854802915,
"lng" : 90.4105048302915
},
"southwest" : {
"lat" : 23.8017875197085,
"lng" : 90.40780686970849
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "hotel Aristocrat in restaurent",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 4160,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/104252118993622156037\"\u003eUber AITS\u003c/a\u003e"
],
"photo_reference" : "Aap_uEDsEMARnVRLkrCSVXfIn2D8DwxNRDTs-7q8uVIP9IbvH-RL69gqWJLyRYYRUf1SmV9bDBcXqZigCkIXTMvvHH5lq_c72PR5mxMRS9DiADEcwiGGQ17VoCX64JefKlJFXTmaRtv00MVK1LBIthyPRw1H70B6Hmdo5a_5sNGensPx1QR3",
"width" : 3120
}
],
"place_id" : "ChIJV3AkwQbHVTcRxtAhuEBoaC4",
"plus_code" : {
"compound_code" : "RC35+7J Dhaka, Bangladesh",
"global_code" : "7MMGRC35+7J"
},
"rating" : 3.8,
"reference" : "ChIJV3AkwQbHVTcRxtAhuEBoaC4",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 9,
"vicinity" : "Road No 68, Dhaka"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8144327,
"lng" : 90.4209805
},
"viewport" : {
"northeast" : {
"lat" : 23.8157832802915,
"lng" : 90.4223832802915
},
"southwest" : {
"lat" : 23.8130853197085,
"lng" : 90.41968531970849
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "Dhaka Mushroom Center",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 1241,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/113128177104777230494\"\u003eshah shafi\u003c/a\u003e"
],
"photo_reference" : "Aap_uEDAvEDZ1Gc_mlTGUML9n8trXBsuLUhUJ27c_ZsNEnY75cPHF0vWYWPpgzzjT4ivc8tdNI1NzaFcYY94weCSS6SnLlxKEfjSJB89W3ZMw4jc8DkI18O8clM9wJFpjcxH9SloZddfew9gp7NbYFj13m9Y1SOC1VnDUlXUvW6aKYef6e3F",
"width" : 1280
}
],
"place_id" : "ChIJeWT4a1LGVTcRjRgnEh4UqWw",
"plus_code" : {
"compound_code" : "RC7C+Q9 Dhaka, Bangladesh",
"global_code" : "7MMGRC7C+Q9"
},
"rating" : 4,
"reference" : "ChIJeWT4a1LGVTcRjRgnEh4UqWw",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 22,
"vicinity" : "39/1, House #Ka-121 Joar Shahara Bazar Road, Dhaka"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8146254,
"lng" : 90.42095430000001
},
"viewport" : {
"northeast" : {
"lat" : 23.81597663029151,
"lng" : 90.42236758029151
},
"southwest" : {
"lat" : 23.8132786697085,
"lng" : 90.4196696197085
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "Aziz Food Corner",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 3120,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/113280283606642504704\"\u003eshohagh hossain\u003c/a\u003e"
],
"photo_reference" : "Aap_uEAIkM1HLk2W1PLBkJkvvOgg4PR8UDPVgsIWOg7HaTOBrreuYv_TvigoZHgtNwRoYhxgFl_24CgyvHLbnwYK1UK39veHYrPhlhMGcKjnrBPHdhdzCzqE6KlLeK0744cIJ5_ggwTX3FTT_u4J71WHD76JQ31od5_lDwh-GBxrNeR1j8KU",
"width" : 4160
}
],
"place_id" : "ChIJkzZla1LGVTcRqHssHErLhGU",
"plus_code" : {
"compound_code" : "RC7C+V9 Dhaka, Bangladesh",
"global_code" : "7MMGRC7C+V9"
},
"rating" : 3.8,
"reference" : "ChIJkzZla1LGVTcRqHssHErLhGU",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 10,
"vicinity" : "Ka-52 Kuril Chowrasta, 52 Progati Sarani Road, Dhaka"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8118713,
"lng" : 90.42268279999999
},
"viewport" : {
"northeast" : {
"lat" : 23.8132649802915,
"lng" : 90.42402658029151
},
"southwest" : {
"lat" : 23.8105670197085,
"lng" : 90.42132861970849
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "CFC",
"opening_hours" : {
"open_now" : false
},
"photos" : [
{
"height" : 1200,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/116676593096372528826\"\u003eRashed Uddin\u003c/a\u003e"
],
"photo_reference" : "Aap_uEAXkBiYJp4b9JKfobDbqjDRXzCLCQp5Psq_Frcq0hZl2jjYEEViFCjiJZaJlpdOdG78gCyZirRu9PrDZaRiCMdgaiV5QQASsWUNgYpML5QSQBS0GnEzLg6DF2VkaK86WsPwhRWI31IszSHj81derb-rA8SjpP-p25V3HJZUSrpFeF2O",
"width" : 1600
}
],
"place_id" : "ChIJe2esaU3GVTcRXNtFQPPLAbs",
"plus_code" : {
"compound_code" : "RC6F+P3 Dhaka, Bangladesh",
"global_code" : "7MMGRC6F+P3"
},
"reference" : "ChIJe2esaU3GVTcRXNtFQPPLAbs",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"vicinity" : "Ka-54, 3 Bashundhara Road, Dhaka"
},
{
"business_status" : "CLOSED_TEMPORARILY",
"geometry" : {
"location" : {
"lat" : 23.8005496,
"lng" : 90.41208709999999
},
"viewport" : {
"northeast" : {
"lat" : 23.8018848802915,
"lng" : 90.41339293029151
},
"southwest" : {
"lat" : 23.7991869197085,
"lng" : 90.4106949697085
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "Meraki",
"permanently_closed" : true,
"photos" : [
{
"height" : 2448,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/110926987641404359589\"\u003eAhmed Arif Sirajee\u003c/a\u003e"
],
"photo_reference" : "Aap_uEDzRkvzS6Xu-bbW11jB_nexuOueaWtWDHjJJXrY4hZv_k5abgEnK3GH10I1hOfxYpPT-w6JwvVcQWRtLnK2wfCZ_bKgxnEZ7HfHn3ReKFwId25LdyCdP8PfBnvSoEylk2qyGiu7PGd59KU-ceG_l7jTiie0lHUETo9jTS3ox_HktE6f",
"width" : 3264
}
],
"place_id" : "ChIJiXwJjwfHVTcRsAxg1wHvCms",
"plus_code" : {
"compound_code" : "RC26+6R Dhaka, Bangladesh",
"global_code" : "7MMGRC26+6R"
},
"price_level" : 3,
"rating" : 3.9,
"reference" : "ChIJiXwJjwfHVTcRsAxg1wHvCms",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 374,
"vicinity" : "188 Gulshan North Avenue, Dhaka"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8134837,
"lng" : 90.42342049999999
},
"viewport" : {
"northeast" : {
"lat" : 23.8148326802915,
"lng" : 90.42476948029149
},
"southwest" : {
"lat" : 23.8121347197085,
"lng" : 90.42207151970848
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "The Great Kabab Factory",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 1080,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/116819639182447281125\"\u003eImtiaz Khan\u003c/a\u003e"
],
"photo_reference" : "Aap_uEC96MkLy2TgY5Ze4q4stXpe42MoEmLOZBHhU46b5z9TC6jdEhwFlH1ceo5SiGo15eBEhVok6a7QFg62brxzAHHgj9wZS5_M0SELUOYVZCq-2GOw5wSV_WuhGtYpKMbvEdrW7hMaoH0aHe4xPM42cVw_OiOhso3LNcQ_EL39WXa9avzy",
"width" : 1920
}
],
"place_id" : "ChIJxdjmn03GVTcR2o_Hvw0E70k",
"plus_code" : {
"compound_code" : "RC7F+99 Dhaka, Bangladesh",
"global_code" : "7MMGRC7F+99"
},
"price_level" : 2,
"rating" : 4.2,
"reference" : "ChIJxdjmn03GVTcR2o_Hvw0E70k",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 681,
"vicinity" : "Shop #1C-037, 1st Floor, Jamuna Future Park KA 244, Kuril, Pragati Shoroni, Dhaka"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8121157,
"lng" : 90.4239862
},
"viewport" : {
"northeast" : {
"lat" : 23.8135100802915,
"lng" : 90.42531968029149
},
"southwest" : {
"lat" : 23.81081211970849,
"lng" : 90.42262171970849
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "Food 24x7",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 3984,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/101050510210647412274\"\u003eSOHEL talukder\u003c/a\u003e"
],
"photo_reference" : "Aap_uEAKjodJFiI8RGea-hMEsKilmtKogCIq1SGUXTGq2-Vi867_uEpFhoj0KGb6_SqqsrqWS8Gxu4Ht0na0BwVR9mZVwaMMu9ZC5KuETgD-onmlZ8eF9nK74UKOBcfw64_A6KlcFZRohehhCEHBVWGlNs3-d9GrCH8rNBSqi4xUcT5tdxn5",
"width" : 5312
}
],
"place_id" : "ChIJjdY4G03GVTcRmay3rYqqpTg",
"plus_code" : {
"compound_code" : "RC6F+RH Dhaka, Bangladesh",
"global_code" : "7MMGRC6F+RH"
},
"rating" : 3.4,
"reference" : "ChIJjdY4G03GVTcRmay3rYqqpTg",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 87,
"vicinity" : "KA-5A, Jagannathpur Road, Bashundhara Main Gate, Dhaka"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8130019,
"lng" : 90.4237856
},
"viewport" : {
"northeast" : {
"lat" : 23.81435088029151,
"lng" : 90.42513458029151
},
"southwest" : {
"lat" : 23.8116529197085,
"lng" : 90.42243661970849
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "KFC - Jamuna Future Park Complex",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 1311,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/105658237526378718334\"\u003eMele Jesmin\u003c/a\u003e"
],
"photo_reference" : "Aap_uEDnwkOrVHK_MEB7729WPGtZKfQph2t6hKAfG8hKqLkGlGQ_Rvxl9QHhFMsqdmBFyESBJlffqIvRYOQm6hC3ypDQaUYk7H67LjmHno9oONIt2ujVPGL9Ckgovq0QpWXJsjPbUDfVP396NybXRZAmG71mv2thAowQ8DOOg_0c70GWSYK3",
"width" : 2048
}
],
"place_id" : "ChIJ4ceNB03GVTcRttrs96U94pE",
"plus_code" : {
"compound_code" : "RC7F+6G Dhaka, Bangladesh",
"global_code" : "7MMGRC7F+6G"
},
"price_level" : 2,
"rating" : 4,
"reference" : "ChIJ4ceNB03GVTcRttrs96U94pE",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 332,
"vicinity" : "Shop No # 5C-013, 5th Floor, Jamuna Future Park Complex, KA - 244, Kurli, Progoti Sarani - Debogram Road, ঢাকা"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8129945,
"lng" : 90.4238453
},
"viewport" : {
"northeast" : {
"lat" : 23.8140655302915,
"lng" : 90.4252662302915
},
"southwest" : {
"lat" : 23.8113675697085,
"lng" : 90.42256826970849
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "BFC (Best Fried Chicken)",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 1301,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/103043899579130649522\"\u003eBFC (Best Fried Chicken)\u003c/a\u003e"
],
"photo_reference" : "Aap_uEAJRRxTxohdvP2XlY8Km9TSQE5djrwRlmaXG4HYQJ9mQaLX2WHTukuxHHFqIZIC9JZEK7A6U1glWvKgrQTiduq9dshNuHEmOzz3r830cXf-HPVXcUPXRc8G9wnaYnqG6QF14VzJAdVjkRCzvwul0ZIBGwzuG2nVGq_mkdDfMXa0HrY",
"width" : 3418
}
],
"place_id" : "ChIJBVG4p03GVTcR-xdNDDSZIoc",
"plus_code" : {
"compound_code" : "RC7F+5G Dhaka, Bangladesh",
"global_code" : "7MMGRC7F+5G"
},
"price_level" : 2,
"rating" : 3.7,
"reference" : "ChIJBVG4p03GVTcR-xdNDDSZIoc",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 65,
"vicinity" : "Shop No. 5D-075, KA-241, Food Court, Jamuna Future Park, Kuril, Progoti Sarani, Baridhara, Dhaka"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8127513,
"lng" : 90.42394059999999
},
"viewport" : {
"northeast" : {
"lat" : 23.8139472302915,
"lng" : 90.4253292302915
},
"southwest" : {
"lat" : 23.8112492697085,
"lng" : 90.42263126970849
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "Tekiya Japanese Cuisine (5C-44)",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 320,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/103241808890413222864\"\u003eAlam Md. Ashrul\u003c/a\u003e"
],
"photo_reference" : "Aap_uEBjX7ITdViJuPC8Ooei4See_2gUlsY2qtArEvnKKWBHbyLKgWsfOyvFmAzY0Vt-cxTpqCyK52qXtIlBYGYZ0QMgK0ycrn02ICoBEN83PsQOsmcfqSkenmL2KuId3I0N8iP1hoiQe5ih_5BMInSafO8lnoDeEQOgJpmUrHmsv-5DXM8W",
"width" : 480
}
],
"place_id" : "ChIJn3qoVUzGVTcR74v8oeiZSD8",
"plus_code" : {
"compound_code" : "RC7F+4H Dhaka, Bangladesh",
"global_code" : "7MMGRC7F+4H"
},
"rating" : 3.9,
"reference" : "ChIJn3qoVUzGVTcR74v8oeiZSD8",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 10,
"vicinity" : "Jamuna Future Park Level 5 Zone C Shop#, 44 NSU Main Campus Road, Dhaka"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8121151,
"lng" : 90.4242986
},
"viewport" : {
"northeast" : {
"lat" : 23.8134760302915,
"lng" : 90.42569028029151
},
"southwest" : {
"lat" : 23.81077806970849,
"lng" : 90.42299231970848
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "Jafran Resto",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 735,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/103748631630693037452\"\u003eAsif Mahmud\u003c/a\u003e"
],
"photo_reference" : "Aap_uEDDA6KJy5nCPRwrTLNd_-ibnG8lHJgoWBrJpUI2Y6lKdZAOXMlj39nlPVZviFeKFfZ4v0dcfeU3mAZhIAXvUbATofyLhMqDveBn1LOgjzX8BFj_i7odfN79_0jynJGamx8CIb4TwGw_HaRnt0sk8Bz6t6HVAEfYIBD8xghDpVyaBgBm",
"width" : 720
}
],
"place_id" : "ChIJF4cKZ0vGVTcRX9v-yGH-Bdo",
"plus_code" : {
"compound_code" : "RC6F+RP Dhaka, Bangladesh",
"global_code" : "7MMGRC6F+RP"
},
"price_level" : 2,
"rating" : 4.1,
"reference" : "ChIJF4cKZ0vGVTcRX9v-yGH-Bdo",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 1652,
"vicinity" : "Lift 5, fifth floor, Adept NR Complex( beside Pitha Ghar) ,Opposite to Jamuna Future Park Pocket Gate Road, ঢাকা"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.81362979999999,
"lng" : 90.4239838
},
"viewport" : {
"northeast" : {
"lat" : 23.8153663302915,
"lng" : 90.42524018029151
},
"southwest" : {
"lat" : 23.81266836970849,
"lng" : 90.42254221970849
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "Saffron Restaurant and Cafe",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 683,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/115093033992517719552\"\u003eCrise Cardiac Grill\u003c/a\u003e"
],
"photo_reference" : "Aap_uECf_JcYmobF2ehMQvs_88Z0aY_HBE68N88E1BmTLM5MaInINCDg4GfGMwnvoDIuDjGLgDpCOh_b5egxBtdzFacIoYjGER9KnOAzJGyi513sJ1qyqmLnrVhA0rwK1A0fQotAMzfxbq6__9s0DQkJyCb1upDNNKPb4FbghsI8aFZpwBDL",
"width" : 1024
}
],
"place_id" : "ChIJAQAAcDPBVTcR8UZ_dZUjN1E",
"plus_code" : {
"compound_code" : "RC7F+FH Dhaka, Bangladesh",
"global_code" : "7MMGRC7F+FH"
},
"rating" : 3.9,
"reference" : "ChIJAQAAcDPBVTcR8UZ_dZUjN1E",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 142,
"vicinity" : "KA-244, Shop No 1-2, Block- D, Kuril, Progoti Sharani Jamuna Future Park, Dhaka"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8135673,
"lng" : 90.42434019999999
},
"viewport" : {
"northeast" : {
"lat" : 23.8150010302915,
"lng" : 90.4261094302915
},
"southwest" : {
"lat" : 23.81230306970849,
"lng" : 90.42341146970848
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "Arsalan Restaurant",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 3024,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/116935211375390332024\"\u003eMahbubur Rahman Shoujatto\u003c/a\u003e"
],
"photo_reference" : "Aap_uEAEKuVJ4wtqhIrudOQQ7GABEq91smnBXcfJCgE1R5iS3qT9py32f5YusVBicMW0h-BbWW3tIQdsePGmzN4huTukzPaIBq6hSE9GfYx6gk7_xwl0oVwJnjJGjJkcvXUkpMY7fwgRk6zFVTtO2GoYvRZi8dbZi7yTeMaspH0d3Mgb-vtV",
"width" : 4032
}
],
"place_id" : "ChIJw-qqrU3GVTcRDRdnX1ChdvE",
"plus_code" : {
"compound_code" : "RC7F+CP Dhaka, Bangladesh",
"global_code" : "7MMGRC7F+CP"
},
"rating" : 3.3,
"reference" : "ChIJw-qqrU3GVTcRDRdnX1ChdvE",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 141,
"vicinity" : "Shop 6,7,8,29 Level 5, Zone D, Jamuna Future Park, Dhaka - 1229, 1229"
},
{
"business_status" : "OPERATIONAL",
"geometry" : {
"location" : {
"lat" : 23.8122767,
"lng" : 90.4247314
},
"viewport" : {
"northeast" : {
"lat" : 23.8136945802915,
"lng" : 90.42605693029151
},
"southwest" : {
"lat" : 23.8109966197085,
"lng" : 90.4233589697085
}
}
},
"icon" : "https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png",
"icon_background_color" : "#FF9E67",
"icon_mask_base_uri" : "https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet",
"name" : "Pizza Lab",
"opening_hours" : {
"open_now" : true
},
"photos" : [
{
"height" : 3024,
"html_attributions" : [
"\u003ca href=\"https://maps.google.com/maps/contrib/102441242742319247303\"\u003eViper Leather\u003c/a\u003e"
],
"photo_reference" : "Aap_uEAXp0vtep67VbvhT1Nqsf5oDQEnuo30D-MThBC2es-zEyvCs5GaHm7G7lVRyR9m7EakTq6WZMAp9ceQH7ZSx04r-IUv2jtdpsVoj-iMBIk2Fj0uZmHfeui9NqBQFzDHI1hfbdxd6ppBgzqsmvUXvP_ws4UPoIgCT_SepTmD09Qyu2_E",
"width" : 4032
}
],
"place_id" : "ChIJ4RPs5EzGVTcRYeUysBl9k6A",
"plus_code" : {
"compound_code" : "RC6F+WV Dhaka, Bangladesh",
"global_code" : "7MMGRC6F+WV"
},
"price_level" : 2,
"rating" : 4.2,
"reference" : "ChIJ4RPs5EzGVTcRYeUysBl9k6A",
"scope" : "GOOGLE",
"types" : [ "restaurant", "food", "point_of_interest", "establishment" ],
"user_ratings_total" : 433,
"vicinity" : "Ka-10/1, Baridhara Road, Jagannathpur, Dhaka"
}
],
"status" : "OK"
}
###Markdown
Converting Response to Python Dictionary
###Code
response_dict = json.loads(response.text)
response_dict
response_dict.keys()
###Output
_____no_output_____
###Markdown
Visualizing a Single Result
###Code
for result in response_dict['results']:
print(result)
print(result.keys())
break
###Output
{'business_status': 'OPERATIONAL', 'geometry': {'location': {'lat': 23.8120563, 'lng': 90.4220469}, 'viewport': {'northeast': {'lat': 23.8133625302915, 'lng': 90.42340033029151}, 'southwest': {'lat': 23.8106645697085, 'lng': 90.42070236970851}}}, 'icon': 'https://maps.gstatic.com/mapfiles/place_api/icons/v1/png_71/restaurant-71.png', 'icon_background_color': '#FF9E67', 'icon_mask_base_uri': 'https://maps.gstatic.com/mapfiles/place_api/icons/v2/restaurant_pinlet', 'name': 'Magpie Thai and Chinese Restaurant', 'opening_hours': {'open_now': True}, 'photos': [{'height': 720, 'html_attributions': ['<a href="https://maps.google.com/maps/contrib/105974512057796278586">mgh reza</a>'], 'photo_reference': 'Aap_uEADIB4zHTB_cE-W6BRfp9x-UYtCULV6FhQaSViIq6CgjfZTUoO6jw4F-6oYXXLCPysJRZ4HBaUszKF9YMVKbm_Xx4YCkE1-_V-l1kgrgm_ptCAaWk9SeR3pwNXdptUFUOn61hVuPs3ZQi_3eGgHZ0eySOXwqheXl__1N774KBtva4hL', 'width': 1280}], 'place_id': 'ChIJozRxWq3HVTcRxryJMys-sFs', 'plus_code': {'compound_code': 'RC6C+RR Dhaka, Bangladesh', 'global_code': '7MMGRC6C+RR'}, 'price_level': 2, 'rating': 4, 'reference': 'ChIJozRxWq3HVTcRxryJMys-sFs', 'scope': 'GOOGLE', 'types': ['restaurant', 'food', 'point_of_interest', 'establishment'], 'user_ratings_total': 689, 'vicinity': 'Bashundhara Road, Dhaka'}
dict_keys(['business_status', 'geometry', 'icon', 'icon_background_color', 'icon_mask_base_uri', 'name', 'opening_hours', 'photos', 'place_id', 'plus_code', 'price_level', 'rating', 'reference', 'scope', 'types', 'user_ratings_total', 'vicinity'])
###Markdown
Identifying the relevant keys for a single result:
###Code
relevant_keys = ['name', 'geometry', 'place_id', 'price_level', 'rating', 'user_ratings_total']
###Output
_____no_output_____
###Markdown
Getting the relevant data for each result
###Code
for result in response_dict['results']:
for key in relevant_keys:
print(key + ' : ' + str(result[key]))
break
###Output
name : Magpie Thai and Chinese Restaurant
geometry : {'location': {'lat': 23.8120563, 'lng': 90.4220469}, 'viewport': {'northeast': {'lat': 23.8133625302915, 'lng': 90.42340033029151}, 'southwest': {'lat': 23.8106645697085, 'lng': 90.42070236970851}}}
place_id : ChIJozRxWq3HVTcRxryJMys-sFs
price_level : 2
rating : 4
user_ratings_total : 689
###Markdown
Function to Extract Relevant Data
###Code
def get_relevant_data(data):
'''
This function goes through a result and returns relevant information
- name
- latitude
- longitude
- rating
- reviews (number of reviews/ratings)
- price level
'''
return {
'name': data['name'],
'latitude': data['geometry']['location']['lat'],
'longitude': data['geometry']['location']['lng'],
'rating': data['rating'],
'reviews': data['user_ratings_total'],
'price_level': data['price_level']
}
###Output
_____no_output_____
###Markdown
Testing the Function:
###Code
# Testing the Function:
restaurant_data = []
for result in response_dict['results']:
restaurant_data.append(get_relevant_data(result))
###Output
_____no_output_____
###Markdown
Since not every restaurant has a **price level/user ratings/user reviews**, we can add fallback logic to our function for these fields and rewrite it
###Code
def get_loc_data(
data, extra_keys = ['rating', 'user_ratings_total', 'price_level']
):
'''
Function to get location data from a API Call result
returns name, place_id, latitude & longitude
also returns extra_keys appended as a python dictionary
'''
data_dict = {}
data_dict['name'] = data['name']
data_dict['place_id'] = data['place_id']
data_dict['latitude'] = data['geometry']['location']['lat']
data_dict['longitude'] = data['geometry']['location']['lng']
for key in extra_keys:
try:
if key == 'price_level':
                # Convert price levels 1, 2, 3, 4 to $, $$, $$$, $$$$
data_dict[key] = '$'*data[key]
else:
data_dict[key] = data[key]
except:
data_dict[key] = 'N/A'
return data_dict
###Output
_____no_output_____
###Markdown
Testing the function again:
###Code
restaurant_data = []
for result in response_dict['results']:
restaurant_data.append(get_loc_data(result))
###Output
_____no_output_____
###Markdown
The function seems to be working fine. Visualizing the data in a Pandas DataFrame:
###Code
df_res = pd.DataFrame(restaurant_data)
df_res
###Output
_____no_output_____
###Markdown
Get Responses from Multiple Pages: Google Places API provides more than one page of responses. To get the data from the next page, the `next_page_token` is used
###Code
# Initial Query
responses = [] # Array to hold multiple responses
url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={},{}&radius={}&type={}&key={}".format(
latitude, longitude, radius, loc_type, api_key)
payload={}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
# Convert JSON Response to Dictionary
response_dict = json.loads(response.text)
responses.append(response_dict)
idx = 1
while ('next_page_token' in response_dict.keys()):
idx += 1
print('Fetching Data From Page %d'%(idx))
time.sleep(2) # Need to wait for 2 seconds for next page data to load
next_page_token = response_dict['next_page_token']
url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?pagetoken={}&key={}".format(
next_page_token, api_key)
response = requests.request("GET", url, headers=headers, data=payload)
response_dict = json.loads(response.text)
responses.append(response_dict)
###Output
Fetching Data From Page 2
Fetching Data From Page 3
###Markdown
Now the `get_loc_data()` function can be called on each result with nested loops over the responses
###Code
restaurant_data_dict = []
for response in responses:
for result in response['results']:
restaurant_data_dict.append(get_loc_data(result))
restaurant_data_dict
###Output
_____no_output_____
###Markdown
Function for Getting Responses from Multiple Pages. The above steps can be simplified into a function:
###Code
def get_nearby_locs(latitude, longitude, radius, loc_type, api_key, verbose = True):
responses = [] # Array for storing multiple responses
url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={},{}&radius={}&type={}&key={}".format(latitude, longitude, radius, loc_type, api_key)
if verbose: print('Sending Request to Google Places API.')
# Initial Query
response = requests.request("GET", url, headers=headers, data=payload)
response_dict = json.loads(response.text) # Convert to Dictionary
responses.append(response_dict)
while ('next_page_token' in response_dict.keys()):
if verbose: print('Waiting for next page ...')
time.sleep(2) # Wait for 2 seconds for next page to load
next_page_token = response_dict['next_page_token']
url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?pagetoken={}&key={}".format(next_page_token, api_key)
response = requests.request("GET", url, headers=headers, data=payload)
response_dict = json.loads(response.text)
responses.append(response_dict)
if verbose: print('Received all responses. Scraping responses ...', end = '\t')
data_dict = [] # Array of dictionaries containing data
for response in responses:
for result in response['results']:
data_dict.append(get_loc_data(result))
if verbose: print('Complete!')
return data_dict
###Output
_____no_output_____
###Markdown
Testing The Function:
###Code
locs = get_nearby_locs(latitude, longitude, radius, loc_type, api_key)
###Output
Sending Request to Google Places API.
Waiting for next page ...
Waiting for next page ...
Received all responses. Scraping responses ... Complete!
###Markdown
Google Places API provides a maximum of 3 pages of responses, with 20 responses per page, so the maximum number of responses is 60. Checking the number of responses:
###Code
len(locs)
###Output
_____no_output_____
###Markdown
Displaying the DataFrame
###Code
df = pd.DataFrame(locs)
df
###Output
_____no_output_____
###Markdown
The radius of Nearby Search can vary up to a range of 50000 meters (50 km). However, the number of results returned by each query is limited to 60, so we should select an optimal radius such that there are no more than 60 restaurants within it. We can survey the restaurants at one of the most densely populated areas (on the basis of restaurants) in Dhaka: Dhanmondi. **Co-Ordinates for Dhanmondi (Satmasjid Rd)**
###Code
# Latitude & Longitude for Dhanmondi (Satmasjid Rd):
latitude = 23.7470913
longitude = 90.3700627
radius = 100
locs = get_nearby_locs(latitude, longitude, radius, loc_type, api_key)
print('Number of Restaurants found within Radius: ' + str(len(locs)))
radius = 150
locs = get_nearby_locs(latitude, longitude, radius, loc_type, api_key)
print('Number of Restaurants found within Radius: ' + str(len(locs)))
radius = 200
locs = get_nearby_locs(latitude, longitude, radius, loc_type, api_key)
print('Number of Restaurants found within Radius: ' + str(len(locs)))
###Output
Sending Request to Google Places API.
Waiting for next page ...
Waiting for next page ...
Received all responses. Scraping responses ... Complete!
Number of Restaurants found within Radius: 60
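###Markdown
A single Nearby Search query is capped at 60 results, so one way to survey a dense area is to query a grid of closely spaced points with a small radius and de-duplicate the merged results by `place_id`. The sketch below only illustrates the idea; the grid spacing and extent are hypothetical values, not ones used in this survey.
###Code
import numpy as np

def get_locs_on_grid(center_lat, center_lng, step_deg=0.003, n_steps=2,
                     radius=200, loc_type='restaurant', api_key=api_key):
    '''
    Query a (2*n_steps+1) x (2*n_steps+1) grid of points around a center point
    and merge the results, keeping one record per place_id.
    Note: every grid point spends API quota.
    '''
    seen = {}
    for dlat in np.arange(-n_steps, n_steps + 1) * step_deg:
        for dlng in np.arange(-n_steps, n_steps + 1) * step_deg:
            locs = get_nearby_locs(center_lat + dlat, center_lng + dlng,
                                   radius, loc_type, api_key, verbose=False)
            for loc in locs:
                seen[loc['place_id']] = loc
    return list(seen.values())

# Example call (commented out to avoid spending API quota):
# grid_locs = get_locs_on_grid(latitude, longitude)
# print('Unique restaurants found on the grid:', len(grid_locs))
###Output
_____no_output_____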
|
house-price-prediciton-v0.ipynb | ###Markdown
Data Pre-Processing & Visualization
###Code
# check if data loaded correctly
display(train)
train.describe()['SalePrice'] # basic info of SalePrice
# train_data.info() # more technical information of training data
# count the missing values in each column
missing = train.isnull().sum()
missing = missing[missing > 0]
missing.sort_values(inplace=True)
# visualize the columns that have the most missing values
missing.plot(kind='bar');
# A couple of ways to encode the categorical values:
# 1. one-hot encoding using get_dummies() from pandas
# 2. manually assigning values
# 3. encoding with integer values based on their relationship with SalePrice.
categorical_features = train.select_dtypes(include='object').columns
numerical_features = train.select_dtypes(exclude='object').columns
# ANOVA test to see the significance of categorical features
# Inspired by: https://www.kaggle.com/dgawlik/house-prices-eda
def anova(frame):
anv = pd.DataFrame()
anv['feature'] = categorical_features
pvals = []
for c in categorical_features:
samples = []
for cls in frame[c].unique(): # find sale prices associated with each categorical feature
s = frame[frame[c] == cls]['SalePrice'].values
if len(s) != 0: # drop features with null values
samples.append(s)
pval = stats.f_oneway(*samples)[1] # use pval to see influence of each feature to SalePrice
pvals.append(pval)
anv['pval'] = pvals
return anv.sort_values('pval')
a = anova(train)
a.dropna(inplace=True) # drop features have null values
a['disparity'] = np.log(1 / a['pval'].values)
plt.figure(figsize=(8,8))
sns.barplot(data=a, x='feature', y='disparity')
x=plt.xticks(rotation=90)
# encode the categorical features based on their mean SalePrice
# For each categorical feature:
# 1. find SalePrice mean for all labels in that categorical feature,
# 2. reorder the labels based on mean SalePrice ascending
# 3. assign different integer values to each label based on their order
# 4. convert the dictionary type and iterate through every label
# 4.1 inside the loop, change every label to its corresponding integer value in train dataset
def encode_categorical_features(frame, feature):
ordering = pd.DataFrame()
ordering['val'] = frame[feature].unique()
ordering.index = ordering.val
ordering['spmean'] = frame[[feature, 'SalePrice']].groupby(feature).mean()['SalePrice']
ordering = ordering.sort_values('spmean')
ordering['index'] = range(1, ordering.shape[0]+1)
ordering = ordering['index'].to_dict()
for label, index in ordering.items():
frame.loc[frame[feature]==label, feature+'_E'] = index
categorical_features_encoded = []
for feature in categorical_features:
encode_categorical_features(train, feature)
categorical_features_encoded.append(feature+'_E')
categorical_features_encoded
# also encode categorical features for combined dataset
for feature in categorical_features:
encode_categorical_features(all_data, feature)
all_features = numerical_features.tolist() + categorical_features_encoded
# drop original categorical features that are not encoded
train = train.drop(columns=categorical_features)
all_data = all_data.drop(columns=categorical_features)
# correlations
corr = train.corr()
corr.sort_values(by=['SalePrice'], ascending=False, inplace=True)
# visualize correlations associate with SalePrice
plt.figure(figsize=(8,18))
ax = sns.barplot(data=corr[['SalePrice']], x='SalePrice', y=corr.index, orient='h')
ax.set_xlabel('Correlations with SalePrice', fontsize=16)
ax.set_ylabel('features', fontsize=16);
# Alternative way to plot correlations using pandas.plot()
# ax = corr[['SalePrice']].plot(kind='barh', figsize=(8,24), fontsize=12)
# ax.set_xlabel('correlations with SalePrice', fontsize=16)
# ax.set_ylabel('features', fontsize=16);
# use seaborn.pairplot() to see the relations between SalePrice and some most important features
columns = ['SalePrice', 'OverallQual', 'GrLivArea', 'Neighborhood_E', 'ExterQual_E', 'GarageArea']
sns.set() # set aesthetic parameters in one step
sns.pairplot(train[columns]);
###Output
_____no_output_____
###Markdown
Data Preparation
###Code
# Log transform of the skewed numerical features to reduce the impact of outliers
# Inspired by: https://www.kaggle.com/juliencs/a-study-on-regression-applied-to-the-ames-dataset
# Generally, a skewness absolute value of > 0.5 is considered moderately skewed
skewness = train[all_features].apply(lambda x: stats.skew(x))
skewness = skewness[abs(skewness) > 0.5]
print(skewness.shape[0], 'skewed numerical features to log transform')
skewed_features = skewness.index
train[skewed_features] = np.log1p(train[skewed_features])
# filling all remaining NaN values with 0
train = train.fillna(0.)
all_data = all_data.fillna(0.)
# prepare train and test dataset for model trainning
y = train.SalePrice
train = train.drop(columns=['Id', 'SalePrice'])
all_data = all_data.drop(columns=['Id', 'SalePrice'])
all_features.remove('SalePrice')
all_features.remove('Id')
numerical_features = numerical_features.drop(['Id', 'SalePrice'])
# split the training set
X_train, X_test, y_train, y_test = train_test_split(train, y, test_size=0.3, random_state=0)
print('all_data shape: ', all_data.shape)
print('train shape: ', train.shape)
print('X_train shape: ', X_train.shape)
print('X_test shape: ', X_test.shape)
print('y_train shape: ', y_train.shape)
print('y_test shape: ', y_test.shape)
# error measurement: RMSE
# scorer = make_scorer(mean_squared_error, greater_is_better=False)
def rmse_cv_train(model):
rmse = np.sqrt(-cross_val_score(model, X_train, y_train, scoring='neg_mean_squared_error', cv=10))
return (rmse)
def rmse_cv_test(model):
rmse = np.sqrt(-cross_val_score(model, X_test, y_test, scoring='neg_mean_squared_error', cv=10))
return (rmse)
###Output
_____no_output_____
###Markdown
Train the Linear Models
###Code
# Standard linear regression without regularization
lr = linear_model.LinearRegression()
lr.fit(X_train, y_train)
print('Linear Regression RMSE on training set: ', rmse_cv_train(lr).mean())
print('Linear Regression RMSE on test set: ', rmse_cv_test(lr).mean())
y_train_predict = np.exp(lr.predict(X_train))
y_test_predict = np.exp(lr.predict(X_test))
# plot predictions vs real values
plt.scatter(y_train_predict, np.exp(y_train), c='blue', marker='s', label='training data')
plt.scatter(y_test_predict, np.exp(y_test), c='green', marker='s', label='test data')
plt.title('Linear Regression')
plt.xlabel('predicted values')
plt.ylabel('real values')
plt.legend(loc='upper left')
# Lasso linear regression (L1 penalty)
lasso = linear_model.LassoCV(max_iter=10000)
lasso.fit(X_train, y_train)
print('Lasso RMSE on training set: ', rmse_cv_train(lasso).mean())
print('Lasso RMSE on test set: ', rmse_cv_test(lasso).mean())
# Ridge linear regression (L2 penalty)
ridge = linear_model.RidgeCV()
ridge.fit(X_train, y_train)
print('Ridge RMSE on training set: ', rmse_cv_train(ridge).mean())
print('Ridge RMSE on test set: ', rmse_cv_test(ridge).mean())
# ElasticNet linear regression (L1 and L2 penalty)
elasticNet = linear_model.ElasticNetCV()
elasticNet.fit(X_train, y_train)
print('ElasticNet RMSE on training set: ', rmse_cv_train(elasticNet).mean())
print('ElasticNet RMSE on test set: ', rmse_cv_test(elasticNet).mean())
###Output
ElasticNet RMSE on training set: 0.19209338073961083
ElasticNet RMSE on test set: 0.18528287323525056
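###Markdown
It can also help to inspect what the regularized model actually selected. The cell below is a minimal added sketch using standard scikit-learn attributes of the fitted `LassoCV` model; it is not part of the original comparison.
###Code
import pandas as pd
# Regularization strength chosen by cross-validation
print('Lasso alpha chosen by CV:', lasso.alpha_)
# Number of features that survive the L1 penalty
coefs = pd.Series(lasso.coef_, index=X_train.columns)
print('Features kept:', int((coefs != 0).sum()), 'of', len(coefs))
# Ten largest coefficients by absolute value
print(coefs.reindex(coefs.abs().sort_values(ascending=False).index).head(10))
###Output
_____no_output_____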
|
notebooks/4. Chart Editor.ipynb | ###Markdown
JupyterLab Chart Editor. This example relies on the `jupyterlab-chart-editor` extension (https://github.com/plotly/jupyterlab-chart-editor). Imports
###Code
import plotly.graph_objs as go
import plotly.io as pio
import pandas as pd
###Output
_____no_output_____
###Markdown
Load Dataset. Load gapminder dataset for 1982
###Code
data = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/gapminderDataFiveYear.csv")
df_1982 = data[data['year']==1982]
df_1982 = df_1982.sort_values(['continent', 'country'])
df_1982.head()
###Output
_____no_output_____
###Markdown
Create empty FigureWidget
###Code
fig = go.FigureWidget()
fig
###Output
_____no_output_____
###Markdown
Add scatter trace per continent
###Code
for continent, continent_df in df_1982.groupby('continent'):
fig.add_scatter(x=continent_df.lifeExp,
y=continent_df.gdpPercap,
marker={'size': continent_df['pop'].tolist(), 'sizemode': 'area', 'sizeref': 200000},
mode='markers',
text=continent_df.country,
name=continent)
###Output
_____no_output_____
###Markdown
Write figure to json file
###Code
pio.write_json(fig, 'out/gapminder.plotly.json')
###Output
_____no_output_____
###Markdown
Edit with JupyterLab chart editor. Right click on `out/gapminder.plotly.json` and "Open With -> PlotlyEditor". Make edits to the chart to add a title, axis titles, trace coloring, annotations, etc. Then use "File -> Save As" to save the modified figure as `out/gapminder-styled.plotly.json`. Read modified figure back into Python
###Code
fig_styled = pio.read_json('out/gapminder-styled.plotly.json',
output_type='FigureWidget')
fig_styled
###Output
_____no_output_____
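###Markdown
The same kind of styling can also be applied programmatically instead of through the GUI editor, by assigning layout attributes on the figure object. This is an added sketch; the titles below are placeholders for illustration, not the values used in the edited file.
###Code
# Placeholder styling applied directly to the figure object
fig_styled.layout.title = 'Gapminder 1982'
fig_styled.layout.xaxis.title = 'Life expectancy (years)'
fig_styled.layout.yaxis.title = 'GDP per capita (USD)'
fig_styled
###Output
_____no_output_____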
###Markdown
Save edited figure to HTML
###Code
# Save as standalone html
from plotly.offline import plot
plot(fig_styled, filename='out/gapminder-styled.html', auto_open=False);
###Output
_____no_output_____
###Markdown
Save edited figure as pdf
###Code
# Save as image
pio.write_image(fig_styled, 'out/gapminder-styled.pdf')
###Output
_____no_output_____ |
docs/tryit.ipynb | ###Markdown
Try it ! **argopy** will manage for you all the complicated stuff of localising, downloading and formatting Argo data. Focus on your science ! So, simply import the ``argopy`` data fetcher:
###Code
from argopy import DataFetcher as ArgoDataFetcher
###Output
_____no_output_____
###Markdown
Then, get access to Argo data with 1 line of code:
###Code
ds = ArgoDataFetcher(src='erddap').region([-75, -45, 20, 30, 0, 100, '2011-01', '2011-02']).to_xarray()
###Output
_____no_output_____
###Markdown
In this example, we used a data fetcher to get data for a given space/time region. We retrieved all Argo data measurements from 75W to 45W, 20N to 30N, 0db to 100db and for January 2011 (the max date is exclusive). Data are returned as a collection of measurements in a [xarray.Dataset](http://xarray.pydata.org/en/stable/generated/xarray.Dataset.html).
###Code
ds
###Output
_____no_output_____
###Markdown
Fetched data are returned as a 1D array collection of measurements. If you prefer to work with a 2D array collection of vertical profiles, simply transform the dataset with the [xarray.Dataset](http://xarray.pydata.org/en/stable/generated/xarray.Dataset.html) accessor method [argo.point2profile](https://argopy.readthedocs.io/en/latest/api.htmlargopy.ArgoAccessor.point2profile):
###Code
ds = ds.argo.point2profile()
ds
###Output
_____no_output_____
###Markdown
You can also fetch data for a specific float using its [WMO number](https://www.wmo.int/pages/prog/amp/mmop/wmo-number-rules.html):
###Code
f = ArgoDataFetcher().float(6902746)
f.to_xarray()
###Output
_____no_output_____
###Markdown
and in this case, you can visualise float data without download:
###Code
f.dashboard()
###Output
_____no_output_____
###Markdown
or for a float profile using the cycle number:
###Code
ArgoDataFetcher().profile(6902755, 12).to_xarray()
###Output
_____no_output_____
###Markdown
and finally, if you want to work with data interpolated on the same vertical levels (in pressure), then you can simply do it as well like:
###Code
import numpy as np
ds = ArgoDataFetcher().float(6902746).to_xarray()
ds = ds.argo.point2profile()
ds
ds.argo.interp_std_levels(np.arange(0,1000,10))
###Output
_____no_output_____
###Markdown
don't forget that if you prefer to work with Pandas dataframe, it is as simple as:
###Code
ds.to_dataframe()
###Output
_____no_output_____
###Markdown
Import the ``argopy`` data fetcher:
###Code
from argopy import DataFetcher as ArgoDataFetcher
###Output
_____no_output_____
###Markdown
Then, to get access to Argo data, all you need is 1 line of code:
###Code
ds = ArgoDataFetcher().region([-75, -45, 20, 30, 0, 100, '2011', '2012']).to_xarray()
###Output
_____no_output_____
###Markdown
In this example, we used a data fetcher to get data for a given space/time region. We retrieved all Argo data measurements from 75W to 45W, 20N to 30N, 0db to 100db and for the year 2011 (the max date is exclusive). Data are returned as a collection of measurements in a [xarray.Dataset](http://xarray.pydata.org/en/stable/generated/xarray.Dataset.html).
###Code
print(ds)
###Output
_____no_output_____
###Markdown
Fetched data are returned as a 1D array collection of measurements. If you prefer to work with a 2D array collection of vertical profiles, simply transform the dataset with the [xarray.Dataset](http://xarray.pydata.org/en/stable/generated/xarray.Dataset.html) accessor method [argo.point2profile](https://argopy.readthedocs.io/en/latest/api.html#argopy.ArgoAccessor.point2profile):
###Code
ds = ds.argo.point2profile()
print(ds)
###Output
_____no_output_____
###Markdown
You can also fetch data for a specific float using its [WMO number](https://www.wmo.int/pages/prog/amp/mmop/wmo-number-rules.html):
###Code
ds = ArgoDataFetcher().float(6902746).to_xarray()
###Output
_____no_output_____
###Markdown
or for a float profile using the cycle number:
###Code
ds = ArgoDataFetcher().profile(6902755, 12).to_xarray()
###Output
_____no_output_____ |
codes/labs_lecture05/lab04_test_set/test_set_demo.ipynb | ###Markdown
Lab 04 : Test set evaluation -- demo
###Code
# For Google Colaboratory
import sys, os
if 'google.colab' in sys.modules:
from google.colab import drive
drive.mount('/content/gdrive')
file_name = 'test_set_demo.ipynb'
import subprocess
path_to_file = subprocess.check_output('find . -type f -name ' + str(file_name), shell=True).decode("utf-8")
print(path_to_file)
path_to_file = path_to_file.replace(file_name,"").replace('\n',"")
os.chdir(path_to_file)
!pwd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from random import randint
import utils
###Output
_____no_output_____
###Markdown
Download the data and print the sizes
###Code
from utils import check_mnist_dataset_exists
data_path=check_mnist_dataset_exists()
train_data=torch.load(data_path+'mnist/train_data.pt')
train_label=torch.load(data_path+'mnist/train_label.pt')
test_data=torch.load(data_path+'mnist/test_data.pt')
test_label=torch.load(data_path+'mnist/test_label.pt')
###Output
_____no_output_____
###Markdown
Make a ONE layer net class.
###Code
class one_layer_net(nn.Module):
def __init__(self, input_size, output_size):
super(one_layer_net , self).__init__()
self.linear_layer = nn.Linear( input_size, output_size , bias=False)
def forward(self, x):
scores = self.linear_layer(x)
return scores
###Output
_____no_output_____
###Markdown
Build the net
###Code
net=one_layer_net(784,10)
print(net)
###Output
one_layer_net(
(linear_layer): Linear(in_features=784, out_features=10, bias=False)
)
###Markdown
Choose the criterion, optimizer, batchsize, learning rate
###Code
criterion = nn.CrossEntropyLoss()
optimizer=torch.optim.SGD( net.parameters() , lr=0.01 )
bs=200
###Output
_____no_output_____
###Markdown
Do 30 passes through the training set
###Code
for epoch in range(30):
running_loss=0
running_error=0
num_batches=0
shuffled_indices=torch.randperm(60000)
for count in range(0,60000,bs):
optimizer.zero_grad()
indices=shuffled_indices[count:count+bs]
minibatch_data = train_data[indices]
minibatch_label= train_label[indices]
inputs = minibatch_data.view(bs,784)
inputs.requires_grad_()
scores=net( inputs )
loss = criterion( scores , minibatch_label)
loss.backward()
optimizer.step()
# compute and accumulate stats
running_loss += loss.detach().item()
error = utils.get_error( scores.detach() , minibatch_label)
running_error += error.item()
num_batches+=1
# compute stats for the full training set
total_loss = running_loss/num_batches
total_error = running_error/num_batches
print('epoch=',epoch, '\t loss=', total_loss , '\t error=', total_error*100 ,'percent')
###Output
epoch= 0 loss= 1.4684505041440328 error= 30.008333424727123 percent
epoch= 1 loss= 0.8564613926410675 error= 16.839999973773956 percent
epoch= 2 loss= 0.6868373988072077 error= 14.933333218097685 percent
epoch= 3 loss= 0.605110855102539 error= 13.984999656677246 percent
epoch= 4 loss= 0.5555406566460928 error= 13.373333096504211 percent
epoch= 5 loss= 0.5218008504311243 error= 12.863333423932394 percent
epoch= 6 loss= 0.49696191052595773 error= 12.485000014305115 percent
epoch= 7 loss= 0.4778312099973361 error= 12.136666735013325 percent
epoch= 8 loss= 0.4624840295314789 error= 11.886666735013327 percent
epoch= 9 loss= 0.4498103124896685 error= 11.656666855017345 percent
epoch= 10 loss= 0.4391136873761813 error= 11.566666980584463 percent
epoch= 11 loss= 0.42996521989504494 error= 11.388333658377329 percent
epoch= 12 loss= 0.4219842804471652 error= 11.235000193119049 percent
epoch= 13 loss= 0.41503328015406926 error= 11.100000341733297 percent
epoch= 14 loss= 0.40877720693747205 error= 10.94833360115687 percent
epoch= 15 loss= 0.4032208156585693 error= 10.801666855812073 percent
epoch= 16 loss= 0.39817363063494365 error= 10.710000256697336 percent
epoch= 17 loss= 0.39360576182603835 error= 10.621666848659515 percent
epoch= 18 loss= 0.38941252648830416 error= 10.533333559830984 percent
epoch= 19 loss= 0.38555791984001797 error= 10.453333636124928 percent
epoch= 20 loss= 0.38201582511266075 error= 10.346666971842449 percent
epoch= 21 loss= 0.3787171320617199 error= 10.27833354473114 percent
epoch= 22 loss= 0.37563293904066086 error= 10.210000375906626 percent
epoch= 23 loss= 0.3727933439115683 error= 10.161666870117188 percent
epoch= 24 loss= 0.3701192660133044 error= 10.11166669925054 percent
epoch= 25 loss= 0.36760630453626314 error= 10.028333445390066 percent
epoch= 26 loss= 0.36521513839562736 error= 9.971666971842447 percent
epoch= 27 loss= 0.36297007381916047 error= 9.93500018119812 percent
epoch= 28 loss= 0.36087442845106127 error= 9.865000228087109 percent
epoch= 29 loss= 0.358873764226834 error= 9.84166693687439 percent
###Markdown
Now that the network is trained and makes about 10% error on the training set, we are going to see how well it does on the test set...
###Code
running_error=0
num_batches=0
for i in range(0,10000,bs):
# extract the minibatch
minibatch_data = test_data[i:i+bs]
minibatch_label= test_label[i:i+bs]
# reshape the minibatch
inputs = minibatch_data.view(bs,784)
# feed it to the network
scores=net( inputs )
# compute the error made on this batch
error = utils.get_error( scores , minibatch_label)
# add it to the running error
running_error += error.item()
num_batches+=1
# compute error rate on the full test set
total_error = running_error/num_batches
print( 'error rate on test set =', total_error*100 ,'percent')
###Output
error rate on test set = 9.3100004196167 percent
###Markdown
Choose an image at random from the test set and see how good or bad the predictions are
###Code
# choose a picture at random
idx=randint(0, 10000-1)
im=test_data[idx]
# display the picture
utils.show(im)
# feed it to the net and display the confidence scores
scores = net( im.view(1,784))
probs= F.softmax(scores, dim=1)
utils.show_prob_mnist(probs)
###Output
_____no_output_____
###Markdown
Lab 04 : Test set evaluation -- demo
###Code
# For Google Colaboratory
import sys, os
if 'google.colab' in sys.modules:
# mount google drive
from google.colab import drive
drive.mount('/content/gdrive')
# find automatically the path of the folder containing "file_name" :
file_name = 'test_set_demo.ipynb'
import subprocess
path_to_file = subprocess.check_output('find . -type f -name ' + str(file_name), shell=True).decode("utf-8")
path_to_file = path_to_file.replace(file_name,"").replace('\n',"")
# if previous search failed or too long, comment the previous line and simply write down manually the path below :
#path_to_file = '/content/gdrive/My Drive/CE7454_2020_codes/codes/labs_lecture05/lab04_test_set'
print(path_to_file)
# change current path to the folder containing "file_name"
os.chdir(path_to_file)
!pwd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from random import randint
import utils
###Output
_____no_output_____
###Markdown
Download the data and print the sizes
###Code
from utils import check_mnist_dataset_exists
data_path=check_mnist_dataset_exists()
train_data=torch.load(data_path+'mnist/train_data.pt')
train_label=torch.load(data_path+'mnist/train_label.pt')
test_data=torch.load(data_path+'mnist/test_data.pt')
test_label=torch.load(data_path+'mnist/test_label.pt')
###Output
_____no_output_____
###Markdown
Make a ONE layer net class.
###Code
class one_layer_net(nn.Module):
def __init__(self, input_size, output_size):
super(one_layer_net , self).__init__()
self.linear_layer = nn.Linear( input_size, output_size , bias=False)
def forward(self, x):
scores = self.linear_layer(x)
return scores
###Output
_____no_output_____
###Markdown
Build the net
###Code
net=one_layer_net(784,10)
print(net)
###Output
one_layer_net(
(linear_layer): Linear(in_features=784, out_features=10, bias=False)
)
###Markdown
Choose the criterion, optimizer, batchsize, learning rate
###Code
criterion = nn.CrossEntropyLoss()
optimizer=torch.optim.SGD( net.parameters() , lr=0.01 )
bs=200
###Output
_____no_output_____
###Markdown
Do 30 passes through the training set
###Code
for epoch in range(30):
running_loss=0
running_error=0
num_batches=0
shuffled_indices=torch.randperm(60000)
for count in range(0,60000,bs):
optimizer.zero_grad()
indices=shuffled_indices[count:count+bs]
minibatch_data = train_data[indices]
minibatch_label= train_label[indices]
inputs = minibatch_data.view(bs,784)
inputs.requires_grad_()
scores=net( inputs )
loss = criterion( scores , minibatch_label)
loss.backward()
optimizer.step()
# compute and accumulate stats
running_loss += loss.detach().item()
error = utils.get_error( scores.detach() , minibatch_label)
running_error += error.item()
num_batches+=1
# compute stats for the full training set
total_loss = running_loss/num_batches
total_error = running_error/num_batches
print('epoch=',epoch, '\t loss=', total_loss , '\t error=', total_error*100 ,'percent')
###Output
epoch= 0 loss= 1.4684505041440328 error= 30.008333424727123 percent
epoch= 1 loss= 0.8564613926410675 error= 16.839999973773956 percent
epoch= 2 loss= 0.6868373988072077 error= 14.933333218097685 percent
epoch= 3 loss= 0.605110855102539 error= 13.984999656677246 percent
epoch= 4 loss= 0.5555406566460928 error= 13.373333096504211 percent
epoch= 5 loss= 0.5218008504311243 error= 12.863333423932394 percent
epoch= 6 loss= 0.49696191052595773 error= 12.485000014305115 percent
epoch= 7 loss= 0.4778312099973361 error= 12.136666735013325 percent
epoch= 8 loss= 0.4624840295314789 error= 11.886666735013327 percent
epoch= 9 loss= 0.4498103124896685 error= 11.656666855017345 percent
epoch= 10 loss= 0.4391136873761813 error= 11.566666980584463 percent
epoch= 11 loss= 0.42996521989504494 error= 11.388333658377329 percent
epoch= 12 loss= 0.4219842804471652 error= 11.235000193119049 percent
epoch= 13 loss= 0.41503328015406926 error= 11.100000341733297 percent
epoch= 14 loss= 0.40877720693747205 error= 10.94833360115687 percent
epoch= 15 loss= 0.4032208156585693 error= 10.801666855812073 percent
epoch= 16 loss= 0.39817363063494365 error= 10.710000256697336 percent
epoch= 17 loss= 0.39360576182603835 error= 10.621666848659515 percent
epoch= 18 loss= 0.38941252648830416 error= 10.533333559830984 percent
epoch= 19 loss= 0.38555791984001797 error= 10.453333636124928 percent
epoch= 20 loss= 0.38201582511266075 error= 10.346666971842449 percent
epoch= 21 loss= 0.3787171320617199 error= 10.27833354473114 percent
epoch= 22 loss= 0.37563293904066086 error= 10.210000375906626 percent
epoch= 23 loss= 0.3727933439115683 error= 10.161666870117188 percent
epoch= 24 loss= 0.3701192660133044 error= 10.11166669925054 percent
epoch= 25 loss= 0.36760630453626314 error= 10.028333445390066 percent
epoch= 26 loss= 0.36521513839562736 error= 9.971666971842447 percent
epoch= 27 loss= 0.36297007381916047 error= 9.93500018119812 percent
epoch= 28 loss= 0.36087442845106127 error= 9.865000228087109 percent
epoch= 29 loss= 0.358873764226834 error= 9.84166693687439 percent
###Markdown
Now that the network is trained and makes about 10% error on the training set, we are going to see how well it does on the test set...
###Code
running_error=0
num_batches=0
for i in range(0,10000,bs):
# extract the minibatch
minibatch_data = test_data[i:i+bs]
minibatch_label= test_label[i:i+bs]
# reshape the minibatch
inputs = minibatch_data.view(bs,784)
# feed it to the network
scores=net( inputs )
# compute the error made on this batch
error = utils.get_error( scores , minibatch_label)
# add it to the running error
running_error += error.item()
num_batches+=1
# compute error rate on the full test set
total_error = running_error/num_batches
print( 'error rate on test set =', total_error*100 ,'percent')
###Output
error rate on test set = 9.3100004196167 percent
###Markdown
Choose an image at random from the test set and see how good or bad the predictions are
###Code
# choose a picture at random
idx=randint(0, 10000-1)
im=test_data[idx]
# display the picture
utils.show(im)
# feed it to the net and display the confidence scores
scores = net( im.view(1,784))
probs= F.softmax(scores, dim=1)
utils.show_prob_mnist(probs)
###Output
_____no_output_____
###Markdown
Lab 04 : Test set evaluation -- demo
###Code
# For Google Colaboratory
import sys, os
if 'google.colab' in sys.modules:
# mount google drive
from google.colab import drive
drive.mount('/content/gdrive')
path_to_file = '/content/gdrive/My Drive/CS4243_codes/codes/labs_lecture05/lab04_test_set'
print(path_to_file)
# move to Google Drive directory
os.chdir(path_to_file)
!pwd
import torch
import torch.nn as nn
import torch.optim as optim
from random import randint
import utils
###Output
_____no_output_____
###Markdown
Download the data and print the sizes
###Code
from utils import check_mnist_dataset_exists
data_path=check_mnist_dataset_exists()
train_data=torch.load(data_path+'mnist/train_data.pt')
train_label=torch.load(data_path+'mnist/train_label.pt')
test_data=torch.load(data_path+'mnist/test_data.pt')
test_label=torch.load(data_path+'mnist/test_label.pt')
###Output
_____no_output_____
###Markdown
Make a ONE layer net class.
###Code
class one_layer_net(nn.Module):
def __init__(self, input_size, output_size):
super(one_layer_net , self).__init__()
# a one-layer fully-connected network (MLP)
self.linear_layer = nn.Linear( input_size, output_size , bias=False)
def forward(self, x):
# output the scores directly
scores = self.linear_layer(x)
return scores
###Output
_____no_output_____
###Markdown
Build the net
###Code
net=one_layer_net(784,10)
print(net)
###Output
one_layer_net(
(linear_layer): Linear(in_features=784, out_features=10, bias=False)
)
###Markdown
Choose the criterion, optimizer, batchsize, learning rate
###Code
criterion = nn.CrossEntropyLoss()
optimizer=torch.optim.SGD( net.parameters() , lr=0.01 )
bs=200
###Output
_____no_output_____
###Markdown
Do 30 passes through the training set
###Code
for epoch in range(30):
running_loss=0
running_error=0
num_batches=0
shuffled_indices=torch.randperm(60000)
for count in range(0,60000,bs):
optimizer.zero_grad()
indices=shuffled_indices[count:count+bs]
minibatch_data = train_data[indices]
minibatch_label= train_label[indices]
inputs = minibatch_data.view(bs,784)
inputs.requires_grad_()
scores=net( inputs )
loss = criterion( scores , minibatch_label)
loss.backward()
optimizer.step()
# compute and accumulate stats
running_loss += loss.detach().item()
error = utils.get_error( scores.detach() , minibatch_label)
running_error += error.item()
num_batches+=1
# compute stats for the full training set
total_loss = running_loss/num_batches
total_error = running_error/num_batches
print('epoch=',epoch, '\t loss=', total_loss , '\t error=', total_error*100 ,'percent')
###Output
epoch= 0 loss= 1.4437378692626952 error= 27.724999944369 percent
epoch= 1 loss= 0.8495811134576797 error= 16.763333439826965 percent
epoch= 2 loss= 0.684249259630839 error= 14.93666648864746 percent
epoch= 3 loss= 0.6038358103235563 error= 14.021666467189789 percent
epoch= 4 loss= 0.5549377659956615 error= 13.358333230018616 percent
epoch= 5 loss= 0.5214912140369415 error= 12.855000038941702 percent
epoch= 6 loss= 0.496866315305233 error= 12.488333344459534 percent
epoch= 7 loss= 0.4778200144569079 error= 12.143333276112875 percent
epoch= 8 loss= 0.462452745338281 error= 11.916666686534882 percent
epoch= 9 loss= 0.44988458315531415 error= 11.7133336464564 percent
epoch= 10 loss= 0.43926046838363014 error= 11.468333443005879 percent
epoch= 11 loss= 0.4301446111996969 error= 11.330000181992848 percent
epoch= 12 loss= 0.42219560166200004 error= 11.208333532015482 percent
epoch= 13 loss= 0.4152207482854525 error= 11.053333640098572 percent
epoch= 14 loss= 0.4090089357892672 error= 10.96833344300588 percent
epoch= 15 loss= 0.40343874037265776 error= 10.808333476384481 percent
epoch= 16 loss= 0.39840445985396705 error= 10.726666788260141 percent
epoch= 17 loss= 0.3938293455044429 error= 10.618333538373312 percent
epoch= 18 loss= 0.38965175608793895 error= 10.551666975021362 percent
epoch= 19 loss= 0.38581369668245313 error= 10.45000014702479 percent
epoch= 20 loss= 0.3822577769557635 error= 10.398333688577017 percent
epoch= 21 loss= 0.3789850358168284 error= 10.286666850248972 percent
epoch= 22 loss= 0.37589806601405146 error= 10.228333612283071 percent
epoch= 23 loss= 0.3730201779305935 error= 10.166666865348816 percent
epoch= 24 loss= 0.3703724993268649 error= 10.111666937669119 percent
epoch= 25 loss= 0.36784836699565254 error= 10.033333341280619 percent
epoch= 26 loss= 0.3654692569375038 error= 10.000000158945719 percent
epoch= 27 loss= 0.36324590335289636 error= 9.961666961510977 percent
epoch= 28 loss= 0.361117193599542 error= 9.931666990121206 percent
epoch= 29 loss= 0.3591053718328476 error= 9.853333353996277 percent
###Markdown
Now that the network is trained and makes about 10% error on the training set, we are going to see how well it does on the test set...
###Code
running_error=0
num_batches=0
for i in range(0,10000,bs):
# extract the minibatch
minibatch_data = test_data[i:i+bs]
minibatch_label= test_label[i:i+bs]
# reshape the minibatch
inputs = minibatch_data.view(bs,784)
# feed it to the network
scores=net( inputs )
# compute the error made on this batch
error = utils.get_error( scores , minibatch_label)
# add it to the running error
running_error += error.item()
num_batches+=1
# compute error rate on the full test set
total_error = running_error/num_batches
print( 'error rate on test set =', total_error*100 ,'percent')
###Output
error rate on test set = 9.170000195503235 percent
###Markdown
Choose an image at random from the test set and see how good or bad the predictions are
###Code
# choose a picture at random
idx=randint(0, 10000-1)
im=test_data[idx]
# display the picture
utils.show(im)
# feed it to the net and display the confidence scores
scores = net( im.view(1,784))
probs= torch.softmax(scores, dim=1)
utils.show_prob_mnist(probs)
###Output
_____no_output_____
###Markdown
Lab 04 : Test set evaluation -- demo
###Code
# For Google Colaboratory
import sys, os
if 'google.colab' in sys.modules:
# mount google drive
from google.colab import drive
drive.mount('/content/gdrive')
# find automatically the path of the folder containing "file_name" :
file_name = 'test_set_demo.ipynb'
import subprocess
path_to_file = subprocess.check_output('find . -type f -name ' + str(file_name), shell=True).decode("utf-8")
path_to_file = path_to_file.replace(file_name,"").replace('\n',"")
# if previous search failed or too long, comment the previous line and simply write down manually the path below :
#path_to_file = '/content/gdrive/My Drive/CS5242_2021_codes/codes/labs_lecture05/lab04_test_set'
print(path_to_file)
# change current path to the folder containing "file_name"
os.chdir(path_to_file)
!pwd
import torch
import torch.nn as nn
import torch.optim as optim
from random import randint
import utils
###Output
_____no_output_____
###Markdown
Download the data and print the sizes
###Code
from utils import check_mnist_dataset_exists
data_path=check_mnist_dataset_exists()
train_data=torch.load(data_path+'mnist/train_data.pt')
train_label=torch.load(data_path+'mnist/train_label.pt')
test_data=torch.load(data_path+'mnist/test_data.pt')
test_label=torch.load(data_path+'mnist/test_label.pt')
###Output
_____no_output_____
###Markdown
Make a ONE layer net class.
###Code
class one_layer_net(nn.Module):
def __init__(self, input_size, output_size):
super(one_layer_net , self).__init__()
self.linear_layer = nn.Linear( input_size, output_size , bias=False)
def forward(self, x):
scores = self.linear_layer(x)
return scores
###Output
_____no_output_____
###Markdown
Build the net
###Code
net=one_layer_net(784,10)
print(net)
###Output
one_layer_net(
(linear_layer): Linear(in_features=784, out_features=10, bias=False)
)
###Markdown
Choose the criterion, optimizer, batchsize, learning rate
###Code
criterion = nn.CrossEntropyLoss()
optimizer=torch.optim.SGD( net.parameters() , lr=0.01 )
bs=200
###Output
_____no_output_____
###Markdown
Do 30 passes through the training set
###Code
for epoch in range(30):
running_loss=0
running_error=0
num_batches=0
shuffled_indices=torch.randperm(60000)
for count in range(0,60000,bs):
optimizer.zero_grad()
indices=shuffled_indices[count:count+bs]
minibatch_data = train_data[indices]
minibatch_label= train_label[indices]
inputs = minibatch_data.view(bs,784)
inputs.requires_grad_()
scores=net( inputs )
loss = criterion( scores , minibatch_label)
loss.backward()
optimizer.step()
# compute and accumulate stats
running_loss += loss.detach().item()
error = utils.get_error( scores.detach() , minibatch_label)
running_error += error.item()
num_batches+=1
# compute stats for the full training set
total_loss = running_loss/num_batches
total_error = running_error/num_batches
print('epoch=',epoch, '\t loss=', total_loss , '\t error=', total_error*100 ,'percent')
###Output
epoch= 0 loss= 1.4437378692626952 error= 27.724999944369 percent
epoch= 1 loss= 0.8495811134576797 error= 16.763333439826965 percent
epoch= 2 loss= 0.684249259630839 error= 14.93666648864746 percent
epoch= 3 loss= 0.6038358103235563 error= 14.021666467189789 percent
epoch= 4 loss= 0.5549377659956615 error= 13.358333230018616 percent
epoch= 5 loss= 0.5214912140369415 error= 12.855000038941702 percent
epoch= 6 loss= 0.496866315305233 error= 12.488333344459534 percent
epoch= 7 loss= 0.4778200144569079 error= 12.143333276112875 percent
epoch= 8 loss= 0.462452745338281 error= 11.916666686534882 percent
epoch= 9 loss= 0.44988458315531415 error= 11.7133336464564 percent
epoch= 10 loss= 0.43926046838363014 error= 11.468333443005879 percent
epoch= 11 loss= 0.4301446111996969 error= 11.330000181992848 percent
epoch= 12 loss= 0.42219560166200004 error= 11.208333532015482 percent
epoch= 13 loss= 0.4152207482854525 error= 11.053333640098572 percent
epoch= 14 loss= 0.4090089357892672 error= 10.96833344300588 percent
epoch= 15 loss= 0.40343874037265776 error= 10.808333476384481 percent
epoch= 16 loss= 0.39840445985396705 error= 10.726666788260141 percent
epoch= 17 loss= 0.3938293455044429 error= 10.618333538373312 percent
epoch= 18 loss= 0.38965175608793895 error= 10.551666975021362 percent
epoch= 19 loss= 0.38581369668245313 error= 10.45000014702479 percent
epoch= 20 loss= 0.3822577769557635 error= 10.398333688577017 percent
epoch= 21 loss= 0.3789850358168284 error= 10.286666850248972 percent
epoch= 22 loss= 0.37589806601405146 error= 10.228333612283071 percent
epoch= 23 loss= 0.3730201779305935 error= 10.166666865348816 percent
epoch= 24 loss= 0.3703724993268649 error= 10.111666937669119 percent
epoch= 25 loss= 0.36784836699565254 error= 10.033333341280619 percent
epoch= 26 loss= 0.3654692569375038 error= 10.000000158945719 percent
epoch= 27 loss= 0.36324590335289636 error= 9.961666961510977 percent
epoch= 28 loss= 0.361117193599542 error= 9.931666990121206 percent
epoch= 29 loss= 0.3591053718328476 error= 9.853333353996277 percent
###Markdown
Now that the network is trained and makes about 10% error on the training set, we are going to see how well it does on the test set...
###Code
running_error=0
num_batches=0
for i in range(0,10000,bs):
# extract the minibatch
minibatch_data = test_data[i:i+bs]
minibatch_label= test_label[i:i+bs]
# reshape the minibatch
inputs = minibatch_data.view(bs,784)
# feed it to the network
scores=net( inputs )
# compute the error made on this batch
error = utils.get_error( scores , minibatch_label)
# add it to the running error
running_error += error.item()
num_batches+=1
# compute error rate on the full test set
total_error = running_error/num_batches
print( 'error rate on test set =', total_error*100 ,'percent')
###Output
error rate on test set = 9.170000195503235 percent
###Markdown
Choose an image at random from the test set and see how good or bad the predictions are
###Code
# choose a picture at random
idx=randint(0, 10000-1)
im=test_data[idx]
# display the picture
utils.show(im)
# feed it to the net and display the confidence scores
scores = net( im.view(1,784))
probs= torch.softmax(scores, dim=1)
utils.show_prob_mnist(probs)
###Output
_____no_output_____
###Markdown
Lab 04 : Test set evaluation -- demo
###Code
# For Google Colaboratory
import sys, os
if 'google.colab' in sys.modules:
# mount google drive
from google.colab import drive
drive.mount('/content/gdrive')
path_to_file = '/content/gdrive/My Drive/CS4243_codes/codes/labs_lecture05/lab04_test_set'
print(path_to_file)
# move to Google Drive directory
os.chdir(path_to_file)
!pwd
import torch
import torch.nn as nn
import torch.optim as optim
from random import randint
import utils
###Output
_____no_output_____
###Markdown
Download the data and print the sizes
###Code
from utils import check_mnist_dataset_exists
data_path=check_mnist_dataset_exists()
train_data=torch.load(data_path+'mnist/train_data.pt')
train_label=torch.load(data_path+'mnist/train_label.pt')
test_data=torch.load(data_path+'mnist/test_data.pt')
test_label=torch.load(data_path+'mnist/test_label.pt')
###Output
_____no_output_____
###Markdown
Make a ONE layer net class.
###Code
class one_layer_net(nn.Module):
def __init__(self, input_size, output_size):
super(one_layer_net , self).__init__()
self.linear_layer = nn.Linear( input_size, output_size , bias=False)
def forward(self, x):
scores = self.linear_layer(x)
return scores
###Output
_____no_output_____
###Markdown
Build the net
###Code
net=one_layer_net(784,10)
print(net)
###Output
one_layer_net(
(linear_layer): Linear(in_features=784, out_features=10, bias=False)
)
###Markdown
Choose the criterion, optimizer, batchsize, learning rate
###Code
criterion = nn.CrossEntropyLoss()
optimizer=torch.optim.SGD( net.parameters() , lr=0.01 )
bs=200
###Output
_____no_output_____
###Markdown
Do 30 passes through the training set
###Code
for epoch in range(30):
running_loss=0
running_error=0
num_batches=0
shuffled_indices=torch.randperm(60000)
for count in range(0,60000,bs):
optimizer.zero_grad()
indices=shuffled_indices[count:count+bs]
minibatch_data = train_data[indices]
minibatch_label= train_label[indices]
inputs = minibatch_data.view(bs,784)
inputs.requires_grad_()
scores=net( inputs )
loss = criterion( scores , minibatch_label)
loss.backward()
optimizer.step()
# compute and accumulate stats
running_loss += loss.detach().item()
error = utils.get_error( scores.detach() , minibatch_label)
running_error += error.item()
num_batches+=1
# compute stats for the full training set
total_loss = running_loss/num_batches
total_error = running_error/num_batches
print('epoch=',epoch, '\t loss=', total_loss , '\t error=', total_error*100 ,'percent')
###Output
epoch= 0 loss= 1.4437378692626952 error= 27.724999944369 percent
epoch= 1 loss= 0.8495811134576797 error= 16.763333439826965 percent
epoch= 2 loss= 0.684249259630839 error= 14.93666648864746 percent
epoch= 3 loss= 0.6038358103235563 error= 14.021666467189789 percent
epoch= 4 loss= 0.5549377659956615 error= 13.358333230018616 percent
epoch= 5 loss= 0.5214912140369415 error= 12.855000038941702 percent
epoch= 6 loss= 0.496866315305233 error= 12.488333344459534 percent
epoch= 7 loss= 0.4778200144569079 error= 12.143333276112875 percent
epoch= 8 loss= 0.462452745338281 error= 11.916666686534882 percent
epoch= 9 loss= 0.44988458315531415 error= 11.7133336464564 percent
epoch= 10 loss= 0.43926046838363014 error= 11.468333443005879 percent
epoch= 11 loss= 0.4301446111996969 error= 11.330000181992848 percent
epoch= 12 loss= 0.42219560166200004 error= 11.208333532015482 percent
epoch= 13 loss= 0.4152207482854525 error= 11.053333640098572 percent
epoch= 14 loss= 0.4090089357892672 error= 10.96833344300588 percent
epoch= 15 loss= 0.40343874037265776 error= 10.808333476384481 percent
epoch= 16 loss= 0.39840445985396705 error= 10.726666788260141 percent
epoch= 17 loss= 0.3938293455044429 error= 10.618333538373312 percent
epoch= 18 loss= 0.38965175608793895 error= 10.551666975021362 percent
epoch= 19 loss= 0.38581369668245313 error= 10.45000014702479 percent
epoch= 20 loss= 0.3822577769557635 error= 10.398333688577017 percent
epoch= 21 loss= 0.3789850358168284 error= 10.286666850248972 percent
epoch= 22 loss= 0.37589806601405146 error= 10.228333612283071 percent
epoch= 23 loss= 0.3730201779305935 error= 10.166666865348816 percent
epoch= 24 loss= 0.3703724993268649 error= 10.111666937669119 percent
epoch= 25 loss= 0.36784836699565254 error= 10.033333341280619 percent
epoch= 26 loss= 0.3654692569375038 error= 10.000000158945719 percent
epoch= 27 loss= 0.36324590335289636 error= 9.961666961510977 percent
epoch= 28 loss= 0.361117193599542 error= 9.931666990121206 percent
epoch= 29 loss= 0.3591053718328476 error= 9.853333353996277 percent
###Markdown
Now that the network is trained and makes about 10% error on the training set, we are going to see how well it does on the test set...
###Code
running_error=0
num_batches=0
for i in range(0,10000,bs):
# extract the minibatch
minibatch_data = test_data[i:i+bs]
minibatch_label= test_label[i:i+bs]
# reshape the minibatch
inputs = minibatch_data.view(bs,784)
# feed it to the network
scores=net( inputs )
# compute the error made on this batch
error = utils.get_error( scores , minibatch_label)
# add it to the running error
running_error += error.item()
num_batches+=1
# compute error rate on the full test set
total_error = running_error/num_batches
print( 'error rate on test set =', total_error*100 ,'percent')
###Output
error rate on test set = 9.170000195503235 percent
###Markdown
Choose an image at random from the test set and see how good or bad the predictions are
###Code
# choose a picture at random
idx=randint(0, 10000-1)
im=test_data[idx]
# display the picture
utils.show(im)
# feed it to the net and display the confidence scores
scores = net( im.view(1,784))
probs= torch.softmax(scores, dim=1)
utils.show_prob_mnist(probs)
###Output
_____no_output_____
###Markdown
Lab 04 : Test set evaluation -- demo
###Code
# For Google Colaboratory
import sys, os
if 'google.colab' in sys.modules:
# mount google drive
from google.colab import drive
drive.mount('/content/gdrive')
# find automatically the path of the folder containing "file_name" :
file_name = 'test_set_demo.ipynb'
import subprocess
path_to_file = subprocess.check_output('find . -type f -name ' + str(file_name), shell=True).decode("utf-8")
path_to_file = path_to_file.replace(file_name,"").replace('\n',"")
# if previous search failed or too long, comment the previous line and simply write down manually the path below :
#path_to_file = '/content/gdrive/My Drive/AI6103_2020_codes/codes/labs_lecture05/lab04_test_set'
print(path_to_file)
# change current path to the folder containing "file_name"
os.chdir(path_to_file)
!pwd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from random import randint
import utils
###Output
_____no_output_____
###Markdown
Download the data and print the sizes
###Code
from utils import check_mnist_dataset_exists
data_path=check_mnist_dataset_exists()
train_data=torch.load(data_path+'mnist/train_data.pt')
train_label=torch.load(data_path+'mnist/train_label.pt')
test_data=torch.load(data_path+'mnist/test_data.pt')
test_label=torch.load(data_path+'mnist/test_label.pt')
###Output
_____no_output_____
###Markdown
Make a ONE layer net class.
###Code
class one_layer_net(nn.Module):
def __init__(self, input_size, output_size):
super(one_layer_net , self).__init__()
self.linear_layer = nn.Linear( input_size, output_size , bias=False)
def forward(self, x):
scores = self.linear_layer(x)
return scores
###Output
_____no_output_____
###Markdown
Build the net
###Code
net=one_layer_net(784,10)
print(net)
###Output
one_layer_net(
(linear_layer): Linear(in_features=784, out_features=10, bias=False)
)
###Markdown
Choose the criterion, optimizer, batchsize, learning rate
###Code
criterion = nn.CrossEntropyLoss()
optimizer=torch.optim.SGD( net.parameters() , lr=0.01 )
bs=200
###Output
_____no_output_____
###Markdown
Do 30 passes through the training set
###Code
for epoch in range(30):
running_loss=0
running_error=0
num_batches=0
shuffled_indices=torch.randperm(60000)
for count in range(0,60000,bs):
optimizer.zero_grad()
indices=shuffled_indices[count:count+bs]
minibatch_data = train_data[indices]
minibatch_label= train_label[indices]
inputs = minibatch_data.view(bs,784)
inputs.requires_grad_()
scores=net( inputs )
loss = criterion( scores , minibatch_label)
loss.backward()
optimizer.step()
# compute and accumulate stats
running_loss += loss.detach().item()
error = utils.get_error( scores.detach() , minibatch_label)
running_error += error.item()
num_batches+=1
# compute stats for the full training set
total_loss = running_loss/num_batches
total_error = running_error/num_batches
print('epoch=',epoch, '\t loss=', total_loss , '\t error=', total_error*100 ,'percent')
###Output
epoch= 0 loss= 1.4684505041440328 error= 30.008333424727123 percent
epoch= 1 loss= 0.8564613926410675 error= 16.839999973773956 percent
epoch= 2 loss= 0.6868373988072077 error= 14.933333218097685 percent
epoch= 3 loss= 0.605110855102539 error= 13.984999656677246 percent
epoch= 4 loss= 0.5555406566460928 error= 13.373333096504211 percent
epoch= 5 loss= 0.5218008504311243 error= 12.863333423932394 percent
epoch= 6 loss= 0.49696191052595773 error= 12.485000014305115 percent
epoch= 7 loss= 0.4778312099973361 error= 12.136666735013325 percent
epoch= 8 loss= 0.4624840295314789 error= 11.886666735013327 percent
epoch= 9 loss= 0.4498103124896685 error= 11.656666855017345 percent
epoch= 10 loss= 0.4391136873761813 error= 11.566666980584463 percent
epoch= 11 loss= 0.42996521989504494 error= 11.388333658377329 percent
epoch= 12 loss= 0.4219842804471652 error= 11.235000193119049 percent
epoch= 13 loss= 0.41503328015406926 error= 11.100000341733297 percent
epoch= 14 loss= 0.40877720693747205 error= 10.94833360115687 percent
epoch= 15 loss= 0.4032208156585693 error= 10.801666855812073 percent
epoch= 16 loss= 0.39817363063494365 error= 10.710000256697336 percent
epoch= 17 loss= 0.39360576182603835 error= 10.621666848659515 percent
epoch= 18 loss= 0.38941252648830416 error= 10.533333559830984 percent
epoch= 19 loss= 0.38555791984001797 error= 10.453333636124928 percent
epoch= 20 loss= 0.38201582511266075 error= 10.346666971842449 percent
epoch= 21 loss= 0.3787171320617199 error= 10.27833354473114 percent
epoch= 22 loss= 0.37563293904066086 error= 10.210000375906626 percent
epoch= 23 loss= 0.3727933439115683 error= 10.161666870117188 percent
epoch= 24 loss= 0.3701192660133044 error= 10.11166669925054 percent
epoch= 25 loss= 0.36760630453626314 error= 10.028333445390066 percent
epoch= 26 loss= 0.36521513839562736 error= 9.971666971842447 percent
epoch= 27 loss= 0.36297007381916047 error= 9.93500018119812 percent
epoch= 28 loss= 0.36087442845106127 error= 9.865000228087109 percent
epoch= 29 loss= 0.358873764226834 error= 9.84166693687439 percent
###Markdown
Now that the network is trained and makes about 10% error on the training set, we are going to see how well it does on the test set...
###Code
running_error=0
num_batches=0
for i in range(0,10000,bs):
# extract the minibatch
minibatch_data = test_data[i:i+bs]
minibatch_label= test_label[i:i+bs]
# reshape the minibatch
inputs = minibatch_data.view(bs,784)
# feed it to the network
scores=net( inputs )
# compute the error made on this batch
error = utils.get_error( scores , minibatch_label)
# add it to the running error
running_error += error.item()
num_batches+=1
# compute error rate on the full test set
total_error = running_error/num_batches
print( 'error rate on test set =', total_error*100 ,'percent')
###Output
error rate on test set = 9.3100004196167 percent
###Markdown
Choose an image at random from the test set and see how good or bad the predictions are
###Code
# choose a picture at random
idx=randint(0, 10000-1)
im=test_data[idx]
# display the picture
utils.show(im)
# feed it to the net and display the confidence scores
scores = net( im.view(1,784))
probs= F.softmax(scores, dim=1)
utils.show_prob_mnist(probs)
###Output
_____no_output_____
###Markdown
Lab 04 : Test set evaluation -- demo
###Code
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from random import randint
import utils
###Output
_____no_output_____
###Markdown
Download the data and print the sizes
###Code
from utils import check_mnist_dataset_exists
data_path=check_mnist_dataset_exists()
train_data=torch.load(data_path+'mnist/train_data.pt')
train_label=torch.load(data_path+'mnist/train_label.pt')
test_data=torch.load(data_path+'mnist/test_data.pt')
test_label=torch.load(data_path+'mnist/test_label.pt')
###Output
_____no_output_____
###Markdown
Make a ONE layer net class.
###Code
class one_layer_net(nn.Module):
def __init__(self, input_size, output_size):
super(one_layer_net , self).__init__()
self.linear_layer = nn.Linear( input_size, output_size , bias=False)
def forward(self, x):
scores = self.linear_layer(x)
return scores
###Output
_____no_output_____
###Markdown
Build the net
###Code
net=one_layer_net(784,10)
print(net)
###Output
one_layer_net(
(linear_layer): Linear(in_features=784, out_features=10, bias=False)
)
###Markdown
Choose the criterion, optimizer, batchsize, learning rate
###Code
criterion = nn.CrossEntropyLoss()
optimizer=torch.optim.SGD( net.parameters() , lr=0.01 )
bs=200
###Output
_____no_output_____
###Markdown
Do 30 passes through the training set
###Code
for epoch in range(30):
running_loss=0
running_error=0
num_batches=0
shuffled_indices=torch.randperm(60000)
for count in range(0,60000,bs):
optimizer.zero_grad()
indices=shuffled_indices[count:count+bs]
minibatch_data = train_data[indices]
minibatch_label= train_label[indices]
inputs = minibatch_data.view(bs,784)
inputs.requires_grad_()
scores=net( inputs )
loss = criterion( scores , minibatch_label)
loss.backward()
optimizer.step()
# compute and accumulate stats
running_loss += loss.detach().item()
error = utils.get_error( scores.detach() , minibatch_label)
running_error += error.item()
num_batches+=1
# compute stats for the full training set
total_loss = running_loss/num_batches
total_error = running_error/num_batches
print('epoch=',epoch, '\t loss=', total_loss , '\t error=', total_error*100 ,'percent')
###Output
epoch= 0 loss= 1.4684505041440328 error= 30.008333424727123 percent
epoch= 1 loss= 0.8564613926410675 error= 16.839999973773956 percent
epoch= 2 loss= 0.6868373988072077 error= 14.933333218097685 percent
epoch= 3 loss= 0.605110855102539 error= 13.984999656677246 percent
epoch= 4 loss= 0.5555406566460928 error= 13.373333096504211 percent
epoch= 5 loss= 0.5218008504311243 error= 12.863333423932394 percent
epoch= 6 loss= 0.49696191052595773 error= 12.485000014305115 percent
epoch= 7 loss= 0.4778312099973361 error= 12.136666735013325 percent
epoch= 8 loss= 0.4624840295314789 error= 11.886666735013327 percent
epoch= 9 loss= 0.4498103124896685 error= 11.656666855017345 percent
epoch= 10 loss= 0.4391136873761813 error= 11.566666980584463 percent
epoch= 11 loss= 0.42996521989504494 error= 11.388333658377329 percent
epoch= 12 loss= 0.4219842804471652 error= 11.235000193119049 percent
epoch= 13 loss= 0.41503328015406926 error= 11.100000341733297 percent
epoch= 14 loss= 0.40877720693747205 error= 10.94833360115687 percent
epoch= 15 loss= 0.4032208156585693 error= 10.801666855812073 percent
epoch= 16 loss= 0.39817363063494365 error= 10.710000256697336 percent
epoch= 17 loss= 0.39360576182603835 error= 10.621666848659515 percent
epoch= 18 loss= 0.38941252648830416 error= 10.533333559830984 percent
epoch= 19 loss= 0.38555791984001797 error= 10.453333636124928 percent
epoch= 20 loss= 0.38201582511266075 error= 10.346666971842449 percent
epoch= 21 loss= 0.3787171320617199 error= 10.27833354473114 percent
epoch= 22 loss= 0.37563293904066086 error= 10.210000375906626 percent
epoch= 23 loss= 0.3727933439115683 error= 10.161666870117188 percent
epoch= 24 loss= 0.3701192660133044 error= 10.11166669925054 percent
epoch= 25 loss= 0.36760630453626314 error= 10.028333445390066 percent
epoch= 26 loss= 0.36521513839562736 error= 9.971666971842447 percent
epoch= 27 loss= 0.36297007381916047 error= 9.93500018119812 percent
epoch= 28 loss= 0.36087442845106127 error= 9.865000228087109 percent
epoch= 29 loss= 0.358873764226834 error= 9.84166693687439 percent
###Markdown
Now that the network is trained and makes about 10% error on the training set, we are going to see how well it does on the test set...
###Code
running_error=0
num_batches=0
for i in range(0,10000,bs):
# extract the minibatch
minibatch_data = test_data[i:i+bs]
minibatch_label= test_label[i:i+bs]
# reshape the minibatch
inputs = minibatch_data.view(bs,784)
# feed it to the network
scores=net( inputs )
# compute the error made on this batch
error = utils.get_error( scores , minibatch_label)
# add it to the running error
running_error += error.item()
num_batches+=1
# compute error rate on the full test set
total_error = running_error/num_batches
print( 'error rate on test set =', total_error*100 ,'percent')
###Output
error rate on test set = 9.3100004196167 percent
###Markdown
Choose an image at random from the test set and see how good or bad the predictions are
###Code
# choose a picture at random
idx=randint(0, 10000-1)
im=test_data[idx]
# display the picture
utils.show(im)
# feed it to the net and display the confidence scores
scores = net( im.view(1,784))
probs= F.softmax(scores, dim=1)
utils.show_prob_mnist(probs)
###Output
_____no_output_____ |
movies/movies_logistic_regression.ipynb | ###Markdown
1Strategy ML Immersion Day Building an xgboost model from movie data
###Code
import json
import math
import sys
import boto3
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sagemaker as sm
from sagemaker.amazon.amazon_estimator import get_image_uri
import workshop_utils as wu
# prevent warnings from displaying
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Initialize variables
###Code
bucket = '1s-ml'
# !!!!!!!!!!!!!!!!!!
# PUT YOUR NAME HERE
your_name = 'agraves'
model_artifacts_location = f's3://{bucket}/movies/artifacts/{your_name}'
role = sm.get_execution_role()
sm_session = sm.session.Session()
print(f'IAM Role: {role}')
ratings = 'movies/data/title.ratings.tsv'
basics = 'movies/data/title.basics.tsv'
###Output
IAM Role: arn:aws:iam::842337631775:role/service-role/AmazonSageMaker-ExecutionRole-20190722T122244
###Markdown
A note about this datasource: https://datasets.imdbws.com. We will be downloading the data from S3 in order to inspect it and perform any cleanup necessary before we train our model.
###Code
s3 = boto3.resource('s3')
s3.Bucket(bucket).download_file(ratings, 'ratings.tsv')
s3.Bucket(bucket).download_file(basics, 'basics.tsv')
ratings_csv = pd.read_csv('ratings.tsv', sep='\t')
basics_csv = pd.read_csv('basics.tsv', sep='\t')
movie_data = pd.merge(ratings_csv, basics_csv, how='inner', on='tconst')
print(f'Movie Data Shape: {movie_data.shape}')
movie_data.head(15)
###Output
Movie Data Shape: (977590, 11)
###Markdown
Cleanup There are several unnecessary columns in this data, as well as observations we aren't concerned about. This is an investigation of movie ratings, so we can eliminate the rows which contain data about television shows. This data also contains records from silent films. We can make a reasonable assumption that silent film appreciation is a bit different from modern film appreciation, so we will drop these observations as well.
###Code
# Eliminate TV Shows
movie_data = movie_data[(movie_data.titleType == 'movie') | (movie_data.titleType == 'short') | (movie_data.titleType == 'tvMovie')]
# Shape: (395863, 11)
# Limit to only years with talkies
movie_data = movie_data[movie_data.startYear != '\\N']
movie_data.startYear = movie_data.startYear.astype(int)
movie_data = movie_data[movie_data.startYear > 1927]
# Shape: (383612, 11)
# Remove unnecessary columns
movie_data.drop('originalTitle', axis=1, inplace=True)
movie_data.drop('endYear', axis=1, inplace=True)
movie_data.drop('startYear', axis=1, inplace=True)
movie_data.drop('tconst', axis=1, inplace=True)
movie_data.drop('primaryTitle', axis=1, inplace=True)
movie_data.drop('genres', axis=1, inplace=True)
# I am working to one hot encode the genres column. It requires a custom function.
movie_data.head(15)
###Output
_____no_output_____
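###Markdown
As an aside, the comment above mentions one-hot encoding the genres column with a custom function. A fully custom function may not be needed: pandas can expand a multi-label string column directly. The sketch below is illustrative only; it assumes the raw IMDB genres field is comma-separated and it would have to run before the column is dropped, so it operates on a small stand-in frame here.
###Code
# Illustrative sketch on a stand-in frame (the real genres column was dropped above).
example = pd.DataFrame({'genres': ['Comedy,Drama', 'Drama', 'Action,Comedy']})
genre_dummies = example['genres'].str.get_dummies(sep=',')
genre_dummies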
###Markdown
There are some missing values in this data that are encoded as `\N` rather than NaN. We need to convert them to NaN before we can drop them.
###Code
# Convert \\N to NaN
movie_data = movie_data[movie_data != r'\N']
# Check to see how many NaN values we have now that we've dropped the /N entries
movie_data.isna().sum()
# Remove any observations with null values
movie_data.dropna(inplace=True)
movie_data.isna().sum()
###Output
_____no_output_____
###Markdown
Save data for use in the next model
###Code
movie_data.to_csv('movie_data.csv', index=False)
###Output
_____no_output_____
###Markdown
Visualization It is quite important to spend time visualizing your data in order to see any patterns or relations within it. We won't spend much time here mulling over plots, but I wanted to show at least one. The plot below shows the relationship between the number of votes a movie has and its average rating. We can see that as the average rating increases, a movie tends to receive more votes.
###Code
# # Create a figure instance
fig = plt.figure(1, figsize=(15, 10))
# # Create an axes instance
ax = fig.add_subplot(111)
# plt.plot(movie_data.titleType, movie_data.numVotes, 'o')
plt.plot(movie_data.averageRating, movie_data.numVotes, 'o')
###Output
_____no_output_____
###Markdown
Model Prep The simplest prediction type to grok is a binary outcome. Our data doesn't have a binary value, but we can create one. We will, of course, lose some granularity in the detail of our ratings system, but a binary value for "likability" may be enough to answer our business problem.
###Code
# Create a binary dependent variable
likable = movie_data.apply(lambda row: wu.label_rating(row), axis=1)
movie_data = pd.concat([likable, movie_data], axis=1)
movie_data.rename(columns={0:'likable'}, inplace=True)
movie_data.drop('averageRating', axis=1, inplace=True)
movie_data.head(15)
###Output
_____no_output_____
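###Markdown
The cell above relies on wu.label_rating from workshop_utils, which is not shown in this notebook. A hypothetical stand-in is sketched below so the transformation is easier to follow; the 7.0 cutoff is an assumption chosen for illustration, not the workshop's actual threshold.
###Code
# Hypothetical stand-in for wu.label_rating (illustrative only; threshold is assumed).
def label_rating_sketch(row, cutoff=7.0):
    # Return 1 ("likable") when the average rating meets the cutoff, else 0.
    return 1 if row['averageRating'] >= cutoff else 0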
###Markdown
We have another problematic column: the titleType column. This field indicates the type of film for each observation. There are only three possibilities: movie, short, and tvMovie. We can use the *pandas* built-in method *.get_dummies()* to create three dummy columns corresponding to the types. This is called one-hot encoding, and it is necessary here because the xgboost algorithm requires its inputs to be numeric, not categorical.
###Code
# One Hot Encode titleType column
dummy_types = pd.get_dummies(movie_data['titleType'])
movie_data = pd.concat([movie_data, dummy_types.reindex(movie_data.index)], axis=1)
movie_data.drop('titleType', axis=1, inplace=True)
movie_data.head(15)
# Now that we have only numbers in runtimeMinutes, we can convert to int
movie_data['runtimeMinutes'] = movie_data['runtimeMinutes'].astype(int)
movie_data['movie'] = movie_data['movie'].astype(int)
movie_data['short'] = movie_data['short'].astype(int)
movie_data['tvMovie'] = movie_data['tvMovie'].astype(int)
movie_data.dtypes
###Output
_____no_output_____
###Markdown
A note about splitting data Our data needs to be divided into three pieces. We need a subset of our observations for training the model, a subset to evaluate the effectiveness of the model, and a subset to test our hosted endpoint. Typically, data is divided between training and validation at a proportion of 70% training to 30% validation. Here, it will be 70% training, 20% validation, and 10% test. The split call below is rather unintuitive. The *.sample()* method randomly shuffles the rows so they are not in any particular order. Then *np.split()* divides the shuffled dataset at the 70% mark and the 90% mark, resulting in three new datasets.
###Code
movie_train, movie_eval, movie_test = np.split(movie_data.sample(frac=1, random_state=1278), [int(0.7 * len(movie_data)), int(0.9 * len(movie_data))])
print(f'Movie Train Shape: {movie_train.shape}')
print(f'Movie Eval Shape: {movie_eval.shape}')
print(f'Movie Test Shape: {movie_test.shape}')
movie_train.dtypes
###Output
_____no_output_____
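###Markdown
To make the split above concrete, here is a small self-contained illustration of how np.split with two cut points yields three pieces; the numbers are arbitrary demo values, not project data.
###Code
# Ten shuffled rows cut at the 70% and 90% marks -> 7 / 2 / 1 rows.
demo = pd.DataFrame({'x': range(10)})
demo_train, demo_eval, demo_test = np.split(demo.sample(frac=1, random_state=0),
                                            [int(0.7 * len(demo)), int(0.9 * len(demo))])
print(len(demo_train), len(demo_eval), len(demo_test))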
###Markdown
Now that we have our individual data sets, we need to store them in S3 to be retrievable by SageMaker when it creates our training instance.
###Code
movie_train.to_csv('movie_train.csv', header=False, index=False)
train_upload = f'movies/artifacts/{your_name}/movie_train.csv'
s3.Bucket(bucket).Object(train_upload).upload_file('movie_train.csv')
movie_eval.to_csv('movie_eval.csv', header=False, index=False)
eval_upload = f'movies/artifacts/{your_name}/movie_eval.csv'
s3.Bucket(bucket).Object(eval_upload).upload_file('movie_eval.csv')
###Output
_____no_output_____
###Markdown
Create ML resources The designation of the 'xgboost' container below is the indicator that we are using the xgboost algorithm. The SageMaker Python SDK provides an Estimator class for creating model resources. This is quite similar to TensorFlow except the type of model is designated by the container image we pass into the class. If it were necessary, we could provision multiple training instances to perform the work faster, but in this example a single instance will do.
###Code
container = get_image_uri('us-west-2', 'xgboost', '0.90-1')
xgboost = sm.estimator.Estimator(
container,
role,
base_job_name=f'{your_name}-ml-im',
train_instance_count=1,
train_instance_type='ml.m5.large',
output_path=f's3://{bucket}/movies/artifacts/{your_name}/output',
sagemaker_session=sm_session)
# scale_pos_weight = sum(negative cases) / sum(positive cases)
###Output
_____no_output_____
###Markdown
A note about hyperparameters Hyperparameters control the execution of the algorithm being used to train the model. There are many hyperparameters available for the built-in xgboost algorithm, but we are only setting six.**max_depth**: maximum depth of a tree. A higher value can induce overfitting.**eta**: shrinks the feature weights during each step. This is useful for preventing overfitting because a lower value makes the boosting more conservative (the model takes smaller steps).**subsample**: sets the fraction of the data sampled for each tree grown.**objective**: specifies the learning task and type.**scale_pos_weight**: controls the balance of weights between the positive and negative classes.**num_round**: number of training rounds. For more details on the xgboost hyperparameters, check: https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost_hyperparameters.html
###Code
xgboost.set_hyperparameters(
max_depth=3,
eta=0.1,
subsample=0.5,
objective='binary:logistic',
scale_pos_weight=2.0,
num_round=100)
###Output
_____no_output_____
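###Markdown
The scale_pos_weight value above is hard-coded at 2.0. The commented hint earlier in the notebook suggests deriving it from the class balance instead; a short sketch of that calculation on the training labels is below (it assumes the binary likable column is available in movie_train, as created earlier).
###Code
# Sketch: derive scale_pos_weight from the label balance rather than hard-coding it.
n_pos = (movie_train['likable'] == 1).sum()
n_neg = (movie_train['likable'] == 0).sum()
print('suggested scale_pos_weight:', n_neg / n_pos)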
###Markdown
The *.fit()* method will map data from our locations to the specific channels recognized by the xgboost container. The SageMaker service will then spin up a training instance based on the training attributes we specified when the Estimator was created. This is a synchronous call, so you should see the training and validation output returned here in the notebook.
###Code
train_data = sm.s3_input(s3_data=f's3://{bucket}/{train_upload}', content_type='csv')
eval_data = sm.s3_input(s3_data=f's3://{bucket}/{eval_upload}', content_type='csv')
xgboost.fit({'train': train_data, 'validation': eval_data})
###Output
_____no_output_____
###Markdown
Deploy model Now that we have a successfully trained model in SageMaker (you can go verify this in the AWS console), we need to "deploy" this model. The *.deploy()* method will instruct SageMaker to create a hosting instance of our desired size to present our model behind an endpoint.
###Code
xgboost_predict = xgboost.deploy(
initial_instance_count=1,
instance_type='ml.m5.large')
xgboost_predict.content_type = 'text/csv'
xgboost_predict.deserializer = None
# We need to drop the label column in order to retrieve inferences for this data
movie_test.drop('likable', axis=1, inplace=True)
movie_test.to_csv('movie_test.csv', header=False, index=False)
movie_test.head(15)
###Output
_____no_output_____
###Markdown
The return value of the *.deploy()* method is the SageMaker endpoint resource. We can use this endpoint resource to retrieve inferences for our test data.
###Code
with open('movie_test.csv', 'r') as file:
payload = file.read().strip()
test_data = [line for line in payload.split('\n')]
preds = wu.do_predict(test_data, xgboost_predict)
print(preds)
###Output
_____no_output_____
###Markdown
Delete endpoint when done
###Code
sm_session.delete_endpoint(xgboost_predict.endpoint)
###Output
_____no_output_____ |
proj7/MCPropagationOfUncertainty.ipynb | ###Markdown
Monte-Carlo Propagation of Uncertainty Suppose you measure the diameter, the thickness, and the mass of a disk to estimate its density. | attribute | value | uncertainty ($\sigma$) ||-----------|--------|-------------|| diameter (d) | 3.2 cm | 0.1 cm || thickness (t)| 0.6 cm | 0.1 cm || mass (m) | 45.4 g | 0.1 g | How do you compute density? $$\rho = \frac{m}{V} = \frac{m}{\pi (d/2)^2 t} = \frac{4 m}{\pi d^2 t}$$ Good! So let's do it:
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as pl
d = 3.2
t = 0.6
m = 45.4
V = np.pi*(d/2)**2*t
rho = m/V
print("Density = {0:5.2f} g/cm^3".format(rho))
###Output
Density = 9.41 g/cm^3
###Markdown
So $\rho = 9.41 g/cm^3$, but what's the uncertainty in the density $\sigma_{\rho}$? You could do it with calculus as described [here](Parameter%20Estimation.ipynb), but you can also do it with a Monte-Carlo (MC) technique like this:
###Code
sigma_d = 0.1
sigma_t = 0.1
sigma_m = 0.1
N = 10000
dMC = d + sigma_d*np.random.normal(size=N)
tMC = t + sigma_t*np.random.normal(size=N)
mMC = m + sigma_m*np.random.normal(size=N)
VMC = np.pi*(dMC/2)**2*tMC
rhoMC = mMC/VMC
pl.hist(rhoMC,bins=np.linspace(5,20,21))
pl.grid()
pl.title("MC Estimate of density")
pl.xlim(5,20)
print("Rho estimate: {0:5.3f} +/- {1:5.3f} g/cm^3".format(rhoMC.mean(), 2*rhoMC.std()))
###Output
Rho estimate: 9.768 +/- 3.819 g/cm^3
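###Markdown
As a cross-check on the MC result, the calculus-based (first-order) propagation mentioned above can be written out directly for $\rho = 4m/(\pi d^2 t)$; the sketch below quotes the result at $2\sigma$ to match the MC printout.
###Code
# First-order propagation: (sigma_rho/rho)^2 = (sigma_m/m)^2 + (2 sigma_d/d)^2 + (sigma_t/t)^2
sigma_rho = rho*np.sqrt((sigma_m/m)**2 + (2*sigma_d/d)**2 + (sigma_t/t)**2)
print("Analytic estimate: {0:5.3f} +/- {1:5.3f} g/cm^3".format(rho, 2*sigma_rho))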
|
adam_api_repo_curve_anomaly_detection/notebooks/.ipynb_checkpoints/Preprocessing Universe Repo-checkpoint.ipynb | ###Markdown
Loading Indices
###Code
#Loading the indices
file_path = '../data/universe_indices.npy'
universe_indices = np.load(file_path)
###Output
_____no_output_____
###Markdown
Computing Universe Repo
###Code
def get_repo_schedules(universe_indices_ric,business_date):
dictionary = {}
for ric in universe_indices_ric:
print('############################## Index {} ##############################'.format(ric))
try:
div_paths = 'RepoCurve/official/{}/PARIS/INTRADAY/equity/{}/sophis'.format(business_date,ric)
ds = DMDSServices('prod', 'APAC')
docs = ds.get_documents(div_paths)
d_s = docs['documents']['document'][0].__values__.get('content')
repo_schedule = xmltodict.parse(d_s)
date = repo_schedule['RepoCurve']['@businessDate']
df = pd.DataFrame(repo_schedule['RepoCurve']['repo'])
df['#text'] = df['#text'].astype(str)
df['@term'] = df['@term'].astype(str)
for i in range(df.shape[0]):
f_date = datetime.strptime(date, "%Y-%m-%d").date()
l_date = datetime.strptime(df['@term'][i], "%Y-%m-%d").date()
delta = l_date - f_date
if (delta.days >= 0):
df['@term'][i] = delta.days
else:
df = df.drop(i, axis = 0)
df = df.reset_index(drop=True)
df = df.get_values()
col1 = df[:,0].tolist()
col2 = df[:,1].tolist()
col = [col1 , col2, date]
dictionary[ric]=col
except:
dictionary[ric]=None
return dictionary
def save_dict(dictionary):
file_path = '../output/universe_repo_processed.json'
try:
with open(file_path, 'w') as fp:
json.dump(dictionary, fp)
print('file saved')
except:
print('For some reason, the file could not be saved')
universe_indices_ric = []
B_to_R = instrumentservice.InstrumentService('prod','APAC')
for index in universe_indices:
index_ric = B_to_R.transcode(index, target='reuter', partial_match=False)
if(index_ric != None):
ric = index_ric[1:]
universe_indices_ric.append(ric)
dictionary = get_repo_schedules(universe_indices_ric,'latest')
save_dict(dictionary)
len(dictionary.keys())
###Output
_____no_output_____
###Markdown
Now cleaning and preprocessing the universe repo curves
###Code
path_to_data_Universe = '../output/universe_repo_processed.json'
path_to_cleaned_data_Universe = '../output/universe_repo_cleaned.json'
print('################## Cleaning repo curves for Universe indices ##################')
new_dict = {}
with open(path_to_data_Universe) as json_file:
dictionary = json.load(json_file)
for key in list(dictionary.keys()):
if (dictionary[key]!=None):
if np.sum(np.isnan(dictionary[key][0]))==0 and np.sum(np.isnan(list(map(float,dictionary[key][1]))))==0 :
dictionary[key][1] = list(map(float,dictionary[key][1]))
new_dict[key] = dictionary[key]
xvals = [90, 180, 365, 730, 1095, 1460, 1825, 2190, 2555, 2920, 3285, 3650, 4015, 4380]
for key in new_dict.keys():
x = new_dict[key][0]
y = new_dict[key][1]
yinterp = np.interp(xvals, x, y)
#computing new interpolated values
new_dict[key][0] = xvals
new_dict[key][1] = yinterp.tolist()
with open(path_to_cleaned_data_Universe, 'w') as fp:
json.dump(new_dict, fp)
print('file saved')
###Output
_____no_output_____ |
Starbucks_Capstone_notebook-zh.ipynb | ###Markdown
Starbucks Capstone Project Introduction This data set simulates customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the app. An offer may be merely an advertisement for a drink, or an actual offer such as a discount coupon or a BOGO (buy one get one free). Some customers might not receive any offer for weeks at a time. Not every customer receives the same offers, and that is the challenge of this data set. Your task is to combine transaction data, demographic data and offer data to determine which demographic groups are influenced by which type of offer. This data set is a simplified version of the real Starbucks app data, because the underlying simulator only produces one drink, whereas Starbucks actually sells dozens of drinks. Every offer has a validity period. For example, a buy-one-get-one (BOGO) coupon offer might be valid for only 5 days. You will see in the data set that even informational offers have a validity period, even though these offers are merely advertisements for a drink; for example, if an informational offer has a validity period of 7 days, you can assume the customer may be influenced by that offer during those 7 days. The data set also contains transaction records from purchases made on the app, including the time of purchase and the amount spent. The records also include which offers a customer received and how many, as well as when the customer viewed them. A record is also created when a customer makes a purchase. Keep in mind that a customer may buy something without having received or viewed any offer. Example For example, a customer receives a "spend 10 dollars, get 2 dollars off" offer on Monday. The offer is valid for 10 days from the day it is received. If the customer's cumulative spend within the validity period reaches 10 dollars, the customer satisfies the offer. However, there are a few things to watch out for in this data set. The offer takes effect automatically; that is, once a customer receives an offer, the discount applies as soon as the conditions are met, even if the customer never saw the offer. For example, a customer receives the "spend 10 dollars, get 2 dollars off" offer but never opens it during the 10-day validity period. The customer spends a cumulative 15 dollars within those 10 days. The data set will record that the offer was completed; however, the customer was not actually influenced by the offer, because they did not know it existed. Cleaning Cleaning the data is very important and requires care. You also need to account for customers who would have made purchases even without receiving any offer. From a business perspective, if a customer intends to spend 10 dollars whether or not they receive an offer, you would not want to send them a "spend 10 dollars, get 2 dollars off" offer. So you may need to analyze what certain groups of customers would buy in the absence of any offers. Final advice Because this is a capstone project, you may use any approach you consider appropriate to analyze the data. For example, you could build a machine learning model that predicts how much someone will spend based on demographics and offer type. Or you could build a model that predicts whether a customer will respond to an offer. Or you could skip machine learning entirely and develop a set of heuristics to decide which message to send to each customer (for example, if 75% of 35-year-old female users respond to offer A but only 40% respond to offer B, then offer A should be sent to them). Data sets There are three data files: * portfolio.json – offer ids and metadata about each offer (duration, type, etc.) * profile.json – demographic data for each customer * transcript.json – records of transactions, offers received, offers viewed, and offers completed. Here are the types and descriptions of each variable in the files: **portfolio.json*** id (string) – offer id* offer_type (string) – type of offer, e.g. BOGO, discount, informational* difficulty (int) – minimum spend required to satisfy the offer* reward (int) – reward given for satisfying the offer* duration (int) – how long the offer lasts, in days* channels (list of strings) **profile.json*** age (int) – age of the customer* became_member_on (int) – date when the customer first registered on the app* gender (str) – gender of the customer (note that in addition to M for male and F for female, there is also O for other)* id (str) – customer id* income (float) – the customer's income **transcript.json*** event (str) – description of the record (e.g. transaction, offer received, offer viewed)* person (str) – customer id* time (int) – time in hours since the start of the test; the data begins at time t=0* value - (dict of strings) – either an offer id or a transaction amount **Note:** If you are using the Workspace, before reading the files you need to open a terminal/command line and run `conda update pandas`, because the version of pandas in the Workspace cannot read the contents of transcript.json correctly, so you need to update to the latest version of pandas. You can open the terminal/command line by clicking the orange Jupyter icon in the top left of the notebook. The two images below show how to open the terminal/command line and how to install the update. First open the terminal/command line: then run the command above: finally, return to this notebook (again by clicking the orange Jupyter icon) and re-run the cell below; it should no longer report an error.
###Code
import pandas as pd
import numpy as np
import math
import json
% matplotlib inline
# read in the json files
portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True)
profile = pd.read_json('data/profile.json', orient='records', lines=True)
transcript = pd.read_json('data/transcript.json', orient='records', lines=True)
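# Sketch (illustrative, not part of the original notebook): the `value` column of
# transcript holds small dicts (an offer id or a transaction amount); one way to
# flatten it for later analysis would be:
#     value_df = transcript['value'].apply(pd.Series)
#     transcript = pd.concat([transcript.drop(columns='value'), value_df], axis=1)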
###Output
_____no_output_____ |
notebooks/03/1/Expressions.ipynb | ###Markdown
3.1 Expressions Programming languages are much simpler than human languages. Nonetheless, there are some rules of grammar to learn in any language, and that is where we will begin. In this text, we will use the [Python](https://www.python.org/) programming language. Learning the grammar rules is essential, and the same rules used in the most basic programs are also central to more sophisticated programs.Programs are made up of *expressions*, which describe to the computer how to combine pieces of data. For example, a multiplication expression consists of a `*` symbol between two numerical expressions. Expressions, such as `3 * 4`, are *evaluated* by the computer. The value (the result of *evaluation*) of the last expression in each cell, `12` in this case, is displayed below the cell.
###Code
3 * 4
###Output
_____no_output_____
###Markdown
The grammar rules of a programming language are rigid. In Python, the `*` symbol cannot appear twice in a row. The computer will not try to interpret an expression that differs from its prescribed expression structures. Instead, it will show a `SyntaxError` error. The *Syntax* of a language is its set of grammar rules, and a `SyntaxError` indicates that an expression structure doesn't match any of the rules of the language.
###Code
3 * * 4
###Output
_____no_output_____
###Markdown
Small changes to an expression can change its meaning entirely. Below, the space between the `*`'s has been removed. Because `**` appears between two numerical expressions, the expression is a well-formed *exponentiation* expression (the first number raised to the power of the second: 3 times 3 times 3 times 3). The symbols `*` and `**` are called *operators*, and the values they combine are called *operands*.
###Code
3 ** 4
###Output
_____no_output_____
###Markdown
**Common Operators.** Data science often involves combining numerical values, and the set of operators in a programming language is designed so that expressions can be used to express any sort of arithmetic. In Python, the following operators are essential.| Expression Type | Operator | Example | Value ||-----------------|----------|------------|-----------|| Addition | `+` | `2 + 3` | `5` || Subtraction | `-` | `2 - 3` | `-1` || Multiplication | `*` | `2 * 3` | `6` || Division | `/` | `7 / 3` | `2.66667` || Remainder | `%` | `7 % 3` | `1` || Exponentiation | `**` | `2 ** 0.5` | `1.41421` | Python expressions obey the same familiar rules of *precedence* as in algebra: multiplication and division occur before addition and subtraction. Parentheses can be used to group together smaller expressions within a larger expression.
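As a quick check of the table, the remainder and exponentiation operators can be evaluated on their own; the precedence examples follow in the next cell.
###Code
7 % 3
2 ** 0.5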
###Code
1 + 2 * 3 * 4 * 5 / 6 ** 3 + 7 + 8 - 9 + 10
1 + 2 * (3 * 4 * 5 / 6) ** 3 + 7 + 8 - 9 + 10
###Output
_____no_output_____ |
notebooks/01.04-test_on_synthetic_data.ipynb | ###Markdown
Numpy version test on synthetic data
###Code
import sys
import os
# %load ~/Desktop/defaults.py
#import pandas as pd
#import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
#sys.path.append(os.path.abspath("../code/"))
#from quigly import *
# %load ../code/quigly.py
import numpy as np
def get_C_j(phi_a, phi_b, phi_c):
'''Return the GMM covariance matrix given phi parameters'''
v11 = np.exp(2.0*phi_a)
v12 = phi_c*np.exp(phi_a)
v22 = phi_c**2 + np.exp(2.0*phi_b)
C_j = np.matrix([[v11, v12],[v12, v22]])
return C_j
def get_gmm_j(x, y, x_j, y_j, C_j):
'''Returns the j^th normalized Gaussian mixture model'''
x = x - x_j
y = y - y_j
rr = np.stack([x,y], axis=0)
exp_arg = np.sum(rr * np.linalg.solve(C_j, rr), axis=0)
gmm_j_raw = np.exp(-0.5 * exp_arg)
normalization = 2.0*np.pi*np.sqrt(np.linalg.det(C_j))
gmm_j = 1.0/normalization * gmm_j_raw
return gmm_j
def gmm_z_js(theta_js):
'''Return the GMM z_j's given components and theta_js'''
z_js = np.ones(len(theta_js)+1)
for j, theta_j in enumerate(theta_js):
z_js[j+1] = z_js[j] + np.exp(-theta_js[j])
z_js = z_js / np.sum(z_js)
return z_js
def compute_model_a(x, y, z_js, x_js, y_js, phi_as, phi_bs, phi_cs):
'''Takes in x, y and GMM kernel params, returns model'''
N_GMM, = x_js.shape
gmm_out = 0.0
for j in range(N_GMM):
C_j = get_C_j(phi_as[j], phi_bs[j], phi_cs[j])
gmm_out += z_js[j] * get_gmm_j(x, y, x_js[j], y_js[j], C_j)
return gmm_out
def get_f_star_nocov(A, sigma_r, d):
'''Compute the profile likelihood of the stellar fluxes'''
#For now ignore the covariance matrix, assume homoscedastic
ATA = np.dot(A.T, A / sigma_r)
f_star = np.linalg.solve(ATA, np.dot(A.T, d/sigma_r))
return f_star
def get_A_matrix(x, y, x_c, y_c, kernel_params):
'''Return the column-wise concatenated A matrix'''
N_stars = len(x_c)
xx = x[:, np.newaxis] - x_c.T
yy = y[:, np.newaxis] - y_c.T
A_matrix = np.zeros(xx.shape)
for i in range(N_stars):
z_js, x_js, y_js, phi_as, phi_bs, phi_cs = kernel_params.T
A_matrix[:, i] = compute_model_a(xx[:,i], yy[:,i],
z_js, x_js, y_js, phi_as, phi_bs, phi_cs)
return A_matrix
def split_params(params, N_star, N_GMM):
'''Split/clean all parameters into star and Kernel parameters'''
star_params = params[0:N_star*2].reshape((N_star, -1))
fixed_gmm_params = np.array([1, 0, 0])
kern_params = np.hstack([fixed_gmm_params, params[N_star*2:]]).reshape((N_GMM, -1))
kern_params[:, 0] = gmm_z_js(kern_params[1:, 0])
return star_params, kern_params
def lnlike(params):
'''Return the likelihood given the parameters'''
star_params, kern_params = split_params(params, 4, 3)
x_c, y_c = star_params[0, :], star_params[1, :]
# Just a homoscedastic noise matrix for now
#C_noise_matrix = get_C_matrix(sigma_r, N_pix)
# Get the design matrix for stellar fluxes
A_matrix = get_A_matrix(x, y, x_c, y_c, kern_params)
# Compute the profile likelihood for stellar fluxes
f_star = get_f_star_nocov(A_matrix, sigma_r, data)
model = np.dot(A_matrix, f_star)
resid = data - model
lnlike_out = np.dot(resid.T, resid / yerr**2)
return lnlike_out
star_true = np.array([[10.3, 20.5],
[83.1, 45.3],
[60.8, 80.9],
[80.8, 10.5]])
f_true = np.array([520.5, 1403.9, 3511.1, 1590.9])*4.0
x_ctrue, y_ctrue = star_true[:, 0], star_true[:, 1]
kern_true = np.array([ [0.50, 0.0, 0.0, 0.8, 0.5, 0.1],
[0.35, 2.5, -3.1, 0.2, 0.2, 0.5],
[0.15, -9.9, 0.5, 1.5, 2.2, 0.2] ] )
# Data preparation. This should happen outside the likelihood!
sigma_r = 4.0 # read noise
yerr = sigma_r
data_2D = np.load('../data/synthetic_100x100.npy')
nx_pix, ny_pix = data_2D.shape
xpix = np.arange(0, nx_pix, 1)
ypix = np.arange(0, ny_pix, 1)
xv, yv = np.meshgrid(xpix, ypix)
x = xv.reshape(-1)
y = yv.reshape(-1)
data = data_2D.reshape(-1)
N_pix = np.shape(data)
plt.imshow(data_2D)
for i in range(4):
plt.plot(x_ctrue[i], y_ctrue[i], 'ko', ms=2)
plt.plot(x_ctrue[i], y_ctrue[i], 'wo', ms=1)
plt.text(x_ctrue[i], y_ctrue[i]+10, 'ABCD'[i], color='w')
###Output
_____no_output_____
###Markdown
Compute the ln likelihood for the image
###Code
kern_guess = np.hstack([ [0.8, 0.5, 0.1],
[0.3, 2.5, -3.1, 0.2, 0.2, 0.5],
[0.6, -9.9, 0.5, 1.5, 2.2, 0.2] ] )
params = np.hstack((star_true.reshape(-1), kern_guess))
N_star = 4
N_GMM = 3
%%timeit
lnlike(params)
###Output
5.91 ms ± 706 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
|
Word Cloud with Python/Word Cloud.ipynb | ###Markdown
Word Cloud
###Code
word_cloud = {}
lst = []
for unique_word in unique_words:
tmp = 0
for word in txt.split(' '):
if (word == unique_word):
tmp += 1
lst.append([unique_word, tmp])
word_cloud[unique_word] = tmp
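# Note: the nested loops above are O(unique_words x words). An equivalent
# one-pass count (sketch) would be:
#     from collections import Counter
#     word_cloud = dict(Counter(txt.split(' ')))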
word_cloud['an']
import pandas as pd
df = pd.DataFrame(lst, columns = ['word', 'freq'])
df.sort_values(by = 'freq', ascending = False).head(10)
df = pd.read_csv('t_asv.csv')
len(df)
df.isnull().sum()
txt = ''
for line in df['t']:
txt += line + ' '
words = txt.split(' ')
unique_words = list(set(txt.split(' ')))
word_cloud = {}
lst = []
for unique_word in unique_words:
tmp = 0
for word in txt.split(' '):
if (word == unique_word):
tmp += 1
lst.append([unique_word, tmp])
word_cloud[unique_word] = tmp
len(word_cloud)
len(unique_words)
256/30918.0
3000/60
unique_words
###Output
_____no_output_____ |
T3-Temperature.ipynb | ###Markdown
Loading Fine-Tuned PLMs
###Code
import torch
cuda0 = torch.device("cuda:0")
#cuda1 = torch.device("cuda:1")
#cuda3 = torch.device("cuda:3")
t5 = T5FineTuner.load_from_checkpoint("T5Models/T5Both.ckpt")
bart = BARTFineTuner.load_from_checkpoint("BARTModels/BARTBoth.ckpt")
t5.to(cuda0)
bart.to(cuda0)
###Output
_____no_output_____
###Markdown
Global Temperature
###Code
#Import Land Temp Dataset
ds_gtemp = pd.read_csv("Data/GlobalTemperature/GlobalLandTemperaturesByCountry.csv")
ds_gtemp = ds_gtemp.dropna()
ds_gtemp['dt'] = pd.to_datetime(ds_gtemp['dt'])
ds_gtemp['month'] = pd.DatetimeIndex(ds_gtemp['dt']).month
ds_gtemp['month'] = ds_gtemp['month'].apply(lambda x: calendar.month_name[x])
ds_gtemp['year'] = pd.DatetimeIndex(ds_gtemp['dt']).year
ds_gtemp.set_index(['dt'],inplace=True)
#RE Scores
template_re_scores = []
t5_re_scores = []
t5_re_scores_topk = []
t5_re_scores_topp = []
bart_re_scores = []
bart_re_scores_topk = []
bart_re_scores_topp = []
#Diversity Scores
template_tte_scores = []
t5_tte_scores = []
t5_tte_scores_topk = []
t5_tte_scores_topp = []
bart_tte_scores = []
bart_tte_scores_topk = []
bart_tte_scores_topp = []
#Grammar Scores
t5_g_scores = []
t5_g_scores_topk = []
t5_g_scores_topp = []
bart_g_scores = []
bart_g_scores_topk = []
bart_g_scores_topp = []
#Grammar Mistakes
t5_g_mistake = []
t5_g_mistake_topk = []
t5_g_mistake_topp = []
bart_g_mistake = []
bart_g_mistake_topk = []
bart_g_mistake_topp = []
countries = ['United States', 'India', 'Brazil', 'Russia', 'United Kingdom', 'France', 'Spain', 'Italy' , 'Turkey', 'Germany']
for c in countries:
print("Processing Country: ", c)
country = ds_gtemp[ds_gtemp['Country']==c][['AverageTemperature','month', 'year']].reset_index().drop(columns=['dt'])
country_gtemp_raw = country['AverageTemperature'].tolist()
#Log-normalize data
trans = np.ma.log(country_gtemp_raw)
country_gtemp = trans.filled(0)
print("\n Data Loaded")
#Detecting Waves
embeds, cluster_labels = segmentation.tslr_rep(country_gtemp)
cluster_arrangement = utils.find_contiguous(cluster_labels)
indices = utils.find_indices(cluster_arrangement)
wave_indices = utils.find_waves(country_gtemp_raw, indices, tolerance=7)
print("\n Waves Detected")
#Detecting Trends
segmentation_results = segmentation.sliding_window(country_gtemp, 7)
print("\n Segmentation Done")
filtered_results = segmentation.re_segment(segmentation_results, country_gtemp)
trends = segmentation.find_trend(filtered_results, country_gtemp)
print("\n Trends Detected")
location = c
graph, essentials = data2graph.build_graph_gtemp_form1("Global Temperature", location, wave_indices, trends, country, country_gtemp_raw )
print("\n Graph Calculated")
#Template Narrative
template_text = data2graph.build_template_gtemp_nums("Global Temperature", location, wave_indices, trends, country, country_gtemp_raw )
print("\n Templated Computed")
t5_prefix = 'translate Graph to English: '
iso = c
#Simple PLM Generation
t5_narrative = graph2text_nobeam(t5, graph, t5_prefix, 512, cuda0)
bart_narrative = graph2text_nobeam(bart , graph, "", 512, cuda0)
bart_narrative = re.sub('</s>' , '', bart_narrative)
print("Simple Generation Complete: ", iso)
#Top-k at 50
t5_narrative_topk = graph2text_nobeam_topk(t5, graph, t5_prefix, 50, 512, cuda0)
bart_narrative_topk = graph2text_nobeam_topk(bart, graph, "", 50, 512, cuda0)
bart_narrative_topk = re.sub('</s>' , '', bart_narrative_topk)
print("Top-k Complete: ", iso)
#Top-p at 0.92
t5_narrative_topp = graph2text_nobeam_topp(t5, graph, t5_prefix, 0.92, 512, cuda0)
bart_narrative_topp = graph2text_nobeam_topp(bart, graph, "", 0.92, 512, cuda0)
bart_narrative_topp = re.sub('</s>' , '', bart_narrative_topp)
print("Top-p Complete: ", iso)
#RE Scores
template_re_scores.append(textstat.flesch_reading_ease(template_text))
t5_re_scores.append(textstat.flesch_reading_ease(t5_narrative))
t5_re_scores_topk.append(textstat.flesch_reading_ease(t5_narrative_topk))
t5_re_scores_topp.append(textstat.flesch_reading_ease(t5_narrative_topp))
bart_re_scores.append(textstat.flesch_reading_ease(bart_narrative))
bart_re_scores_topk.append(textstat.flesch_reading_ease(bart_narrative_topk))
bart_re_scores_topp.append(textstat.flesch_reading_ease(bart_narrative_topp))
print("RE Scores Computed: ", iso)
#Diversity Scores
template_tte_scores.append(ld.ttr(ld.flemmatize(template_text)))
t5_tte_scores.append(ld.ttr(ld.flemmatize(t5_narrative)))
t5_tte_scores_topk.append(ld.ttr(ld.flemmatize(t5_narrative_topk)))
t5_tte_scores_topp.append(ld.ttr(ld.flemmatize(t5_narrative_topp)))
bart_tte_scores.append(ld.ttr(ld.flemmatize(bart_narrative)))
bart_tte_scores_topk.append(ld.ttr(ld.flemmatize(bart_narrative_topk)))
bart_tte_scores_topp.append(ld.ttr(ld.flemmatize(bart_narrative_topp)))
print("TTE Scores Computed: ", iso)
#Grammar Scores
gs = grammar_score(t5_narrative)
t5_g_scores.append(gs)
if gs != 1.0:
t5_g_mistake.append((graph, t5_narrative))
gs = grammar_score(t5_narrative_topk)
t5_g_scores_topk.append(gs)
if gs != 1.0:
t5_g_mistake_topk.append((graph, t5_narrative_topk))
gs = grammar_score(t5_narrative_topp)
t5_g_scores_topp.append(gs)
if gs != 1.0:
t5_g_mistake_topp.append((graph, t5_narrative_topp))
gs = grammar_score(bart_narrative)
bart_g_scores.append(gs)
if gs != 1.0:
bart_g_mistake.append((graph, bart_narrative))
gs = grammar_score(bart_narrative_topk)
bart_g_scores_topk.append(gs)
if gs != 1.0:
bart_g_mistake_topk.append((graph, bart_narrative_topk))
gs = grammar_score(bart_narrative_topp)
bart_g_scores_topp.append(gs)
if gs != 1.0:
bart_g_mistake_topp.append((graph, bart_narrative_topp))
print("Grammar Scores Computed: ", iso)
#RE Scores
print("*** RE Scores ***")
print("template_re_scores: ", np.mean(template_re_scores))
print("t5_re_scores: ", np.mean(t5_re_scores))
print("t5_re_scores_topk: ", np.mean(t5_re_scores_topk))
print("t5_re_scores_topp: ", np.mean(t5_re_scores_topp))
print("bart_re_scores: ", np.mean(bart_re_scores))
print("bart_re_scores_topk: ", np.mean(bart_re_scores_topk))
print("bart_re_scores_topp: ", np.mean(bart_re_scores_topp))
print("\n")
print("*** Diversity Scores ***")
#Diversity Scores
print("template_tte_scores: ", np.mean(template_tte_scores))
print("t5_tte_scores: ", np.mean(t5_tte_scores))
print("t5_tte_scores_topk: ", np.mean(t5_tte_scores_topk))
print("t5_tte_scores_topp: ", np.mean(t5_tte_scores_topp))
print("bart_tte_scores: ", np.mean(bart_tte_scores))
print("bart_tte_scores_topk: ", np.mean(bart_tte_scores_topk))
print("bart_tte_scores_topp: ", np.mean(bart_tte_scores_topp))
print("\n")
print("*** Grammar Scores ***")
#Grammar Scores
print("t5_g_scores: ", np.mean(t5_g_scores))
print("t5_g_scores_topk: ", np.mean(t5_g_scores_topk))
print("t5_g_scores_topp: ", np.mean(t5_g_scores_topp))
print("bart_g_scores: ", np.mean(bart_g_scores))
print("bart_g_scores_topk: ", np.mean(bart_g_scores_topk))
print("bart_g_scores_topp: ", np.mean(bart_g_scores_topp))
###Output
*** RE Scores ***
template_re_scores: -32.60000000000001
t5_re_scores: 67.479
t5_re_scores_topk: 66.064
t5_re_scores_topp: 66.98
bart_re_scores: 63.959
bart_re_scores_topk: 64.581
bart_re_scores_topp: 65.467
*** Diversity Scores ***
template_tte_scores: 0.37362918642999043
t5_tte_scores: 0.39800298942676315
t5_tte_scores_topk: 0.4648429017172216
t5_tte_scores_topp: 0.4459926517363096
bart_tte_scores: 0.4230103896435353
bart_tte_scores_topk: 0.40254574550059957
bart_tte_scores_topp: 0.4141072704373586
*** Grammar Scores ***
t5_g_scores: 0.9160556938702124
t5_g_scores_topk: 0.962284671126657
t5_g_scores_topp: 0.9597113481616681
bart_g_scores: 0.9200155183341259
bart_g_scores_topk: 0.9444883556410734
bart_g_scores_topp: 0.935676241115152
|
_notebooks/2020-04-23-The-Android-App-Market-on-Google-Play.ipynb | ###Markdown
"The Android App Market on Google Play"> "DataCamp Project: The Android App Market on Google Play"- toc: true- branch: master- badges: true- comments: true- categories: [datacamp, projects, python]- hide: false 1. Google Play Store apps and reviewsMobile apps are everywhere. They are easy to create and can be lucrative. Because of these two factors, more and more apps are being developed. In this notebook, we will do a comprehensive analysis of the Android app market by comparing over ten thousand apps in Google Play across different categories. We'll look for insights in the data to devise strategies to drive growth and retention.Let's take a look at the data, which consists of two files:apps.csv: contains all the details of the applications on Google Play. There are 13 features that describe a given app.user_reviews.csv: contains 100 reviews for each app, most helpful first. The text in each review has been pre-processed and attributed with three new features: Sentiment (Positive, Negative or Neutral), Sentiment Polarity and Sentiment Subjectivity.
###Code
# Read in dataset
import pandas as pd
apps_with_duplicates = pd.read_csv('datasets/apps.csv')
# Drop duplicates
apps = apps_with_duplicates.drop_duplicates()
# Print the total number of apps
print('Total number of apps in the dataset = ', len(apps))
# Have a look at a random sample of 5 rows
n = 5
apps.sample(n)
###Output
Total number of apps in the dataset = 9659
###Markdown
2. Data cleaning The three features that we will be working with most frequently henceforth are Installs, Size, and Price. A careful glance at the dataset reveals that some of these columns mandate data cleaning in order to be consumed by the code we'll write later. Specifically, the presence of special characters (, $ +) and letters (M k) in the Installs, Size, and Price columns makes their conversion to a numerical data type difficult. Let's clean them by removing these characters and converting each column to a numeric type.
###Code
# List of characters to remove
chars_to_remove = ['+', ',', 'M', '$']
# List of column names to clean
cols_to_clean = ['Installs', 'Size', 'Price']
# Loop for each column
for col in cols_to_clean:
# Replace each character with an empty string
for char in chars_to_remove:
apps[col] = apps[col].str.replace(char, '')
# Convert col to numeric
apps[col] = pd.to_numeric(apps[col])
###Output
_____no_output_____
###Markdown
3. Exploring app categories With more than 1 billion active users in 190 countries around the world, Google Play continues to be an important distribution platform to build a global audience. For businesses to get their apps in front of users, it's important to make them more quickly and easily discoverable on Google Play. To improve the overall search experience, Google has introduced the concept of grouping apps into categories. This brings us to the following questions: Which category has the highest share of (active) apps in the market? Is any specific category dominating the market? Which categories have the fewest apps? We will see that there are 33 unique app categories present in our dataset. Family and Game apps have the highest market prevalence. Interestingly, Tools, Business and Medical apps are also at the top.
###Code
import plotly
plotly.offline.init_notebook_mode(connected=True)
import plotly.graph_objs as go
# Print the total number of unique categories
num_categories = len(apps['Category'].unique())
print('Number of categories = ', num_categories)
# Count the number of apps in each 'Category' and sort them in descending order
num_apps_in_category = apps['Category'].value_counts().sort_values(ascending = False)
data = [go.Bar(
x = num_apps_in_category.index, # index = category name
y = num_apps_in_category.values, # value = count
)]
plotly.offline.iplot(data)
###Output
_____no_output_____
###Markdown
4. Distribution of app ratings After having seen the market share for each category of apps, let's see how all these apps perform on average. App ratings (on a scale of 1 to 5) impact the discoverability and conversion of apps as well as the company's overall brand image. Ratings are a key performance indicator of an app. From our analysis, we find that the average rating across all app categories is 4.17. The histogram plot is skewed to the right, indicating that the majority of the apps are highly rated, with only a few exceptions among the low-rated apps.
###Code
# Average rating of apps
avg_app_rating = apps['Rating'].mean()
print('Average app rating = ', avg_app_rating)
# Distribution of apps according to their ratings
data = [go.Histogram(
x = apps['Rating']
)]
# Vertical dashed line to indicate the average app rating
layout = {'shapes': [{
'type' :'line',
'x0': avg_app_rating,
'y0': 0,
'x1': avg_app_rating,
'y1': 1000,
'line': { 'dash': 'dashdot'}
}]
}
plotly.offline.iplot({'data': data, 'layout': layout})
###Output
Average app rating = 4.173243045387994
###Markdown
5. Size and price of an app Let's now examine app size and app price. For size, if the mobile app is too large, it may be difficult and/or expensive for users to download. Lengthy download times could turn users off before they even experience your mobile app. Plus, each user's device has a finite amount of disk space. For price, some users expect their apps to be free or inexpensive. These problems compound if the developing world is part of your target market; especially due to internet speeds, earning power and exchange rates. How can we effectively come up with strategies to size and price our app? Does the size of an app affect its rating? Do users really care about system-heavy apps or do they prefer lightweight apps? Does the price of an app affect its rating? Do users always prefer free apps over paid apps? We find that the majority of top-rated apps (rating over 4) range from 2 MB to 20 MB. We also find that the vast majority of apps price themselves under \$10.
###Code
%matplotlib inline
import seaborn as sns
sns.set_style("darkgrid")
import warnings
warnings.filterwarnings("ignore")
# Subset for categories with at least 250 apps
large_categories = apps.groupby(['Category']).filter(lambda x: len(x) >= 250).reset_index()
# Plot size vs. rating
plt1 = sns.jointplot(x = large_categories['Size'], y = large_categories['Rating'], kind = 'hex')
# Subset out apps whose type is 'Paid'
paid_apps = apps[apps['Type'] == 'Paid']
# Plot price vs. rating
plt2 = sns.jointplot(x = paid_apps['Price'], y = paid_apps['Rating'])
###Output
_____no_output_____
###Markdown
6. Relation between app category and app price So now comes the hard part. How are companies and developers supposed to make ends meet? What monetization strategies can companies use to maximize profit? The costs of apps are largely based on features, complexity, and platform. There are many factors to consider when selecting the right pricing strategy for your mobile app. It is important to consider the willingness of your customer to pay for your app. A wrong price could break the deal before the download even happens. Potential customers could be turned off by what they perceive to be a shocking cost, or they might delete an app they’ve downloaded after receiving too many ads or simply not getting their money's worth. Different categories demand different price ranges. Some apps that are simple and used daily, like the calculator app, should probably be kept free. However, it would make sense to charge for a highly-specialized medical app that diagnoses diabetic patients. Below, we see that Medical and Family apps are the most expensive. Some medical apps extend even up to \$80! All game apps are reasonably priced below \$20.
###Code
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
fig.set_size_inches(15, 8)
# Select a few popular app categories
popular_app_cats = apps[apps.Category.isin(['GAME', 'FAMILY', 'PHOTOGRAPHY',
'MEDICAL', 'TOOLS', 'FINANCE',
'LIFESTYLE','BUSINESS'])]
# Examine the price trend by plotting Price vs Category
ax = sns.stripplot(x = popular_app_cats['Price'], y = popular_app_cats['Category'], jitter=True, linewidth=1)
ax.set_title('App pricing trend across categories')
# Apps whose Price is greater than 200
apps_above_200 = popular_app_cats[['Category', 'App', 'Price']][popular_app_cats['Price'] > 200]
apps_above_200
###Output
_____no_output_____
###Markdown
7. Filter out "junk" appsIt looks like a bunch of the really expensive apps are "junk" apps. That is, apps that don't really have a purpose. Some app developer may create an app called I Am Rich Premium or most expensive app (H) just for a joke or to test their app development skills. Some developers even do this with malicious intent and try to make money by hoping people accidentally click purchase on their app in the store.Let's filter out these junk apps and re-do our visualization. The distribution of apps under \$20 becomes clearer.
###Code
# Select apps priced below $100
apps_under_100 = popular_app_cats[popular_app_cats['Price'] < 100]
fig, ax = plt.subplots()
fig.set_size_inches(15, 8)
# Examine price vs category with the authentic apps
ax = sns.stripplot(x=apps_under_100['Price'], y=apps_under_100['Category'], data=apps_under_100,
jitter=True, linewidth=1)
ax.set_title('App pricing trend across categories after filtering for junk apps')
###Output
_____no_output_____
###Markdown
8. Popularity of paid apps vs free apps For apps in the Play Store today, there are five types of pricing strategies: free, freemium, paid, paymium, and subscription. Let's focus on free and paid apps only. Some characteristics of free apps are: Free to download. Main source of income often comes from advertisements. Often created by companies that have other products and the app serves as an extension of those products. Can serve as a tool for customer retention, communication, and customer service. Some characteristics of paid apps are: Users are asked to pay once for the app to download and use it. The user can't really get a feel for the app before buying it. Are paid apps installed as much as free apps? It turns out that paid apps have a relatively lower number of installs than free apps, though the difference is not as stark as I would have expected!
###Code
trace0 = go.Box(
# Data for paid apps
y=apps[apps['Type'] == 'Paid']['Installs'],
name = 'Paid'
)
trace1 = go.Box(
# Data for free apps
y=apps[apps['Type'] == 'Free']['Installs'],
name = 'Free'
)
layout = go.Layout(
title = "Number of downloads of paid apps vs. free apps",
yaxis = dict(
type = 'log',
autorange = True
)
)
# Add trace0 and trace1 to a list for plotting
data = [trace0, trace1]
plotly.offline.iplot({'data': data, 'layout': layout})
###Output
_____no_output_____
###Markdown
9. Sentiment analysis of user reviews Mining user review data to determine how people feel about your product, brand, or service can be done using a technique called sentiment analysis. User reviews for apps can be analyzed to identify if the mood is positive, negative or neutral about that app. For example, positive words in an app review might include words such as 'amazing', 'friendly', 'good', 'great', and 'love'. Negative words might be words like 'malware', 'hate', 'problem', 'refund', and 'incompetent'. By plotting sentiment polarity scores of user reviews for paid and free apps, we observe that free apps receive a lot of harsh comments, as indicated by the outliers on the negative y-axis. Reviews for paid apps appear never to be extremely negative. This may indicate something about app quality, i.e., paid apps being of higher quality than free apps on average. The median polarity score for paid apps is a little higher than free apps, thereby syncing with our previous observation. In this notebook, we analyzed over ten thousand apps from the Google Play Store. We can use our findings to inform our decisions should we ever wish to create an app ourselves.
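The polarity scores used below ship pre-computed in user_reviews.csv; for reference, scores like these can be produced with an off-the-shelf library such as TextBlob. The short sketch that follows assumes TextBlob is installed and is only illustrative; the exact tool used to preprocess the reviews is not stated in the dataset.
###Code
# Illustrative only: polarity lies in [-1, 1], subjectivity in [0, 1].
from textblob import TextBlob
sample_review = "This app is amazing, I love the new update"
print(TextBlob(sample_review).sentiment)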
###Code
# Load user_reviews.csv
reviews_df = pd.read_csv('datasets/user_reviews.csv')
# Join and merge the two dataframe
merged_df = pd.merge(apps, reviews_df, on = 'App', how = "inner")
# Drop NA values from Sentiment and Translated_Review columns
merged_df = merged_df.dropna(subset=['Sentiment', 'Translated_Review'])
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11, 8)
# User review sentiment polarity for paid vs. free apps
ax = sns.boxplot(x = 'Type', y = 'Sentiment_Polarity', data = merged_df)
ax.set_title('Sentiment Polarity Distribution')
###Output
_____no_output_____ |
docs/source/examples/Widget Custom.ipynb | ###Markdown
[Index](Index.ipynb) - [Back](Widget Styling.ipynb) - [Next](Widget Asynchronous.ipynb)
###Code
from __future__ import print_function
###Output
_____no_output_____
###Markdown
Building a Custom Widget - Hello World The widget framework is built on top of the Comm framework (short for communication). The Comm framework is a framework that allows the kernel to send/receive JSON messages to/from the front end (as seen below).To create a custom widget, you need to define the widget both in the browser and in the python kernel. Building a Custom Widget To get started, you'll create a simple hello world widget. Later you'll build on this foundation to make more complex widgets. Python Kernel DOMWidget and Widget To define a widget, you must inherit from the Widget or DOMWidget base class. If you intend for your widget to be displayed in the Jupyter notebook, you'll want to inherit from the DOMWidget. The DOMWidget class itself inherits from the Widget class. The Widget class is useful for cases in which the Widget is not meant to be displayed directly in the notebook, but instead as a child of another rendering environment. For example, if you wanted to create a three.js widget (a popular WebGL library), you would implement the rendering window as a DOMWidget and any 3D objects or lights meant to be rendered in that window as Widgets. _view_name Inheriting from the DOMWidget does not tell the widget framework what front end widget to associate with your back end widget.Instead, you must tell it yourself by defining specially named trait attributes, `_view_name`, `_view_module`, and `_view_module_version` (as seen below) and optionally `_model_name` and `_model_module`.
###Code
import ipywidgets as widgets
from traitlets import Unicode, validate
class HelloWidget(widgets.DOMWidget):
_view_name = Unicode('HelloView').tag(sync=True)
_view_module = Unicode('hello').tag(sync=True)
_view_module_version = Unicode('0.1.0').tag(sync=True)
###Output
_____no_output_____
###Markdown
sync=True traitlets Traitlets is an IPython library for defining type-safe properties on configurable objects. For this tutorial you do not need to worry about the *configurable* piece of the traitlets machinery. The `sync=True` keyword argument tells the widget framework to handle synchronizing that value to the browser. Without `sync=True`, the browser would have no knowledge of `_view_name` or `_view_module`. Other traitlet types Unicode, used for `_view_name`, is not the only Traitlet type; there are many more, some of which are listed below: - Any- Bool- Bytes- CBool- CBytes- CComplex- CFloat- CInt- CLong- CRegExp- CUnicode- CaselessStrEnum- Complex- Dict- DottedObjectName- Enum- Float- FunctionType- Instance- InstanceType- Int- List- Long- Set- TCPAddress- Tuple- Type- Unicode- Union Not all of these traitlets can be synchronized across the network; only the JSON-able traits and Widget instances will be synchronized.
###Code
%%javascript
define('hello', ["@jupyter-widgets/base"], function(widgets) {
});
###Output
_____no_output_____
###Markdown
Define the view Next, define your widget view class. Inherit from the `DOMWidgetView` by using the `.extend` method.
###Code
%%javascript
require.undef('hello');
define('hello', ["@jupyter-widgets/base"], function(widgets) {
// Define the HelloView
var HelloView = widgets.DOMWidgetView.extend({
});
return {
HelloView: HelloView
}
});
###Output
_____no_output_____
###Markdown
Render method Lastly, override the base `render` method of the view to define custom rendering logic. A handle to the widget's default DOM element can be acquired via `this.el`. The `el` property is the DOM element associated with the view.
###Code
%%javascript
require.undef('hello');
define('hello', ["@jupyter-widgets/base"], function(widgets) {
var HelloView = widgets.DOMWidgetView.extend({
// Render the view.
render: function() {
this.el.textContent = 'Hello World!';
},
});
return {
HelloView: HelloView
};
});
###Output
_____no_output_____
###Markdown
Test You should be able to display your widget just like any other widget now.
###Code
HelloWidget()
###Output
_____no_output_____
###Markdown
Making the widget stateful There is not much that you can do with the above example that you can't do with the IPython display framework. To change this, you will make the widget stateful. Instead of displaying a static "hello world" message, it will display a string set by the back end. First you need to add a traitlet in the back end. Use the name of `value` to stay consistent with the rest of the widget framework and to allow your widget to be used with interact.
###Code
class HelloWidget(widgets.DOMWidget):
_view_name = Unicode('HelloView').tag(sync=True)
_view_module = Unicode('hello').tag(sync=True)
_view_module_version = Unicode('0.1.0').tag(sync=True)
value = Unicode('Hello World!').tag(sync=True)
###Output
_____no_output_____
###Markdown
Accessing the model from the view To access the model associated with a view instance, use the `model` property of the view. `get` and `set` methods are used to interact with the Backbone model. `get` is trivial, however you have to be careful when using `set`. After calling the model `set` you need to call the view's `touch` method. This associates the `set` operation with a particular view so output will be routed to the correct cell. The model also has an `on` method, which allows you to listen to events triggered by the model (like value changes). Rendering model contents By replacing the string literal with a call to `model.get`, the view will now display the value of the back end upon display. However, it will not update itself to a new value when the value changes.
###Code
%%javascript
require.undef('hello');
define('hello', ["@jupyter-widgets/base"], function(widgets) {
var HelloView = widgets.DOMWidgetView.extend({
render: function() {
this.el.textContent = this.model.get('value');
},
});
return {
HelloView : HelloView
};
});
###Output
_____no_output_____
###Markdown
Dynamic updates To get the view to update itself dynamically, register a function to update the view's value when the model's `value` property changes. This can be done using the `model.on` method. The `on` method takes three parameters, an event name, callback handle, and callback context. The Backbone event named `change` will fire whenever the model changes. By appending `:value` to it, you tell Backbone to only listen to the change event of the `value` property (as seen below).
###Code
%%javascript
require.undef('hello');
define('hello', ["@jupyter-widgets/base"], function(widgets) {
var HelloView = widgets.DOMWidgetView.extend({
render: function() {
this.value_changed();
this.model.on('change:value', this.value_changed, this);
},
value_changed: function() {
this.el.textContent = this.model.get('value');
},
});
return {
HelloView : HelloView
};
});
###Output
_____no_output_____
###Markdown
Test
###Code
w = HelloWidget()
w
w.value = 'test'
###Output
_____no_output_____
###Markdown
[Index](Index.ipynb) - [Back](Widget Styling.ipynb)
###Code
from __future__ import print_function
###Output
_____no_output_____
###Markdown
Building a Custom Widget - Hello World The widget framework is built on top of the Comm framework (short for communication). The Comm framework is a framework that allows the kernel to send/receive JSON messages to/from the front end (as seen below).To create a custom widget, you need to define the widget both in the browser and in the python kernel. Building a Custom Widget To get started, you'll create a simple hello world widget. Later you'll build on this foundation to make more complex widgets. Python Kernel DOMWidget and Widget To define a widget, you must inherit from the Widget or DOMWidget base class. If you intend for your widget to be displayed in the Jupyter notebook, you'll want to inherit from the DOMWidget. The DOMWidget class itself inherits from the Widget class. The Widget class is useful for cases in which the Widget is not meant to be displayed directly in the notebook, but instead as a child of another rendering environment. For example, if you wanted to create a three.js widget (a popular WebGL library), you would implement the rendering window as a DOMWidget and any 3D objects or lights meant to be rendered in that window as Widgets. _view_name Inheriting from the DOMWidget does not tell the widget framework what front end widget to associate with your back end widget.Instead, you must tell it yourself by defining specially named trait attributes, `_view_name` and `_view_module` (as seen below) and optionally `_model_name` and `_model_module`.
###Code
import ipywidgets as widgets
from traitlets import Unicode, validate
class HelloWidget(widgets.DOMWidget):
_view_name = Unicode('HelloView').tag(sync=True)
_view_module = Unicode('hello').tag(sync=True)
###Output
_____no_output_____
###Markdown
sync=True traitlets Traitlets is an IPython library for defining type-safe properties on configurable objects. For this tutorial you do not need to worry about the *configurable* piece of the traitlets machinery. The `sync=True` keyword argument tells the widget framework to handle synchronizing that value to the browser. Without `sync=True`, the browser would have no knowledge of `_view_name` or `_view_module`. Other traitlet types Unicode, used for _view_name, is not the only Traitlet type, there are many more some of which are listed below: - Any- Bool- Bytes- CBool- CBytes- CComplex- CFloat- CInt- CLong- CRegExp- CUnicode- CaselessStrEnum- Complex- Dict- DottedObjectName- Enum- Float- FunctionType- Instance- InstanceType- Int- List- Long- Set- TCPAddress- Tuple- Type- Unicode- UnionNot all of these traitlets can be synchronized across the network, only the JSON-able traits and Widget instances will be synchronized. Front end (JavaScript) Models and views The IPython widget framework front end relies heavily on [Backbone.js](http://backbonejs.org/). Backbone.js is an MVC (model view controller) framework. Widgets defined in the back end are automatically synchronized with generic Backbone.js models in the front end. The traitlets are added to the front end instance automatically on first state push. The `_view_name` trait that you defined earlier is used by the widget framework to create the corresponding Backbone.js view and link that view to the model. Import jupyter-js-widgets You first need to import the `jupyter-js-widgets` module. To import modules, use the `define` method of [require.js](http://requirejs.org/) (as seen below).
###Code
%%javascript
define('hello', ["jupyter-js-widgets"], function(widgets) {
});
###Output
_____no_output_____
###Markdown
Define the view Next define your widget view class. Inherit from the `DOMWidgetView` by using the `.extend` method.
###Code
%%javascript
require.undef('hello');
define('hello', ["jupyter-js-widgets"], function(widgets) {
// Define the HelloView
var HelloView = widgets.DOMWidgetView.extend({
});
return {
HelloView: HelloView
}
});
###Output
_____no_output_____
###Markdown
Render method Lastly, override the base `render` method of the view to define custom rendering logic. A handle to the widget's default DOM element can be acquired via `this.el`. The `el` property is the DOM element associated with the view.
###Code
%%javascript
require.undef('hello');
define('hello', ["jupyter-js-widgets"], function(widgets) {
var HelloView = widgets.DOMWidgetView.extend({
// Render the view.
render: function() {
this.el.textContent = 'Hello World!';
},
});
return {
HelloView: HelloView
};
});
###Output
_____no_output_____
###Markdown
Test You should be able to display your widget just like any other widget now.
###Code
HelloWidget()
###Output
_____no_output_____
###Markdown
Making the widget stateful There is not much that you can do with the above example that you can't do with the IPython display framework. To change this, you will make the widget stateful. Instead of displaying a static "hello world" message, it will display a string set by the back end. First you need to add a traitlet in the back end. Use the name of `value` to stay consistent with the rest of the widget framework and to allow your widget to be used with interact.
###Code
class HelloWidget(widgets.DOMWidget):
_view_name = Unicode('HelloView').tag(sync=True)
_view_module = Unicode('hello').tag(sync=True)
value = Unicode('Hello World!').tag(sync=True)
###Output
_____no_output_____
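###Markdown
As an aside (illustrative only): because the trait is named `value`, the widget also works with the rest of the traitlets machinery. For example, two instances can be kept in sync from the kernel with `traitlets.link`.
###Code
from traitlets import link
w1, w2 = HelloWidget(), HelloWidget()
# Keep the two back-end values in sync: changing one updates the other
link((w1, 'value'), (w2, 'value'))
w1.value = 'synced'
assert w2.value == 'synced'
###Output
_____no_output_____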
###Markdown
Accessing the model from the view To access the model associated with a view instance, use the `model` property of the view. `get` and `set` methods are used to interact with the Backbone model. `get` is trivial; however, you have to be careful when using `set`. After calling the model's `set` you need to call the view's `touch` method. This associates the `set` operation with a particular view so output will be routed to the correct cell. The model also has an `on` method, which allows you to listen to events triggered by the model (like value changes). Rendering model contents By replacing the string literal with a call to `model.get`, the view will now display the value of the back end upon display. However, it will not update itself to a new value when the value changes.
###Code
%%javascript
require.undef('hello');
define('hello', ["jupyter-js-widgets"], function(widgets) {
var HelloView = widgets.DOMWidgetView.extend({
render: function() {
this.el.textContent = this.model.get('value');
},
});
return {
HelloView : HelloView
};
});
###Output
_____no_output_____
###Markdown
Dynamic updates To get the view to update itself dynamically, register a function to update the view's value when the model's `value` property changes. This can be done using the `model.on` method. The `on` method takes three parameters, an event name, callback handle, and callback context. The Backbone event named `change` will fire whenever the model changes. By appending `:value` to it, you tell Backbone to only listen to the change event of the `value` property (as seen below).
###Code
%%javascript
require.undef('hello');
define('hello', ["jupyter-js-widgets"], function(widgets) {
var HelloView = widgets.DOMWidgetView.extend({
render: function() {
this.value_changed();
this.model.on('change:value', this.value_changed, this);
},
value_changed: function() {
this.el.textContent = this.model.get('value');
},
});
return {
HelloView : HelloView
};
});
###Output
_____no_output_____
###Markdown
Test
###Code
w = HelloWidget()
w
w.value = 'test'
###Output
_____no_output_____
###Markdown
[Index](Index.ipynb) - [Back](Widget Styling.ipynb) - [Next](Widget Asynchronous.ipynb)
###Code
from __future__ import print_function
###Output
_____no_output_____
###Markdown
Building a Custom Widget - Email widget The widget framework is built on top of the Comm framework (short for communication). The Comm framework is a framework that allows the kernel to send/receive JSON messages to/from the front end (as seen below).To create a custom widget, you need to define the widget both in the browser and in the python kernel. Building a Custom Widget To get started, you'll create a simple email widget. Python Kernel DOMWidget and Widget To define a widget, you must inherit from the Widget or DOMWidget base class. If you intend for your widget to be displayed in the Jupyter notebook, you'll want to inherit from the DOMWidget. The DOMWidget class itself inherits from the Widget class. The Widget class is useful for cases in which the Widget is not meant to be displayed directly in the notebook, but instead as a child of another rendering environment. For example, if you wanted to create a three.js widget (a popular WebGL library), you would implement the rendering window as a DOMWidget and any 3D objects or lights meant to be rendered in that window as Widgets. _view_name Inheriting from the DOMWidget does not tell the widget framework what front end widget to associate with your back end widget.Instead, you must tell it yourself by defining specially named trait attributes, `_view_name`, `_view_module`, and `_view_module_version` (as seen below) and optionally `_model_name` and `_model_module`.
###Code
from traitlets import Unicode, Bool, validate, TraitError
from ipywidgets import DOMWidget, register
@register
class Email(DOMWidget):
_view_name = Unicode('EmailView').tag(sync=True)
_view_module = Unicode('email_widget').tag(sync=True)
_view_module_version = Unicode('0.1.0').tag(sync=True)
###Output
_____no_output_____
###Markdown
sync=True traitlets Traitlets is an IPython library for defining type-safe properties on configurable objects. For this tutorial you do not need to worry about the *configurable* piece of the traitlets machinery. The `sync=True` keyword argument tells the widget framework to handle synchronizing that value to the browser. Without `sync=True`, attributes of the widget won't be synchronized with the front-end. Other traitlet types Unicode, used for `_view_name`, is not the only Traitlet type, there are many more some of which are listed below: - Any- Bool- Bytes- CBool- CBytes- CComplex- CFloat- CInt- CLong- CRegExp- CUnicode- CaselessStrEnum- Complex- Dict- DottedObjectName- Enum- Float- FunctionType- Instance- InstanceType- Int- List- Long- Set- TCPAddress- Tuple- Type- Unicode- UnionNot all of these traitlets can be synchronized across the network, only the JSON-able traits and Widget instances will be synchronized. Front end (JavaScript) Models and views The IPython widget framework front end relies heavily on [Backbone.js](http://backbonejs.org/). Backbone.js is an MVC (model view controller) framework. Widgets defined in the back end are automatically synchronized with generic Backbone.js models in the front end. The traitlets are added to the front end instance automatically on first state push. The `_view_name` trait that you defined earlier is used by the widget framework to create the corresponding Backbone.js view and link that view to the model. Import @jupyter-widgets/base You first need to import the `@jupyter-widgets/base` module. To import modules, use the `define` method of [require.js](http://requirejs.org/) (as seen below).
###Code
%%javascript
define('email_widget', ["@jupyter-widgets/base"], function(widgets) {
});
###Output
_____no_output_____
###Markdown
Define the view Next, define your widget view class. Inherit from the `DOMWidgetView` by using the `.extend` method.
###Code
%%javascript
require.undef('email_widget');
define('email_widget', ["@jupyter-widgets/base"], function(widgets) {
// Define the EmailView
var EmailView = widgets.DOMWidgetView.extend({
});
return {
EmailView: EmailView
}
});
###Output
_____no_output_____
###Markdown
Render method Lastly, override the base `render` method of the view to define custom rendering logic. A handle to the widget's default DOM element can be acquired via `this.el`. The `el` property is the DOM element associated with the view.
###Code
%%javascript
require.undef('email_widget');
define('email_widget', ["@jupyter-widgets/base"], function(widgets) {
var EmailView = widgets.DOMWidgetView.extend({
// Render the view.
render: function() {
this.email_input = document.createElement('input');
this.email_input.type = 'email';
this.email_input.value = '[email protected]';
this.email_input.disabled = true;
this.el.appendChild(this.email_input);
},
});
return {
EmailView: EmailView
};
});
###Output
_____no_output_____
###Markdown
Test You should be able to display your widget just like any other widget now.
###Code
Email()
###Output
_____no_output_____
###Markdown
Making the widget stateful There is not much that you can do with the above example that you can't do with the IPython display framework. To change this, you will make the widget stateful. Instead of displaying a static "[email protected]" email address, it will display an address set by the back end. First, you need to add a traitlet in the back end. Use the name of `value` to stay consistent with the rest of the widget framework and to allow your widget to be used with interact. We also want to keep the user from entering an invalid email address, so we add a validator using traitlets.
###Code
from traitlets import Unicode, Bool, validate, TraitError
from ipywidgets import DOMWidget, register
@register
class Email(DOMWidget):
_view_name = Unicode('EmailView').tag(sync=True)
_view_module = Unicode('email_widget').tag(sync=True)
_view_module_version = Unicode('0.1.0').tag(sync=True)
# Attributes
value = Unicode('[email protected]', help="The email value.").tag(sync=True)
disabled = Bool(False, help="Enable or disable user changes.").tag(sync=True)
# Basic validator for the email value
@validate('value')
def _valid_value(self, proposal):
if proposal['value'].count("@") != 1:
raise TraitError('Invalid email value: it must contain an "@" character')
if proposal['value'].count(".") == 0:
raise TraitError('Invalid email value: it must contain at least one "." character')
return proposal['value']
###Output
_____no_output_____
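###Markdown
A quick sanity check of the validator (illustrative only, assuming the `Email` class above has been defined): an address without an "@" should be rejected with a `TraitError`.
###Code
try:
    Email(value='not-an-email')
except TraitError as e:
    print('Rejected as expected:', e)
###Output
_____no_output_____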
###Markdown
Accessing the model from the view To access the model associated with a view instance, use the `model` property of the view. `get` and `set` methods are used to interact with the Backbone model. `get` is trivial; however, you have to be careful when using `set`. After calling the model's `set` you need to call the view's `touch` method. This associates the `set` operation with a particular view so output will be routed to the correct cell. The model also has an `on` method, which allows you to listen to events triggered by the model (like value changes). Rendering model contents By replacing the string literal with a call to `model.get`, the view will now display the value of the back end upon display. However, it will not update itself to a new value when the value changes.
###Code
%%javascript
require.undef('email_widget');
define('email_widget', ["@jupyter-widgets/base"], function(widgets) {
var EmailView = widgets.DOMWidgetView.extend({
// Render the view.
render: function() {
this.email_input = document.createElement('input');
this.email_input.type = 'email';
this.email_input.value = this.model.get('value');
this.email_input.disabled = this.model.get('disabled');
this.el.appendChild(this.email_input);
},
});
return {
EmailView: EmailView
};
});
Email(value='[email protected]', disabled=True)
###Output
_____no_output_____
###Markdown
Dynamic updates To get the view to update itself dynamically, register a function to update the view's value when the model's `value` property changes. This can be done using the `model.on` method. The `on` method takes three parameters, an event name, callback handle, and callback context. The Backbone event named `change` will fire whenever the model changes. By appending `:value` to it, you tell Backbone to only listen to the change event of the `value` property (as seen below).
###Code
%%javascript
require.undef('email_widget');
define('email_widget', ["@jupyter-widgets/base"], function(widgets) {
var EmailView = widgets.DOMWidgetView.extend({
// Render the view.
render: function() {
this.email_input = document.createElement('input');
this.email_input.type = 'email';
this.email_input.value = this.model.get('value');
this.email_input.disabled = this.model.get('disabled');
this.el.appendChild(this.email_input);
// Python -> JavaScript update
this.model.on('change:value', this.value_changed, this);
this.model.on('change:disabled', this.disabled_changed, this);
},
value_changed: function() {
this.email_input.value = this.model.get('value');
},
disabled_changed: function() {
this.email_input.disabled = this.model.get('disabled');
},
});
return {
EmailView: EmailView
};
});
###Output
_____no_output_____
###Markdown
This allows us to push value updates from the Python kernel to the views. Now, to get a value typed in the front end back to the Python kernel (when the input is not disabled), we use the `model.set` method followed by `model.save_changes`.
###Code
%%javascript
require.undef('email_widget');
define('email_widget', ["@jupyter-widgets/base"], function(widgets) {
var EmailView = widgets.DOMWidgetView.extend({
// Render the view.
render: function() {
this.email_input = document.createElement('input');
this.email_input.type = 'email';
this.email_input.value = this.model.get('value');
this.email_input.disabled = this.model.get('disabled');
this.el.appendChild(this.email_input);
// Python -> JavaScript update
this.model.on('change:value', this.value_changed, this);
this.model.on('change:disabled', this.disabled_changed, this);
// JavaScript -> Python update
this.email_input.onchange = this.input_changed.bind(this);
},
value_changed: function() {
this.email_input.value = this.model.get('value');
},
disabled_changed: function() {
this.email_input.disabled = this.model.get('disabled');
},
input_changed: function() {
this.model.set('value', this.email_input.value);
this.model.save_changes();
},
});
return {
EmailView: EmailView
};
});
###Output
_____no_output_____
###Markdown
Test
###Code
email = Email(value='[email protected]', disabled=False)
email
email.value
email.value = '[email protected]'
###Output
_____no_output_____
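###Markdown
Because the front end calls `model.set` followed by `model.save_changes`, edits typed into the box reach the kernel as normal trait changes. As an illustrative sketch, you can watch them from Python with `observe` (run the cell, then type a new address in the widget above).
###Code
# Print every new value that arrives from the front end (or from Python assignments)
email.observe(lambda change: print('new value:', change['new']), names='value')
###Output
_____no_output_____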
###Markdown
[Index](Index.ipynb) - [Back](Widget Styling.ipynb)
###Code
from __future__ import print_function
###Output
_____no_output_____
###Markdown
Building a Custom Widget - Hello World The widget framework is built on top of the Comm framework (short for communication). The Comm framework is a framework that allows the kernel to send/receive JSON messages to/from the front end (as seen below).To create a custom widget, you need to define the widget both in the browser and in the python kernel. Building a Custom Widget To get started, you'll create a simple hello world widget. Later you'll build on this foundation to make more complex widgets. Python Kernel DOMWidget and Widget To define a widget, you must inherit from the Widget or DOMWidget base class. If you intend for your widget to be displayed in the Jupyter notebook, you'll want to inherit from the DOMWidget. The DOMWidget class itself inherits from the Widget class. The Widget class is useful for cases in which the Widget is not meant to be displayed directly in the notebook, but instead as a child of another rendering environment. For example, if you wanted to create a three.js widget (a popular WebGL library), you would implement the rendering window as a DOMWidget and any 3D objects or lights meant to be rendered in that window as Widgets. _view_name Inheriting from the DOMWidget does not tell the widget framework what front end widget to associate with your back end widget.Instead, you must tell it yourself by defining specially named trait attributes, `_view_name` and `_view_module` (as seen below) and optionally `_model_name` and `_model_module`.
###Code
import ipywidgets as widgets
from traitlets import Unicode, validate
class HelloWidget(widgets.DOMWidget):
_view_name = Unicode('HelloView').tag(sync=True)
_view_module = Unicode('hello').tag(sync=True)
###Output
_____no_output_____
###Markdown
sync=True traitlets Traitlets is an IPython library for defining type-safe properties on configurable objects. For this tutorial you do not need to worry about the *configurable* piece of the traitlets machinery. The `sync=True` keyword argument tells the widget framework to handle synchronizing that value to the browser. Without `sync=True`, the browser would have no knowledge of `_view_name` or `_view_module`. Other traitlet types Unicode, used for `_view_name`, is not the only Traitlet type, there are many more some of which are listed below: - Any- Bool- Bytes- CBool- CBytes- CComplex- CFloat- CInt- CLong- CRegExp- CUnicode- CaselessStrEnum- Complex- Dict- DottedObjectName- Enum- Float- FunctionType- Instance- InstanceType- Int- List- Long- Set- TCPAddress- Tuple- Type- Unicode- UnionNot all of these traitlets can be synchronized across the network, only the JSON-able traits and Widget instances will be synchronized. Front end (JavaScript) Models and views The IPython widget framework front end relies heavily on [Backbone.js](http://backbonejs.org/). Backbone.js is an MVC (model view controller) framework. Widgets defined in the back end are automatically synchronized with generic Backbone.js models in the front end. The traitlets are added to the front end instance automatically on first state push. The `_view_name` trait that you defined earlier is used by the widget framework to create the corresponding Backbone.js view and link that view to the model. Import jupyter-js-widgets You first need to import the `jupyter-js-widgets` module. To import modules, use the `define` method of [require.js](http://requirejs.org/) (as seen below).
###Code
%%javascript
define('hello', ["jupyter-js-widgets"], function(widgets) {
});
###Output
_____no_output_____
###Markdown
Define the view Next, define your widget view class. Inherit from the `DOMWidgetView` by using the `.extend` method.
###Code
%%javascript
require.undef('hello');
define('hello', ["jupyter-js-widgets"], function(widgets) {
// Define the HelloView
var HelloView = widgets.DOMWidgetView.extend({
});
return {
HelloView: HelloView
}
});
###Output
_____no_output_____
###Markdown
Render method Lastly, override the base `render` method of the view to define custom rendering logic. A handle to the widget's default DOM element can be acquired via `this.el`. The `el` property is the DOM element associated with the view.
###Code
%%javascript
require.undef('hello');
define('hello', ["jupyter-js-widgets"], function(widgets) {
var HelloView = widgets.DOMWidgetView.extend({
// Render the view.
render: function() {
this.el.textContent = 'Hello World!';
},
});
return {
HelloView: HelloView
};
});
###Output
_____no_output_____
###Markdown
Test You should be able to display your widget just like any other widget now.
###Code
HelloWidget()
###Output
_____no_output_____
###Markdown
Making the widget stateful There is not much that you can do with the above example that you can't do with the IPython display framework. To change this, you will make the widget stateful. Instead of displaying a static "hello world" message, it will display a string set by the back end. First you need to add a traitlet in the back end. Use the name of `value` to stay consistent with the rest of the widget framework and to allow your widget to be used with interact.
###Code
class HelloWidget(widgets.DOMWidget):
_view_name = Unicode('HelloView').tag(sync=True)
_view_module = Unicode('hello').tag(sync=True)
value = Unicode('Hello World!').tag(sync=True)
###Output
_____no_output_____
###Markdown
Accessing the model from the view To access the model associated with a view instance, use the `model` property of the view. `get` and `set` methods are used to interact with the Backbone model. `get` is trivial; however, you have to be careful when using `set`. After calling the model's `set` you need to call the view's `touch` method. This associates the `set` operation with a particular view so output will be routed to the correct cell. The model also has an `on` method, which allows you to listen to events triggered by the model (like value changes). Rendering model contents By replacing the string literal with a call to `model.get`, the view will now display the value of the back end upon display. However, it will not update itself to a new value when the value changes.
###Code
%%javascript
require.undef('hello');
define('hello', ["jupyter-js-widgets"], function(widgets) {
var HelloView = widgets.DOMWidgetView.extend({
render: function() {
this.el.textContent = this.model.get('value');
},
});
return {
HelloView : HelloView
};
});
###Output
_____no_output_____
###Markdown
Dynamic updates To get the view to update itself dynamically, register a function to update the view's value when the model's `value` property changes. This can be done using the `model.on` method. The `on` method takes three parameters, an event name, callback handle, and callback context. The Backbone event named `change` will fire whenever the model changes. By appending `:value` to it, you tell Backbone to only listen to the change event of the `value` property (as seen below).
###Code
%%javascript
require.undef('hello');
define('hello', ["jupyter-js-widgets"], function(widgets) {
var HelloView = widgets.DOMWidgetView.extend({
render: function() {
this.value_changed();
this.model.on('change:value', this.value_changed, this);
},
value_changed: function() {
this.el.textContent = this.model.get('value');
},
});
return {
HelloView : HelloView
};
});
###Output
_____no_output_____
###Markdown
Test
###Code
w = HelloWidget()
w
w.value = 'test'
###Output
_____no_output_____
###Markdown
[Index](Index.ipynb) - [Back](Widget Styling.ipynb) - [Next](Widget Asynchronous.ipynb)
###Code
from __future__ import print_function
###Output
_____no_output_____
###Markdown
Building a Custom Widget - Email widget The widget framework is built on top of the Comm framework (short for communication). The Comm framework is a framework that allows the kernel to send/receive JSON messages to/from the front end (as seen below).To create a custom widget, you need to define the widget both in the browser and in the python kernel. Building a Custom Widget To get started, you'll create a simple email widget. Python Kernel DOMWidget and Widget To define a widget, you must inherit from the Widget or DOMWidget base class. If you intend for your widget to be displayed in the Jupyter notebook, you'll want to inherit from the DOMWidget. The DOMWidget class itself inherits from the Widget class. The Widget class is useful for cases in which the Widget is not meant to be displayed directly in the notebook, but instead as a child of another rendering environment. For example, if you wanted to create a three.js widget (a popular WebGL library), you would implement the rendering window as a DOMWidget and any 3D objects or lights meant to be rendered in that window as Widgets. _view_name Inheriting from the DOMWidget does not tell the widget framework what front end widget to associate with your back end widget.Instead, you must tell it yourself by defining specially named trait attributes, `_view_name`, `_view_module`, and `_view_module_version` (as seen below) and optionally `_model_name` and `_model_module`.
###Code
from traitlets import Unicode, Bool, validate, TraitError
from ipywidgets import DOMWidget, register
@register
class Email(DOMWidget):
_view_name = Unicode('EmailView').tag(sync=True)
_view_module = Unicode('email_widget').tag(sync=True)
_view_module_version = Unicode('0.1.0').tag(sync=True)
###Output
_____no_output_____
###Markdown
sync=True traitlets Traitlets is an IPython library for defining type-safe properties on configurable objects. For this tutorial you do not need to worry about the *configurable* piece of the traitlets machinery. The `sync=True` keyword argument tells the widget framework to handle synchronizing that value to the browser. Without `sync=True`, attributes of the widget won't be synchronized with the front-end. Other traitlet types Unicode, used for `_view_name`, is not the only Traitlet type, there are many more some of which are listed below: - Any- Bool- Bytes- CBool- CBytes- CComplex- CFloat- CInt- CLong- CRegExp- CUnicode- CaselessStrEnum- Complex- Dict- DottedObjectName- Enum- Float- FunctionType- Instance- InstanceType- Int- List- Long- Set- TCPAddress- Tuple- Type- Unicode- UnionNot all of these traitlets can be synchronized across the network, only the JSON-able traits and Widget instances will be synchronized. Front end (JavaScript) Models and views The IPython widget framework front end relies heavily on [Backbone.js](http://backbonejs.org/). Backbone.js is an MVC (model view controller) framework. Widgets defined in the back end are automatically synchronized with generic Backbone.js models in the front end. The traitlets are added to the front end instance automatically on first state push. The `_view_name` trait that you defined earlier is used by the widget framework to create the corresponding Backbone.js view and link that view to the model. Import @jupyter-widgets/base You first need to import the `@jupyter-widgets/base` module. To import modules, use the `define` method of [require.js](http://requirejs.org/) (as seen below).
###Code
%%javascript
define('email_widget', ["@jupyter-widgets/base"], function(widgets) {
});
###Output
_____no_output_____
###Markdown
Define the view Next, define your widget view class. Inherit from the `DOMWidgetView` by using the `.extend` method.
###Code
%%javascript
require.undef('email_widget');
define('email_widget', ["@jupyter-widgets/base"], function(widgets) {
// Define the EmailView
var EmailView = widgets.DOMWidgetView.extend({
});
return {
EmailView: EmailView
}
});
###Output
_____no_output_____
###Markdown
Render method Lastly, override the base `render` method of the view to define custom rendering logic. A handle to the widget's default DOM element can be acquired via `this.el`. The `el` property is the DOM element associated with the view.
###Code
%%javascript
require.undef('email_widget');
define('email_widget', ["@jupyter-widgets/base"], function(widgets) {
var EmailView = widgets.DOMWidgetView.extend({
// Render the view.
render: function() {
this.email_input = document.createElement('input');
this.email_input.type = 'email';
this.email_input.value = '[email protected]';
this.email_input.disabled = true;
this.el.appendChild(this.email_input);
},
});
return {
EmailView: EmailView
};
});
###Output
_____no_output_____
###Markdown
Test You should be able to display your widget just like any other widget now.
###Code
Email()
###Output
_____no_output_____
###Markdown
Making the widget stateful There is not much that you can do with the above example that you can't do with the IPython display framework. To change this, you will make the widget stateful. Instead of displaying a static "[email protected]" email address, it will display an address set by the back end. First, you need to add a traitlet in the back end. Use the name of `value` to stay consistent with the rest of the widget framework and to allow your widget to be used with interact. We also want to keep the user from entering an invalid email address, so we add a validator using traitlets.
###Code
from traitlets import Unicode, Bool, validate, TraitError
from ipywidgets import DOMWidget, register
@register
class Email(DOMWidget):
_view_name = Unicode('EmailView').tag(sync=True)
_view_module = Unicode('email_widget').tag(sync=True)
_view_module_version = Unicode('0.1.0').tag(sync=True)
# Attributes
value = Unicode('[email protected]', help="The email value.").tag(sync=True)
disabled = Bool(False, help="Enable or disable user changes.").tag(sync=True)
# Basic validator for the email value
@validate('value')
def _valid_value(self, proposal):
if proposal['value'].count("@") != 1:
raise TraitError('Invalid email value: it must contain an "@" character')
if proposal['value'].count(".") == 0:
raise TraitError('Invalid email value: it must contain at least one "." character')
return proposal['value']
###Output
_____no_output_____
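###Markdown
As a quick illustration (not part of the original tutorial), the validator also guards later assignments: setting an address without an "@" on an existing instance raises a `TraitError`.
###Code
e = Email()
try:
    e.value = 'missing-the-at-sign'
except TraitError as err:
    print('Rejected as expected:', err)
###Output
_____no_output_____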
###Markdown
Accessing the model from the view To access the model associated with a view instance, use the `model` property of the view. `get` and `set` methods are used to interact with the Backbone model. `get` is trivial; however, you have to be careful when using `set`. After calling the model's `set` you need to call the view's `touch` method. This associates the `set` operation with a particular view so output will be routed to the correct cell. The model also has an `on` method, which allows you to listen to events triggered by the model (like value changes). Rendering model contents By replacing the string literal with a call to `model.get`, the view will now display the value of the back end upon display. However, it will not update itself to a new value when the value changes.
###Code
%%javascript
require.undef('email_widget');
define('email_widget', ["@jupyter-widgets/base"], function(widgets) {
var EmailView = widgets.DOMWidgetView.extend({
// Render the view.
render: function() {
this.email_input = document.createElement('input');
this.email_input.type = 'email';
this.email_input.value = this.model.get('value');
this.email_input.disabled = this.model.get('disabled');
this.el.appendChild(this.email_input);
},
});
return {
EmailView: EmailView
};
});
Email(value='[email protected]', disabled=True)
###Output
_____no_output_____
###Markdown
Dynamic updates To get the view to update itself dynamically, register a function to update the view's value when the model's `value` property changes. This can be done using the `model.on` method. The `on` method takes three parameters, an event name, callback handle, and callback context. The Backbone event named `change` will fire whenever the model changes. By appending `:value` to it, you tell Backbone to only listen to the change event of the `value` property (as seen below).
###Code
%%javascript
require.undef('email_widget');
define('email_widget', ["@jupyter-widgets/base"], function(widgets) {
var EmailView = widgets.DOMWidgetView.extend({
// Render the view.
render: function() {
this.email_input = document.createElement('input');
this.email_input.type = 'email';
this.email_input.value = this.model.get('value');
this.email_input.disabled = this.model.get('disabled');
this.el.appendChild(this.email_input);
// Python -> JavaScript update
this.model.on('change:value', this.value_changed, this);
this.model.on('change:disabled', this.disabled_changed, this);
},
value_changed: function() {
this.email_input.value = this.model.get('value');
},
disabled_changed: function() {
this.email_input.disabled = this.model.get('disabled');
},
});
return {
EmailView: EmailView
};
});
###Output
_____no_output_____
###Markdown
This allows us to push value updates from the Python kernel to the views. Now, to get a value typed in the front end back to the Python kernel (when the input is not disabled), we use the `model.set` method followed by `model.save_changes`.
###Code
%%javascript
require.undef('email_widget');
define('email_widget', ["@jupyter-widgets/base"], function(widgets) {
var EmailView = widgets.DOMWidgetView.extend({
// Render the view.
render: function() {
this.email_input = document.createElement('input');
this.email_input.type = 'email';
this.email_input.value = this.model.get('value');
this.email_input.disabled = this.model.get('disabled');
this.el.appendChild(this.email_input);
// Python -> JavaScript update
this.model.on('change:value', this.value_changed, this);
this.model.on('change:disabled', this.disabled_changed, this);
// JavaScript -> Python update
this.email_input.onchange = this.input_changed.bind(this);
},
value_changed: function() {
this.email_input.value = this.model.get('value');
},
disabled_changed: function() {
this.email_input.disabled = this.model.get('disabled');
},
input_changed: function() {
this.model.set('value', this.email_input.value);
this.model.save_changes();
},
});
return {
EmailView: EmailView
};
});
###Output
_____no_output_____
###Markdown
Test
###Code
email = Email(value='[email protected]', disabled=False)
email
email.value
email.value = '[email protected]'
###Output
_____no_output_____ |
dev/30_text_core.ipynb | ###Markdown
Text core> Basic function to preprocess text before assembling it in a `DataBunch`.
###Code
#export
import spacy,html
from spacy.symbols import ORTH
###Output
_____no_output_____
###Markdown
Preprocessing rules The following are rules applied to texts before or after they are tokenized.
###Code
#export
#special tokens
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
#export
_all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
#export
_re_spec = re.compile(r'([/#\\])')
def spec_add_spaces(t):
"Add spaces around / and #"
return _re_spec.sub(r' \1 ', t)
test_eq(spec_add_spaces('#fastai'), ' # fastai')
test_eq(spec_add_spaces('/fastai'), ' / fastai')
test_eq(spec_add_spaces('\\fastai'), ' \\ fastai')
#export
_re_space = re.compile(' {2,}')
def rm_useless_spaces(t):
"Remove multiple spaces"
return _re_space.sub(' ', t)
test_eq(rm_useless_spaces('a b c'), 'a b c')
#export
_re_rep = re.compile(r'(\S)(\1{2,})')
def replace_rep(t):
"Replace repetitions at the character level: cccc -- TK_REP 4 c"
def _replace_rep(m):
c,cc = m.groups()
return f' {TK_REP} {len(cc)+1} {c} '
return _re_rep.sub(_replace_rep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same character or more.
###Code
test_eq(replace_rep('aa'), 'aa')
test_eq(replace_rep('aaaa'), f' {TK_REP} 4 a ')
#export
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
#hide
"""
Matches any word repeated at least four times with spaces between them
(?:\s|^) Non-Capture either a whitespace character or the beginning of text
(\w+) Capture any alphanumeric character
\s+ One or more whitespace
((?:\1\s+)+) Capture a repetition of one or more times \1 followed by one or more whitespace
\1 Occurence of \1
(\s|\W|$) Capture last whitespace, non alphanumeric character or end of text
""";
#export
def replace_wrep(t):
"Replace word repetitions: word word word word -- TK_WREP 4 word"
def _replace_wrep(m):
c,cc,e = m.groups()
return f' {TK_WREP} {len(cc.split())+2} {c} {e}'
return _re_wrep.sub(_replace_wrep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same word or more.
###Code
test_eq(replace_wrep('ah ah'), 'ah ah')
test_eq(replace_wrep('ah ah ah'), f' {TK_WREP} 3 ah ')
test_eq(replace_wrep('ah ah ah ah'), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah '), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah.'), f' {TK_WREP} 4 ah .')
test_eq(replace_wrep('ah ah ahi'), f'ah ah ahi')
#export
def fix_html(x):
"Various messy things we've seen in documents"
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
'#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(' @-@ ','-').replace('...',' …')
return html.unescape(x)
test_eq(fix_html('#39;bli#146;'), "'bli'")
test_eq(fix_html('Sarah amp; Duck...'), 'Sarah & Duck …')
test_eq(fix_html('a nbsp; #36;'), 'a $')
test_eq(fix_html('\\" <unk>'), f'" {UNK}')
test_eq(fix_html('quot; @.@ @-@ '), "' .-")
test_eq(fix_html('<br />text\\n'), '\ntext\n')
#export
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
#hide
"""
Catches any word in all caps, even with ' or - inside
(\s|^) Capture either a whitespace or the beginning of text
([A-Z]+ Capture one capitalized letter or more...
[^a-z\s]*) ...followed by anything that's non lowercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
""";
#export
def replace_all_caps(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_all_caps(m):
tok = f'{TK_UP} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_all_caps.sub(_replace_all_caps, t)
test_eq(replace_all_caps("I'M SHOUTING"), f"{TK_UP} i'm {TK_UP} shouting")
test_eq(replace_all_caps("I'm speaking normally"), "I'm speaking normally")
test_eq(replace_all_caps("I am speaking normally"), "i am speaking normally")
#export
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
#hide
"""
Catches any capitalized word
(\s|^) Capture either a whitespace or the beginning of text
([A-Z] Capture exactly one capitalized letter...
[^A-Z\s]*) ...followed by anything that's not uppercase or whitespace
(?=(\s|$)) Look ahead for a space of end of text
""";
#export
def replace_maj(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_maj(m):
tok = f'{TK_MAJ} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_maj.sub(_replace_maj, t)
test_eq(replace_maj("Jeremy Howard"), f'{TK_MAJ} jeremy {TK_MAJ} howard')
test_eq(replace_maj("I don't think there is any maj here"), ("i don't think there is any maj here"),)
#export
def lowercase(t, add_bos=True, add_eos=False):
"Converts `t` to lowercase"
return (f'{BOS} ' if add_bos else '') + t.lower().strip() + (f' {EOS}' if add_eos else '')
#export
def replace_space(t):
"Replace embedded spaces in a token with unicode line char to allow for split/join"
return t.replace(' ', '▁')
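# Quick checks, added for illustration: lowercase adds the BOS/EOS markers, replace_space swaps spaces for ▁
test_eq(lowercase("Hello World"), f'{BOS} hello world')
test_eq(lowercase("Hello", add_bos=False, add_eos=True), f'hello {EOS}')
test_eq(replace_space('a b'), 'a▁b')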
#export
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
###Output
_____no_output_____
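###Markdown
As a quick illustration (added here, not part of the library code), the default rules can be chained by hand over a raw string to see their combined effect; `TokenizeBatch` below applies them the same way (via `maps`) before handing the text to the tokenizer.
###Code
# Apply each default processing rule in order to one example string
txt = "I'm SO excited to learn #fastai!!!"
for rule in defaults.text_proc_rules:
    txt = rule(txt)
txt
###Output
_____no_output_____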
###Markdown
Tokenizing A tokenizer is a class that must implement a `pipe` method. This `pipe` method receives a generator of texts and must return a generator with their tokenized versions. Here is the most basic example:
###Code
#export
class BaseTokenizer():
"Basic tokenizer that just splits on spaces"
def __init__(self, split_char=' ', **kwargs): self.split_char=split_char
def __call__(self, items): return (t.split(self.split_char) for t in items)
tok = BaseTokenizer()
for t in tok(["This is a text"]): test_eq(t, ["This", "is", "a", "text"])
tok = BaseTokenizer('x')
for t in tok(["This is a text"]): test_eq(t, ["This is a te", "t"])
#export
class SpacyTokenizer():
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, buf_sz=5000):
special_toks = ifnone(special_toks, defaults.text_spec_tok)
nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
for w in special_toks: nlp.tokenizer.add_special_case(w, [{ORTH: w}])
self.pipe,self.buf_sz = nlp.pipe,buf_sz
def __call__(self, items):
return (L(doc).attrgot('text') for doc in self.pipe(items, batch_size=self.buf_sz))
tok = SpacyTokenizer()
inp,exp = "This isn't the easiest text.",["This", "is", "n't", "the", "easiest", "text", "."]
test_eq(L(tok([inp]*5)), [exp]*5)
#export
class TokenizeBatch:
"A wrapper around `tok_func` to apply `rules` and tokenize in parallel"
def __init__(self, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs ):
self.rules = L(ifnone(rules, defaults.text_proc_rules))
self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
self.tok = tok_func(**tok_kwargs)
def __call__(self, batch):
return (L(o).map(self.post_f) for o in self.tok(maps(*self.rules, batch)))
f = TokenizeBatch()
test_eq(f(["This isn't a problem"]), [[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem']])
f = TokenizeBatch(BaseTokenizer, rules=[], split_char="'")
test_eq(f(["This isn't a problem"]), [['This▁isn', 't▁a▁problem']])
###Output
_____no_output_____
###Markdown
`TokenizeBatch` is the main callable used by each of the processes handling tokenization. It creates an instance of a tokenizer with `tok_func` and `tok_kwargs` at init, then iterates through each `batch` of texts, applies the `rules` to them and tokenizes them.
###Code
texts = ["this is a text", "this is another text"]
tok = TokenizeBatch(BaseTokenizer, texts.__getitem__)
test_eq([t for t in tok([0,1])],[['this', 'is', 'a', 'text'], ['this', 'is', 'another', 'text']])
#export
def tokenize1(text, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs):
"Tokenize one `text` with an instance of `tok_func` and some `rules`"
return first(TokenizeBatch(tok_func, rules, post_rules, **tok_kwargs)([text]))
test_eq(tokenize1("This isn't a problem"),
[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem'])
test_eq(tokenize1("This isn't a problem", BaseTokenizer, rules=[], split_char="'"),
['This▁isn', 't▁a▁problem'])
#export
def parallel_tokenize(items, tok_func, rules, as_gen=False, n_workers=defaults.cpus, **tok_kwargs):
"Calls a potential setup on `tok_func` before launching `TokenizeBatch` in parallel"
if hasattr(tok_func, 'setup'): tok_kwargs = tok_func(**tok_kwargs).setup(items, rules)
return parallel_gen(TokenizeBatch, items, as_gen=as_gen, tok_func=tok_func,
rules=rules, n_workers=n_workers, **tok_kwargs)
###Output
_____no_output_____
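###Markdown
A minimal usage sketch (illustrative; it mirrors how `tokenize_df` consumes the output below): `parallel_tokenize` yields `(index, tokens)` pairs, so the results are usually sorted back into their original order before use.
###Code
# Tokenize a handful of texts in parallel and reassemble them in order
texts = [f"This is an example of text {i}" for i in range(10)]
toks = L(parallel_tokenize(texts, SpacyTokenizer, rules=defaults.text_proc_rules, n_workers=2)).sorted().itemgot(1)
toks[0]
###Output
_____no_output_____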
###Markdown
Tokenize texts in files These functions preprocess texts stored in files. Tokenized texts will be saved in a similar fashion in a directory suffixed with `_tok` in the parent folder of `path` (override with `output_dir`).
###Code
#export
fn_counter_pkl = 'counter.pkl'
#export
def tokenize_folder(path, extensions=None, folders=None, output_dir=None, n_workers=defaults.cpus,
rules=None, tok_func=SpacyTokenizer, encoding='utf8', **tok_kwargs):
"Tokenize text files in `path` in parallel using `n_workers`"
path,extensions = Path(path),ifnone(extensions, ['.txt'])
fnames = get_files(path, extensions=extensions, recurse=True, folders=folders)
output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
rules = partial(Path.read, encoding=encoding) + L(ifnone(rules, defaults.text_proc_rules.copy()))
counter = Counter()
for i,tok in parallel_tokenize(fnames, tok_func, rules, as_gen=True, n_workers=n_workers, **tok_kwargs):
out = output_dir/fnames[i].relative_to(path)
out.write(' '.join(tok))
counter.update(tok)
(output_dir/fn_counter_pkl).save(counter)
#export
def read_tokenized_file(f): return L(f.read().split(' '))
###Output
_____no_output_____
###Markdown
The result will be in `output_dir` (defaults to a folder in the same parent directory as `path`, with `_tok` added to `path.name`) with the same structure as in `path`. Tokenized texts for a given file will be in the file having the same name in `output_dir`, and the count of all words is stored in `output_dir/counter.pkl`. `extensions` will default to `['.txt']` and all text files in `path` are treated unless you restrict the search with a list of `folders`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. Tokenize texts in a dataframe
###Code
#export
def _join_texts(df, mark_fields=False):
"Join texts in row `idx` of `df`, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
#hide
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'text1': texts}, columns=['text', 'text1'])
col = _join_texts(df, mark_fields=True)
for i in range(len(df)):
test_eq(col[i], f'{FLD} 1 This is an example of text {i} {FLD} 2 This is an example of text {i}')
#export
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize texts in `df[text_cols]` in parallel using `n_workers`"
text_cols = L(text_cols)
#mark_fields defaults to False if there is one column of texts, True if there are multiple
if mark_fields is None: mark_fields = len(text_cols)>1
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
texts = _join_texts(df[text_cols], mark_fields=mark_fields)
outputs = L(parallel_tokenize(texts, tok_func, rules, n_workers=n_workers, **tok_kwargs)
).sorted().itemgot(1)
other_cols = df.columns[~df.columns.isin(text_cols)]
res = df[other_cols].copy()
res['text'] = outputs
return res,Counter(outputs.concat())
###Output
_____no_output_____
###Markdown
This function returns a new dataframe with the same non-text columns and a column named text that contains the tokenized texts. It also returns a counter of all words seen, to quickly build a vocabulary afterwards. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
###Code
#export
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, header='infer', chunksize=50000, **tok_kwargs):
"Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
df = pd.read_csv(fname, header=header, chunksize=chunksize)
outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
cnt = Counter()
for i,dfp in enumerate(df):
out,c = tokenize_df(dfp, text_cols, n_workers=n_workers, rules=rules,
mark_fields=mark_fields, tok_func=tok_func, **tok_kwargs)
out.text = out.text.str.join(' ')
out.to_csv(outname, header=(None,header)[i==0], index=False, mode=('a','w')[i==0])
cnt.update(c)
outname.with_suffix('.pkl').save(cnt)
#export
def load_tokenized_csv(fname):
"Utility function to quickly load a tokenized csv ans the corresponding counter"
fname = Path(fname)
out = pd.read_csv(fname)
for txt_col in out.columns[1:-1]:
out[txt_col] = out[txt_col].str.split(' ')
return out,fname.with_suffix('.pkl').load()
###Output
_____no_output_____
###Markdown
The result will be written in a new csv file in `outname` (defaults to the same as `fname` with the suffix `_tok.csv`) and will have the same header as the original file, the same non-text columns, and a text column as described in `tokenize_df`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field. The csv file is opened with `header` and optionally read in blocks of `chunksize` rows at a time. If this argument is passed, each chunk is processed independently and saved in the output file to save memory usage.
###Code
def _prepare_texts(tmp_d):
"Prepare texts in a folder struct in tmp_d, a csv file and returns a dataframe"
path = Path(tmp_d)/'tmp'
path.mkdir()
for d in ['a', 'b', 'c']:
(path/d).mkdir()
for i in range(5):
with open(path/d/f'text{i}.txt', 'w') as f: f.write(f"This is an example of text {d} {i}")
texts = [f"This is an example of text {d} {i}" for i in range(5) for d in ['a', 'b', 'c']]
df = pd.DataFrame({'text': texts, 'label': list(range(15))}, columns=['text', 'label'])
csv_fname = tmp_d/'input.csv'
df.to_csv(csv_fname, index=False)
return path,df,csv_fname
with tempfile.TemporaryDirectory() as tmp_d:
path,df,csv_fname = _prepare_texts(Path(tmp_d))
#Tokenize as folders
tokenize_folder(path)
outp = Path(tmp_d)/'tmp_tok'
for d in ['a', 'b', 'c']:
p = outp/d
for i in range(5):
test_eq((p/f'text{i}.txt').read(), ' '.join([
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', d, str(i) ]))
cnt_a = (outp/fn_counter_pkl).load()
test_eq(cnt_a['this'], 15)
test_eq(cnt_a['a'], 5)
test_eq(cnt_a['0'], 3)
#Tokenize as a dataframe
out,cnt_b = tokenize_df(df, text_cols='text')
test_eq(list(out.columns), ['label', 'text'])
test_eq(out['label'].values, df['label'].values)
test_eq(out['text'], [(outp/d/f'text{i}.txt').read().split(' ') for i in range(5) for d in ['a', 'b', 'c']])
test_eq(cnt_a, cnt_b)
#Tokenize as a csv
out_fname = Path(tmp_d)/'output.csv'
tokenize_csv(csv_fname, text_cols='text', outname=out_fname)
test_eq((out,cnt_b), load_tokenized_csv(out_fname))
###Output
_____no_output_____
###Markdown
Sentencepiece
###Code
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
#export
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
model_type='unigram', char_coverage=None, cache_dir='tmp'):
try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
except ImportError:
raise Exception('sentencepiece module is missing: run `pip install sentencepiece`')
self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
if sp_model is None: self.tok = None
else:
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
os.makedirs(self.cache_dir, exist_ok=True)
def _get_vocab_sz(self, raw_text_path):
cnt = Counter()
with open(raw_text_path, 'r') as f:
for line in f.readlines():
cnt.update(line.split())
if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
res = len(cnt)//4
while res%8 != 0: res+=1
return res
def train(self, raw_text_path):
"Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
from sentencepiece import SentencePieceTrainer
vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
spec_tokens = ['\u2581'+s for s in self.special_toks]
SentencePieceTrainer.Train(" ".join([
f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1",
f"--user_defined_symbols={','.join(spec_tokens)}"]))
raw_text_path.unlink()
return self.cache_dir/'spm.model'
def setup(self, items, rules):
if self.tok is not None: return {'sp_model': self.sp_model}
raw_text_path = self.cache_dir/'texts.out'
with open(raw_text_path, 'w') as f:
for t in progress_bar(maps(*rules, items), total=len(items), leave=False):
f.write(f'{t}\n')
return {'sp_model': self.train(raw_text_path)}
def __call__(self, items):
for t in items: yield self.tok.EncodeAsPieces(t)
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text', tok_func=SentencePieceTokenizer, vocab_sz=34)
###Output
_____no_output_____
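###Markdown
Once trained, the SentencePiece model can be reused without retraining by passing its path as `sp_model`. A small sketch, assuming the cell above has been run so that `tmp/spm.model` exists (`tmp` being the default `cache_dir`):
###Code
# Reuse the already-trained SentencePiece model directly (the tokenizer loads it at init instead of training a new one)
tokenize1("This is an example of text 0", tok_func=SentencePieceTokenizer, sp_model='tmp/spm.model')
###Output
_____no_output_____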
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core_foundation.ipynb.
Converted 01a_core_utils.ipynb.
Converted 01b_core_dispatch.ipynb.
Converted 01c_core_transform.ipynb.
Converted 02_core_script.ipynb.
Converted 03_torchcore.ipynb.
Converted 03a_layers.ipynb.
Converted 04_data_load.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 09a_vision_data.ipynb.
Converted 09b_vision_utils.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 70_callback_wandb.ipynb.
Converted 71_callback_tensorboard.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
Converted xse_resnext.ipynb.
###Markdown
Text core> Basic functions to preprocess text before assembling it in a `DataBunch`.
###Code
#export
import spacy,html
from spacy.symbols import ORTH
###Output
_____no_output_____
###Markdown
Preprocessing rules The following are rules applied to texts before or after they are tokenized.
###Code
#export
#special tokens
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
#export
_all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
#export
_re_spec = re.compile(r'([/#\\])')
def spec_add_spaces(t):
"Add spaces around / and #"
return _re_spec.sub(r' \1 ', t)
test_eq(spec_add_spaces('#fastai'), ' # fastai')
test_eq(spec_add_spaces('/fastai'), ' / fastai')
test_eq(spec_add_spaces('\\fastai'), ' \\ fastai')
#export
_re_space = re.compile(' {2,}')
def rm_useless_spaces(t):
"Remove multiple spaces"
return _re_space.sub(' ', t)
test_eq(rm_useless_spaces('a b c'), 'a b c')
#export
_re_rep = re.compile(r'(\S)(\1{2,})')
def replace_rep(t):
"Replace repetitions at the character level: cccc -- TK_REP 4 c"
def _replace_rep(m):
c,cc = m.groups()
return f' {TK_REP} {len(cc)+1} {c} '
return _re_rep.sub(_replace_rep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same character or more.
###Code
test_eq(replace_rep('aa'), 'aa')
test_eq(replace_rep('aaaa'), f' {TK_REP} 4 a ')
#export
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
#hide
"""
Matches any word repeated at least three times with spaces between them
(?:\s|^) Non-Capture either a whitespace character or the beginning of text
(\w+) Capture any alphanumeric character
\s+ One or more whitespace
((?:\1\s+)+) Capture a repetition of one or more times \1 followed by one or more whitespace
\1 Occurence of \1
(\s|\W|$) Capture last whitespace, non alphanumeric character or end of text
""";
#export
def replace_wrep(t):
"Replace word repetitions: word word word word -- TK_WREP 4 word"
def _replace_wrep(m):
c,cc,e = m.groups()
return f' {TK_WREP} {len(cc.split())+2} {c} {e}'
return _re_wrep.sub(_replace_wrep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same word or more.
###Code
test_eq(replace_wrep('ah ah'), 'ah ah')
test_eq(replace_wrep('ah ah ah'), f' {TK_WREP} 3 ah ')
test_eq(replace_wrep('ah ah ah ah'), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah '), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah.'), f' {TK_WREP} 4 ah .')
test_eq(replace_wrep('ah ah ahi'), f'ah ah ahi')
#export
def fix_html(x):
"Various messy things we've seen in documents"
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
'#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(' @-@ ','-').replace('...',' …')
return html.unescape(x)
test_eq(fix_html('#39;bli#146;'), "'bli'")
test_eq(fix_html('Sarah amp; Duck...'), 'Sarah & Duck …')
test_eq(fix_html('a nbsp; #36;'), 'a $')
test_eq(fix_html('\\" <unk>'), f'" {UNK}')
test_eq(fix_html('quot; @.@ @-@ '), "' .-")
test_eq(fix_html('<br />text\\n'), '\ntext\n')
#export
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
#hide
"""
Catches any word in all caps, even with ' or - inside
(\s|^) Capture either a whitespace or the beginning of text
([A-Z]+ Capture one capitalized letter or more...
[^a-z\s]*) ...followed by anything that's non lowercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
""";
#export
def replace_all_caps(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_all_caps(m):
tok = f'{TK_UP} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_all_caps.sub(_replace_all_caps, t)
test_eq(replace_all_caps("I'M SHOUTING"), f"{TK_UP} i'm {TK_UP} shouting")
test_eq(replace_all_caps("I'm speaking normally"), "I'm speaking normally")
test_eq(replace_all_caps("I am speaking normally"), "i am speaking normally")
#export
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
#hide
"""
Catches any capitalized word
(\s|^) Capture either a whitespace or the beginning of text
([A-Z] Capture exactly one capitalized letter...
[^A-Z\s]*) ...followed by anything that's not uppercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
""";
#export
def replace_maj(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_maj(m):
tok = f'{TK_MAJ} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_maj.sub(_replace_maj, t)
test_eq(replace_maj("Jeremy Howard"), f'{TK_MAJ} jeremy {TK_MAJ} howard')
test_eq(replace_maj("I don't think there is any maj here"), ("i don't think there is any maj here"),)
#export
def lowercase(t, add_bos=True, add_eos=False):
"Converts `t` to lowercase"
return (f'{BOS} ' if add_bos else '') + t.lower().strip() + (f' {EOS}' if add_eos else '')
#export
def replace_space(t):
"Replace embedded spaces in a token with unicode line char to allow for split/join"
return t.replace(' ', '▁')
#export
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
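#Not part of the library: a quick sketch applying the default preprocessing rules, in order, to a made-up string.
processed = "I LOVED this movie soooo much!!"
for rule in defaults.text_proc_rules: processed = rule(processed)
test_eq(processed.split()[0], BOS)  #lowercase, the last rule, prepends the BOS token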
###Output
_____no_output_____
###Markdown
Tokenizing A tokenizer is a class that must be callable on a collection of texts, returning a generator with their tokenized versions. Here is the most basic example:
###Code
#export
class BaseTokenizer():
"Basic tokenizer that just splits on spaces"
def __init__(self, split_char=' ', **kwargs): self.split_char=split_char
def __call__(self, items): return (t.split(self.split_char) for t in items)
tok = BaseTokenizer()
for t in tok(["This is a text"]): test_eq(t, ["This", "is", "a", "text"])
tok = BaseTokenizer('x')
for t in tok(["This is a text"]): test_eq(t, ["This is a te", "t"])
#export
class SpacyTokenizer():
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, buf_sz=5000):
special_toks = ifnone(special_toks, defaults.text_spec_tok)
nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
for w in special_toks: nlp.tokenizer.add_special_case(w, [{ORTH: w}])
self.pipe,self.buf_sz = nlp.pipe,buf_sz
def __call__(self, items):
return (L(doc).attrgot('text') for doc in self.pipe(items, batch_size=self.buf_sz))
tok = SpacyTokenizer()
inp,exp = "This isn't the easiest text.",["This", "is", "n't", "the", "easiest", "text", "."]
test_eq(L(tok([inp]*5)), [exp]*5)
#export
class TokenizeBatch:
"A wrapper around `tok_func` to apply `rules` and tokenize in parallel"
def __init__(self, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs ):
self.rules = L(ifnone(rules, defaults.text_proc_rules))
self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
self.tok = tok_func(**tok_kwargs)
def __call__(self, batch):
return (L(o).map(self.post_f) for o in self.tok(maps(*self.rules, batch)))
f = TokenizeBatch()
test_eq(f(["This isn't a problem"]), [[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem']])
f = TokenizeBatch(BaseTokenizer, rules=[], split_char="'")
test_eq(f(["This isn't a problem"]), [['This▁isn', 't▁a▁problem']])
###Output
_____no_output_____
###Markdown
The main function that will be called during one of the processes handling tokenization. It creates an instance of a tokenizer with `tok_func` and `tok_kwargs` at init, then iterates through the `batch` of texts, applies the `rules` to them and tokenizes them.
###Code
texts = ["this is a text", "this is another text"]
tok = TokenizeBatch(BaseTokenizer, texts.__getitem__)
test_eq([t for t in tok([0,1])],[['this', 'is', 'a', 'text'], ['this', 'is', 'another', 'text']])
#export
def tokenize1(text, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs):
"Tokenize one `text` with an instance of `tok_func` and some `rules`"
return first(TokenizeBatch(tok_func, rules, post_rules, **tok_kwargs)([text]))
test_eq(tokenize1("This isn't a problem"),
[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem'])
test_eq(tokenize1("This isn't a problem", BaseTokenizer, rules=[], split_char="'"),
['This▁isn', 't▁a▁problem'])
#export
def parallel_tokenize(items, tok_func, rules, as_gen=False, n_workers=defaults.cpus, **tok_kwargs):
"Calls a potential setup on `tok_func` before launching `TokenizeBatch` in parallel"
if hasattr(tok_func, 'setup'): tok_kwargs = tok_func(**tok_kwargs).setup(items, rules)
return parallel_gen(TokenizeBatch, items, as_gen=as_gen, tok_func=tok_func,
rules=rules, n_workers=n_workers, **tok_kwargs)
###Output
_____no_output_____
###Markdown
Tokenize texts in files Preprocessing function for texts stored in files. Tokenized texts will be saved in a similar fashion in a directory suffixed with `_tok` in the parent folder of `path` (override with `output_dir`).
###Code
#export
fn_counter_pkl = 'counter.pkl'
#export
def tokenize_folder(path, extensions=None, folders=None, output_dir=None, n_workers=defaults.cpus,
rules=None, tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize text files in `path` in parallel using `n_workers`"
path,extensions = Path(path),ifnone(extensions, ['.txt'])
fnames = get_files(path, extensions=extensions, recurse=True, folders=folders)
output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
rules = Path.read + L(ifnone(rules, defaults.text_proc_rules.copy()))
counter = Counter()
for i,tok in parallel_tokenize(fnames, tok_func, rules, as_gen=True, n_workers=n_workers, **tok_kwargs):
out = output_dir/fnames[i].relative_to(path)
out.write(' '.join(tok))
counter.update(tok)
(output_dir/fn_counter_pkl).save(counter)
###Output
_____no_output_____
###Markdown
The result will be in `output_dir` (defaults to a folder in the same parent directory as `path`, with `_tok` added to `path.name`) with the same structure as in `path`. Tokenized texts for a given file will be in the file having the same name in `output_dir`, and the count of all words is stored in `output_dir/counter.pkl`. `extensions` defaults to `['.txt']` and all text files in `path` are processed unless you pass a list of subfolders in `folders`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (which default to `defaults.text_proc_rules`) are applied to each text before it goes into the tokenizer. Tokenize texts in a dataframe
###Code
#export
def _join_texts(df, mark_fields=False):
"Join texts in row `idx` of `df`, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
#hide
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'text1': texts}, columns=['text', 'text1'])
col = _join_texts(df, mark_fields=True)
for i in range(len(df)):
test_eq(col[i], f'{FLD} 1 This is an example of text {i} {FLD} 2 This is an example of text {i}')
#export
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize texts in `df[text_cols]` in parallel using `n_workers`"
text_cols = L(text_cols)
#mark_fields defaults to False if there is one column of texts, True if there are multiple
if mark_fields is None: mark_fields = len(text_cols)>1
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
texts = _join_texts(df[text_cols], mark_fields=mark_fields)
outputs = L(parallel_tokenize(texts, tok_func, rules, n_workers=n_workers, **tok_kwargs)
).sorted().itemgot(1)
other_cols = df.columns[~df.columns.isin(text_cols)]
res = df[other_cols].copy()
res['text'] = outputs
return res,Counter(outputs.concat())
###Output
_____no_output_____
###Markdown
This function returns a new dataframe with the same non-text columns and a column named `text` that contains the tokenized texts. It also returns a counter of all words seen, to quickly build a vocabulary afterward. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (which default to `defaults.text_proc_rules`) are applied to each text before it goes into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
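As an illustration of how the returned counter might be used, the sketch below builds a simple vocabulary with made-up cutoffs; it is not the vocabulary code the library actually uses.
###Code
#Hypothetical sketch: tokenize a toy dataframe, then keep the most frequent tokens, special tokens first.
toy_df = pd.DataFrame({'text': [f"Some sample text number {i}" for i in range(4)], 'label': [0,1,0,1]})
toy_out,toy_cnt = tokenize_df(toy_df, text_cols='text', n_workers=2)  #n_workers kept small for this tiny example
min_freq,max_vocab = 1,100  #made-up cutoffs, for illustration only
toy_vocab = defaults.text_spec_tok + [t for t,c in toy_cnt.most_common(max_vocab) if c >= min_freq and t not in defaults.text_spec_tok]
test_eq(toy_vocab[:2], [UNK, PAD])  #special tokens keep fixed positions at the start
###Output
_____no_output_____
###Markdown
Pinning the special tokens at the start of the vocabulary gives them fixed indices regardless of the corpus.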
###Code
#export
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, header='infer', chunksize=50000, **tok_kwargs):
"Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
df = pd.read_csv(fname, header=header, chunksize=chunksize)
outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
cnt = Counter()
for i,dfp in enumerate(df):
out,c = tokenize_df(dfp, text_cols, n_workers=n_workers, rules=rules,
mark_fields=mark_fields, tok_func=tok_func, **tok_kwargs)
out.text = out.text.str.join(' ')
out.to_csv(outname, header=(None,header)[i==0], index=False, mode=('a','w')[i==0])
cnt.update(c)
outname.with_suffix('.pkl').save(cnt)
#export
def load_tokenized_csv(fname):
"Utility function to quickly load a tokenized csv ans the corresponding counter"
fname = Path(fname)
out = pd.read_csv(fname)
for txt_col in out.columns[1:-1]:
out[txt_col] = out[txt_col].str.split(' ')
return out,fname.with_suffix('.pkl').load()
###Output
_____no_output_____
###Markdown
The result will be written to a new csv file in `outname` (defaults to the same as `fname` with the suffix `_tok.csv`) and will have the same header as the original file, the same non-text columns, and a `text` column as described in `tokenize_df`; the word counter is saved next to it with a `.pkl` suffix. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (which default to `defaults.text_proc_rules`) are applied to each text before it goes into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field. The csv file is read with `header` and in blocks of `chunksize` rows at a time; each chunk is processed independently and appended to the output file, which keeps memory usage bounded.
###Code
def _prepare_texts(tmp_d):
"Prepare texts in a folder struct in tmp_d, a csv file and returns a dataframe"
path = Path(tmp_d)/'tmp'
path.mkdir()
for d in ['a', 'b', 'c']:
(path/d).mkdir()
for i in range(5):
with open(path/d/f'text{i}.txt', 'w') as f: f.write(f"This is an example of text {d} {i}")
texts = [f"This is an example of text {d} {i}" for i in range(5) for d in ['a', 'b', 'c']]
df = pd.DataFrame({'text': texts, 'label': list(range(15))}, columns=['text', 'label'])
csv_fname = tmp_d/'input.csv'
df.to_csv(csv_fname, index=False)
return path,df,csv_fname
with tempfile.TemporaryDirectory() as tmp_d:
path,df,csv_fname = _prepare_texts(Path(tmp_d))
#Tokenize as folders
tokenize_folder(path)
outp = Path(tmp_d)/'tmp_tok'
for d in ['a', 'b', 'c']:
p = outp/d
for i in range(5):
test_eq((p/f'text{i}.txt').read(), ' '.join([
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', d, str(i) ]))
cnt_a = (outp/fn_counter_pkl).load()
test_eq(cnt_a['this'], 15)
test_eq(cnt_a['a'], 5)
test_eq(cnt_a['0'], 3)
#Tokenize as a dataframe
out,cnt_b = tokenize_df(df, text_cols='text')
test_eq(list(out.columns), ['label', 'text'])
test_eq(out['label'].values, df['label'].values)
test_eq(out['text'], [(outp/d/f'text{i}.txt').read().split(' ') for i in range(5) for d in ['a', 'b', 'c']])
test_eq(cnt_a, cnt_b)
#Tokenize as a csv
out_fname = Path(tmp_d)/'output.csv'
tokenize_csv(csv_fname, text_cols='text', outname=out_fname)
test_eq((out,cnt_b), load_tokenized_csv(out_fname))
###Output
_____no_output_____
###Markdown
Sentencepiece
###Code
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
#export
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
model_type='unigram', char_coverage=None, cache_dir='tmp'):
try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
except ImportError:
raise Exception('sentencepiece module is missing: run `pip install sentencepiece`')
self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
if sp_model is None: self.tok = None
else:
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
os.makedirs(self.cache_dir, exist_ok=True)
def _get_vocab_sz(self, raw_text_path):
cnt = Counter()
with open(raw_text_path, 'r') as f:
for line in f.readlines():
cnt.update(line.split())
if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
res = len(cnt)//4
while res%8 != 0: res+=1
return res
def train(self, raw_text_path):
"Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
from sentencepiece import SentencePieceTrainer
vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
spec_tokens = ['\u2581'+s for s in self.special_toks]
SentencePieceTrainer.Train(" ".join([
f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1",
f"--user_defined_symbols={','.join(spec_tokens)}"]))
raw_text_path.unlink()
return self.cache_dir/'spm.model'
def setup(self, items, rules):
if self.tok is not None: return {'sp_model': self.sp_model}
raw_text_path = self.cache_dir/'texts.out'
with open(raw_text_path, 'w') as f:
for t in progress_bar(maps(*rules, items), total=len(items), leave=False):
f.write(f'{t}\n')
return {'sp_model': self.train(raw_text_path)}
def __call__(self, items):
for t in items: yield self.tok.EncodeAsPieces(t)
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text', tok_func=SentencePieceTokenizer, vocab_sz=34)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_utils.ipynb.
Converted 01b_dispatch.ipynb.
Converted 01c_torch_core.ipynb.
Converted 02_script.ipynb.
Converted 03_dataloader.ipynb.
Converted 04_transform.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_vision_core.ipynb.
Converted 08_pets_tutorial.ipynb.
Converted 09_vision_augment.ipynb.
Converted 10_data_block.ipynb.
Converted 11_layers.ipynb.
Converted 11a_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_metrics.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
This cell doesn't have an export destination and was ignored:
e
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Text core> Basic functions to preprocess text before assembling it in a `DataBunch`.
###Code
#export
import spacy,html
from spacy.symbols import ORTH
###Output
_____no_output_____
###Markdown
Preprocessing rules The following are rules applied to texts before or after they are tokenized.
###Code
#export
#special tokens
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
#export
_all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
#export
_re_spec = re.compile(r'([/#\\])')
def spec_add_spaces(t):
"Add spaces around / and #"
return _re_spec.sub(r' \1 ', t)
test_eq(spec_add_spaces('#fastai'), ' # fastai')
test_eq(spec_add_spaces('/fastai'), ' / fastai')
test_eq(spec_add_spaces('\\fastai'), ' \\ fastai')
#export
_re_space = re.compile(' {2,}')
def rm_useless_spaces(t):
"Remove multiple spaces"
return _re_space.sub(' ', t)
test_eq(rm_useless_spaces('a b c'), 'a b c')
#export
_re_rep = re.compile(r'(\S)(\1{2,})')
def replace_rep(t):
"Replace repetitions at the character level: cccc -- TK_REP 4 c"
def _replace_rep(m):
c,cc = m.groups()
return f' {TK_REP} {len(cc)+1} {c} '
return _re_rep.sub(_replace_rep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same character or more.
###Code
test_eq(replace_rep('aa'), 'aa')
test_eq(replace_rep('aaaa'), f' {TK_REP} 4 a ')
#export
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
#hide
"""
Matches any word repeated at least three times with spaces between them
(?:\s|^) Non-Capture either a whitespace character or the beginning of text
(\w+) Capture any alphanumeric character
\s+ One or more whitespace
((?:\1\s+)+) Capture a repetition of one or more times \1 followed by one or more whitespace
\1 Occurence of \1
(\s|\W|$) Capture last whitespace, non alphanumeric character or end of text
""";
#export
def replace_wrep(t):
"Replace word repetitions: word word word word -- TK_WREP 4 word"
def _replace_wrep(m):
c,cc,e = m.groups()
return f' {TK_WREP} {len(cc.split())+2} {c} {e}'
return _re_wrep.sub(_replace_wrep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same word or more.
###Code
test_eq(replace_wrep('ah ah'), 'ah ah')
test_eq(replace_wrep('ah ah ah'), f' {TK_WREP} 3 ah ')
test_eq(replace_wrep('ah ah ah ah'), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah '), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah.'), f' {TK_WREP} 4 ah .')
test_eq(replace_wrep('ah ah ahi'), f'ah ah ahi')
#export
def fix_html(x):
"Various messy things we've seen in documents"
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
'#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(' @-@ ','-').replace('...',' …')
return html.unescape(x)
test_eq(fix_html('#39;bli#146;'), "'bli'")
test_eq(fix_html('Sarah amp; Duck...'), 'Sarah & Duck …')
test_eq(fix_html('a nbsp; #36;'), 'a $')
test_eq(fix_html('\\" <unk>'), f'" {UNK}')
test_eq(fix_html('quot; @.@ @-@ '), "' .-")
test_eq(fix_html('<br />text\\n'), '\ntext\n')
#export
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
#hide
"""
Catches any word in all caps, even with ' or - inside
(\s|^) Capture either a whitespace or the beginning of text
([A-Z]+ Capture one capitalized letter or more...
[^a-z\s]*) ...followed by anything that's non lowercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
""";
#export
def replace_all_caps(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_all_caps(m):
tok = f'{TK_UP} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_all_caps.sub(_replace_all_caps, t)
test_eq(replace_all_caps("I'M SHOUTING"), f"{TK_UP} i'm {TK_UP} shouting")
test_eq(replace_all_caps("I'm speaking normally"), "I'm speaking normally")
test_eq(replace_all_caps("I am speaking normally"), "i am speaking normally")
#export
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
#hide
"""
Catches any capitalized word
(\s|^) Capture either a whitespace or the beginning of text
([A-Z] Capture exactly one capitalized letter...
[^A-Z\s]*) ...followed by anything that's not uppercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
""";
#export
def replace_maj(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_maj(m):
tok = f'{TK_MAJ} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_maj.sub(_replace_maj, t)
test_eq(replace_maj("Jeremy Howard"), f'{TK_MAJ} jeremy {TK_MAJ} howard')
test_eq(replace_maj("I don't think there is any maj here"), ("i don't think there is any maj here"),)
#export
def lowercase(t, add_bos=True, add_eos=False):
"Converts `t` to lowercase"
return (f'{BOS} ' if add_bos else '') + t.lower().strip() + (f' {EOS}' if add_eos else '')
#export
def replace_space(t):
"Replace embedded spaces in a token with unicode line char to allow for split/join"
return t.replace(' ', '▁')
#export
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
###Output
_____no_output_____
###Markdown
Tokenizing A tokenizer is a class that must be callable on a collection of texts, returning a generator with their tokenized versions. Here is the most basic example:
###Code
#export
class BaseTokenizer():
"Basic tokenizer that just splits on spaces"
def __init__(self, split_char=' ', **kwargs): self.split_char=split_char
def __call__(self, items): return (t.split(self.split_char) for t in items)
tok = BaseTokenizer()
for t in tok(["This is a text"]): test_eq(t, ["This", "is", "a", "text"])
tok = BaseTokenizer('x')
for t in tok(["This is a text"]): test_eq(t, ["This is a te", "t"])
#export
class SpacyTokenizer():
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, buf_sz=5000):
special_toks = ifnone(special_toks, defaults.text_spec_tok)
nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
for w in special_toks: nlp.tokenizer.add_special_case(w, [{ORTH: w}])
self.pipe,self.buf_sz = nlp.pipe,buf_sz
def __call__(self, items):
return (L(doc).attrgot('text') for doc in self.pipe(items, batch_size=self.buf_sz))
tok = SpacyTokenizer()
inp,exp = "This isn't the easiest text.",["This", "is", "n't", "the", "easiest", "text", "."]
test_eq(L(tok([inp]*5)), [exp]*5)
#export
class TokenizeBatch:
"A wrapper around `tok_func` to apply `rules` and tokenize in parallel"
def __init__(self, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs ):
self.rules = L(ifnone(rules, defaults.text_proc_rules))
self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
self.tok = tok_func(**tok_kwargs)
def __call__(self, batch):
return (L(o).map(self.post_f) for o in self.tok(maps(*self.rules, batch)))
f = TokenizeBatch()
test_eq(f(["This isn't a problem"]), [[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem']])
f = TokenizeBatch(BaseTokenizer, rules=[], split_char="'")
test_eq(f(["This isn't a problem"]), [['This▁isn', 't▁a▁problem']])
###Output
_____no_output_____
###Markdown
The main function that will be called during one of the processes handling tokenization. It creates an instance of a tokenizer with `tok_func` and `tok_kwargs` at init, then iterates through the `batch` of texts, applies the `rules` to them and tokenizes them.
###Code
texts = ["this is a text", "this is another text"]
tok = TokenizeBatch(BaseTokenizer, texts.__getitem__)
test_eq([t for t in tok([0,1])],[['this', 'is', 'a', 'text'], ['this', 'is', 'another', 'text']])
#export
def tokenize1(text, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs):
"Tokenize one `text` with an instance of `tok_func` and some `rules`"
return first(TokenizeBatch(tok_func, rules, post_rules, **tok_kwargs)([text]))
test_eq(tokenize1("This isn't a problem"),
[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem'])
test_eq(tokenize1("This isn't a problem", BaseTokenizer, rules=[], split_char="'"),
['This▁isn', 't▁a▁problem'])
#export
def parallel_tokenize(items, tok_func, rules, as_gen=False, n_workers=defaults.cpus, **tok_kwargs):
"Calls a potential setup on `tok_func` before launching `TokenizeBatch` in parallel"
if hasattr(tok_func, 'setup'): tok_kwargs = tok_func(**tok_kwargs).setup(items, rules)
return parallel_gen(TokenizeBatch, items, as_gen=as_gen, tok_func=tok_func,
rules=rules, n_workers=n_workers, **tok_kwargs)
###Output
_____no_output_____
###Markdown
Tokenize texts in files Preprocessing function for texts stored in files. Tokenized texts will be saved in a similar fashion in a directory suffixed with `_tok` in the parent folder of `path` (override with `output_dir`).
###Code
#export
fn_counter_pkl = 'counter.pkl'
#export
def tokenize_folder(path, extensions=None, folders=None, output_dir=None, n_workers=defaults.cpus,
rules=None, tok_func=SpacyTokenizer, encoding='utf8', **tok_kwargs):
"Tokenize text files in `path` in parallel using `n_workers`"
path,extensions = Path(path),ifnone(extensions, ['.txt'])
fnames = get_files(path, extensions=extensions, recurse=True, folders=folders)
output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
rules = partial(Path.read, encoding=encoding) + L(ifnone(rules, defaults.text_proc_rules.copy()))
counter = Counter()
for i,tok in parallel_tokenize(fnames, tok_func, rules, as_gen=True, n_workers=n_workers, **tok_kwargs):
out = output_dir/fnames[i].relative_to(path)
out.write(' '.join(tok))
counter.update(tok)
(output_dir/fn_counter_pkl).save(counter)
#export
def read_tokenized_file(f): return L(f.read().split(' '))
###Output
_____no_output_____
###Markdown
The result will be in `output_dir` (defaults to a folder in the same parent directory as `path`, with `_tok` added to `path.name`) with the same structure as in `path`. Tokenized texts for a given file will be in the file having the same name in `output_dir`, and the count of all words is stored in `output_dir/counter.pkl`. `extensions` defaults to `['.txt']` and all text files in `path` are processed unless you pass a list of subfolders in `folders`; the files are read with `encoding` (default `'utf8'`). `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (which default to `defaults.text_proc_rules`) are applied to each text before it goes into the tokenizer. Tokenize texts in a dataframe
###Code
#export
def _join_texts(df, mark_fields=False):
"Join texts in row `idx` of `df`, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
#hide
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'text1': texts}, columns=['text', 'text1'])
col = _join_texts(df, mark_fields=True)
for i in range(len(df)):
test_eq(col[i], f'{FLD} 1 This is an example of text {i} {FLD} 2 This is an example of text {i}')
#export
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize texts in `df[text_cols]` in parallel using `n_workers`"
text_cols = L(text_cols)
#mark_fields defaults to False if there is one column of texts, True if there are multiple
if mark_fields is None: mark_fields = len(text_cols)>1
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
texts = _join_texts(df[text_cols], mark_fields=mark_fields)
outputs = L(parallel_tokenize(texts, tok_func, rules, n_workers=n_workers, **tok_kwargs)
).sorted().itemgot(1)
other_cols = df.columns[~df.columns.isin(text_cols)]
res = df[other_cols].copy()
res['text'] = outputs
return res,Counter(outputs.concat())
###Output
_____no_output_____
###Markdown
This function returns a new dataframe with the same non-text columns and a column named `text` that contains the tokenized texts. It also returns a counter of all words seen, to quickly build a vocabulary afterward. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (which default to `defaults.text_proc_rules`) are applied to each text before it goes into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
###Code
#export
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, header='infer', chunksize=50000, **tok_kwargs):
"Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
df = pd.read_csv(fname, header=header, chunksize=chunksize)
outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
cnt = Counter()
for i,dfp in enumerate(df):
out,c = tokenize_df(dfp, text_cols, n_workers=n_workers, rules=rules,
mark_fields=mark_fields, tok_func=tok_func, **tok_kwargs)
out.text = out.text.str.join(' ')
out.to_csv(outname, header=(None,header)[i==0], index=False, mode=('a','w')[i==0])
cnt.update(c)
outname.with_suffix('.pkl').save(cnt)
#export
def load_tokenized_csv(fname):
"Utility function to quickly load a tokenized csv ans the corresponding counter"
fname = Path(fname)
out = pd.read_csv(fname)
for txt_col in out.columns[1:-1]:
out[txt_col] = out[txt_col].str.split(' ')
return out,fname.with_suffix('.pkl').load()
###Output
_____no_output_____
###Markdown
The result will be written to a new csv file in `outname` (defaults to the same as `fname` with the suffix `_tok.csv`) and will have the same header as the original file, the same non-text columns, and a `text` column as described in `tokenize_df`; the word counter is saved next to it with a `.pkl` suffix. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (which default to `defaults.text_proc_rules`) are applied to each text before it goes into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field. The csv file is read with `header` and in blocks of `chunksize` rows at a time; each chunk is processed independently and appended to the output file, which keeps memory usage bounded.
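Here is a small, self-contained sketch of this chunked workflow; the file names, `chunksize=2` and `n_workers=2` are made up for illustration and are not part of the notebook's own tests.
###Code
#Hypothetical example: write a tiny csv, tokenize it in two chunks, then reload it together with its counter.
csv_in,csv_out = Path('sketch_in.csv'),Path('sketch_out.csv')  #made-up file names in the working directory
pd.DataFrame({'text': [f"Sample text number {i}" for i in range(4)], 'label': [0,1,0,1]}).to_csv(csv_in, index=False)
tokenize_csv(csv_in, text_cols='text', outname=csv_out, chunksize=2, n_workers=2)
tok_df,cnt_csv = load_tokenized_csv(csv_out)
for f in [csv_in, csv_out, csv_out.with_suffix('.pkl')]: f.unlink()  #clean up the sketch files
###Output
_____no_output_____
###Markdown
The counter is reloaded from the `.pkl` file written next to `outname`, so `load_tokenized_csv` needs no extra arguments.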
###Code
def _prepare_texts(tmp_d):
"Prepare texts in a folder struct in tmp_d, a csv file and returns a dataframe"
path = Path(tmp_d)/'tmp'
path.mkdir()
for d in ['a', 'b', 'c']:
(path/d).mkdir()
for i in range(5):
with open(path/d/f'text{i}.txt', 'w') as f: f.write(f"This is an example of text {d} {i}")
texts = [f"This is an example of text {d} {i}" for i in range(5) for d in ['a', 'b', 'c']]
df = pd.DataFrame({'text': texts, 'label': list(range(15))}, columns=['text', 'label'])
csv_fname = tmp_d/'input.csv'
df.to_csv(csv_fname, index=False)
return path,df,csv_fname
with tempfile.TemporaryDirectory() as tmp_d:
path,df,csv_fname = _prepare_texts(Path(tmp_d))
#Tokenize as folders
tokenize_folder(path)
outp = Path(tmp_d)/'tmp_tok'
for d in ['a', 'b', 'c']:
p = outp/d
for i in range(5):
test_eq((p/f'text{i}.txt').read(), ' '.join([
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', d, str(i) ]))
cnt_a = (outp/fn_counter_pkl).load()
test_eq(cnt_a['this'], 15)
test_eq(cnt_a['a'], 5)
test_eq(cnt_a['0'], 3)
#Tokenize as a dataframe
out,cnt_b = tokenize_df(df, text_cols='text')
test_eq(list(out.columns), ['label', 'text'])
test_eq(out['label'].values, df['label'].values)
test_eq(out['text'], [(outp/d/f'text{i}.txt').read().split(' ') for i in range(5) for d in ['a', 'b', 'c']])
test_eq(cnt_a, cnt_b)
#Tokenize as a csv
out_fname = Path(tmp_d)/'output.csv'
tokenize_csv(csv_fname, text_cols='text', outname=out_fname)
test_eq((out,cnt_b), load_tokenized_csv(out_fname))
###Output
_____no_output_____
###Markdown
Sentencepiece
###Code
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
#export
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
model_type='unigram', char_coverage=None, cache_dir='tmp'):
try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
except ImportError:
raise Exception('sentencepiece module is missing: run `pip install sentencepiece`')
self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
if sp_model is None: self.tok = None
else:
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
os.makedirs(self.cache_dir, exist_ok=True)
def _get_vocab_sz(self, raw_text_path):
cnt = Counter()
with open(raw_text_path, 'r') as f:
for line in f.readlines():
cnt.update(line.split())
if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
res = len(cnt)//4
while res%8 != 0: res+=1
return res
def train(self, raw_text_path):
"Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
from sentencepiece import SentencePieceTrainer
vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
spec_tokens = ['\u2581'+s for s in self.special_toks]
SentencePieceTrainer.Train(" ".join([
f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1",
f"--user_defined_symbols={','.join(spec_tokens)}"]))
raw_text_path.unlink()
return self.cache_dir/'spm.model'
def setup(self, items, rules):
if self.tok is not None: return {'sp_model': self.sp_model}
raw_text_path = self.cache_dir/'texts.out'
with open(raw_text_path, 'w') as f:
for t in progress_bar(maps(*rules, items), total=len(items), leave=False):
f.write(f'{t}\n')
return {'sp_model': self.train(raw_text_path)}
def __call__(self, items):
for t in items: yield self.tok.EncodeAsPieces(t)
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text', tok_func=SentencePieceTokenizer, vocab_sz=34)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_test.ipynb.
Converted 01_core_foundation.ipynb.
Converted 01a_core_utils.ipynb.
Converted 01b_core_dispatch.ipynb.
Converted 01c_core_transform.ipynb.
Converted 02_core_script.ipynb.
Converted 03_torchcore.ipynb.
Converted 03a_layers.ipynb.
Converted 04_data_load.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 09a_vision_data.ipynb.
Converted 09b_vision_utils.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 70_callback_wandb.ipynb.
Converted 71_callback_tensorboard.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
Converted xse_resnext.ipynb.
###Markdown
Text core> Basic functions to preprocess text before assembling it in a `DataBunch`.
###Code
#export
import concurrent.futures
from concurrent.futures import as_completed
from multiprocessing import Process, Queue
import spacy,html
from spacy.symbols import ORTH
###Output
_____no_output_____
###Markdown
Multiprocessing
###Code
#export
class ProcessPoolExecutor(concurrent.futures.ProcessPoolExecutor):
def __init__(self, max_workers=None, mp_context=None, initializer=None, initargs=()):
self.no_workers = max_workers==0
if self.no_workers: max_workers=1
super().__init__(max_workers, mp_context, initializer=initializer, initargs=initargs)
def map(self, f, items):
return [f(o) for o in items] if self.no_workers else super().map(f, items)
#export
def parallel(func, items, n_workers=defaults.cpus):
"Applies `func` in parallel to `items`, using `n_workers`"
with ProcessPoolExecutor(max_workers=n_workers) as ex:
return [x for x in progress_bar(ex.map(func,items), total=len(items), leave=False)]
def add_one(x):
time.sleep(random.random()/100)
return x+1
test_eq(parallel(add_one, range(100)), range(1,101))
test_eq(parallel(add_one, range(100), n_workers=1), range(1,101))
test_eq(parallel(add_one, range(100), n_workers=0), range(1,101))
#export
def parallel_gen(cls, items, n_workers=defaults.cpus, as_gen=False, **kwargs):
"Instantiate `cls` in `n_workers` procs & call each on a subset of `items` in parallel."
queue = Queue()
batches = np.array_split(items, n_workers)
idx = np.cumsum(0 + L(batches).map(len))
def _f(batch, start_idx):
f = cls(**kwargs)
for i,b in enumerate(f(batch)): queue.put((start_idx+i,b))
processes = [Process(target=_f, args=o) for o in zip(batches,idx)]
for p in processes: p.start()
try:
for _ in progress_bar(items, leave=False): yield queue.get()
except Exception as e: print(e)
finally:
for p in processes: p.join()
###Output
_____no_output_____
###Markdown
`cls` is any class with `__call__`. It will be passed `kwargs` when initialized. Note that `n_workers` instances of `cls` are created, one in each process. `items` are then split in `n_workers` batches and one is sent to each `cls`. The function yields tuples of item indices and results as they become available, so sort the collected results to restore the order of `items` (as in the test below).
###Code
class SleepyBatchFunc:
def __init__(self): self.a=1
def __call__(self, batch):
for k in batch:
time.sleep(random.random()/10)
yield k+self.a
x = np.linspace(0,0.99,100)
res = list(parallel_gen(SleepyBatchFunc, x, n_workers=2))
test_eq([o[1] for o in sorted(res)], x+1)
###Output
_____no_output_____
###Markdown
Preprocessing rules The following are rules applied to texts before or after they are tokenized.
###Code
#export
#special tokens
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
#export
_all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
#export
_re_spec = re.compile(r'([/#\\])')
def spec_add_spaces(t):
"Add spaces around / and #"
return _re_spec.sub(r' \1 ', t)
test_eq(spec_add_spaces('#fastai'), ' # fastai')
test_eq(spec_add_spaces('/fastai'), ' / fastai')
test_eq(spec_add_spaces('\\fastai'), ' \\ fastai')
#export
_re_space = re.compile(' {2,}')
def rm_useless_spaces(t):
"Remove multiple spaces"
return _re_space.sub(' ', t)
test_eq(rm_useless_spaces('a b c'), 'a b c')
#export
_re_rep = re.compile(r'(\S)(\1{2,})')
def replace_rep(t):
"Replace repetitions at the character level: cccc -- TK_REP 4 c"
def _replace_rep(m):
c,cc = m.groups()
return f' {TK_REP} {len(cc)+1} {c} '
return _re_rep.sub(_replace_rep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same character or more.
###Code
test_eq(replace_rep('aa'), 'aa')
test_eq(replace_rep('aaaa'), f' {TK_REP} 4 a ')
#export
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
#hide
"""
Matches any word repeated at least three times with spaces between them
(?:\s|^) Non-catching group with either a whitespace character or the beginning of text
(\w+) Catching group of any alphanumeric character
\s+ One or more whitespace
((?:\1\s+)+) Catching group of a repetition of one or more times \1 followed by one or more whitespace
\1 Occurence of \1
(\s|\W|$) Catching group of last whitespace, non alphanumeric character or end of text
""";
#export
def replace_wrep(t):
"Replace word repetitions: word word word word -- TK_WREP 4 word"
def _replace_wrep(m):
c,cc,e = m.groups()
return f' {TK_WREP} {len(cc.split())+2} {c} {e}'
return _re_wrep.sub(_replace_wrep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same word or more.
###Code
test_eq(replace_wrep('ah ah'), 'ah ah')
test_eq(replace_wrep('ah ah ah'), f' {TK_WREP} 3 ah ')
test_eq(replace_wrep('ah ah ah ah'), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah '), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah.'), f' {TK_WREP} 4 ah .')
test_eq(replace_wrep('ah ah ahi'), f'ah ah ahi')
#export
def fix_html(x):
"Various messy things we've seen in documents"
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
'#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(' @-@ ','-').replace('...',' …')
return html.unescape(x)
test_eq(fix_html('#39;bli#146;'), "'bli'")
test_eq(fix_html('Sarah amp; Duck...'), 'Sarah & Duck …')
test_eq(fix_html('a nbsp; #36;'), 'a $')
test_eq(fix_html('\\" <unk>'), f'" {UNK}')
test_eq(fix_html('quot; @.@ @-@ '), "' .-")
test_eq(fix_html('<br />text\\n'), '\ntext\n')
#export
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
#hide
"""
Catches any word in all caps, even with ' or - inside
(\s|^) Catching group with either a whitespace or the beginning of text
([A-Z]+ Catching group with one capitalized letter or more...
[^a-z\s]*) ...followed by anything that's non lowercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
The look ahead is there to not move the pointer ahead of the next space in case we have consecutive words in all caps.
""";
#export
def replace_all_caps(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_all_caps(m):
tok = f'{TK_UP} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_all_caps.sub(_replace_all_caps, t)
test_eq(replace_all_caps("I'M SHOUTING"), f"{TK_UP} i'm {TK_UP} shouting")
test_eq(replace_all_caps("I'm speaking normally"), "I'm speaking normally")
test_eq(replace_all_caps("I am speaking normally"), "i am speaking normally")
#export
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
#hide
"""
Catches any capitalized word
(\s|^) Catching group with either a whitespace or the beginning of text
([A-Z] Catching group with exactly one capitalized letter...
[^A-Z\s]*) ...followed by anything that's not uppercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
The look ahead is there to not move the pointer ahead of the next space in case we have consecutive capitalized words.
""";
#export
def replace_maj(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_maj(m):
tok = f'{TK_MAJ} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_maj.sub(_replace_maj, t)
test_eq(replace_maj("Jeremy Howard"), f'{TK_MAJ} jeremy {TK_MAJ} howard')
test_eq(replace_maj("I don't think there is any maj here"), ("i don't think there is any maj here"),)
#export
def lowercase(t, add_bos=True, add_eos=False):
"Converts `t` to lowercase"
return (f'{BOS} ' if add_bos else '') + t.lower().strip() + (f' {EOS}' if add_eos else '')
#export
def replace_space(t):
"Replace embedded spaces in a token with unicode line char to allow for split/join"
return t.replace(' ', '▁')
#export
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
###Output
_____no_output_____
###Markdown
Tokenizing A tokenizer is a class that must implement a `pipe` method. This `pipe` method receives a generator of texts and must return a generator with their tokenized versions. Here is the most basic example:
###Code
#export
class BaseTokenizer():
"Basic tokenizer that just splits on spaces"
def __init__(self, split_char=' ', **kwargs): self.split_char=split_char
def pipe(self, items): return (t.split(self.split_char) for t in items)
tok = BaseTokenizer()
for t in tok.pipe(["This is a text"]): test_eq(t, ["This", "is", "a", "text"])
tok = BaseTokenizer('x')
for t in tok.pipe(["This is a text"]): test_eq(t, ["This is a te", "t"])
#export
class SpacyTokenizer():
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, batch_size=5000):
special_toks = ifnone(special_toks, defaults.text_spec_tok)
self.nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
for w in special_toks: self.nlp.tokenizer.add_special_case(w, [{ORTH: w}])
self.batch_size=batch_size
def pipe(self, items):
for doc in self.nlp.pipe(items, batch_size=self.batch_size):
yield [d.text for d in doc]
tok = SpacyTokenizer()
for t in tok.pipe(["This isn't the easiest text."]):
test_eq(t, ["This", "is", "n't", "the", "easiest", "text", "."])
#export
def apply_rules(items, rules):
"Returns a generator that apply `rules` to `items`"
return map(compose(*rules), items)
for t in apply_rules(["This is a text"], [replace_maj]): test_eq(t, f"{TK_MAJ} this is a text")
#export
class TokenizeBatch:
"A wrapper around `tok_func` to apply `rules` and tokenize in parallel"
def __init__(self, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs ):
self.rules = L(ifnone(rules, defaults.text_proc_rules))
self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
self.tok = tok_func(**tok_kwargs)
def __call__(self, batch):
for o in self.tok.pipe(apply_rules(batch, self.rules)): yield L(o).map(self.post_f)
f = TokenizeBatch()
test_eq(f(["This isn't a problem"]), [[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem']])
f = TokenizeBatch(BaseTokenizer, rules=[], split_char="'")
test_eq(f(["This isn't a problem"]), [['This▁isn', 't▁a▁problem']])
###Output
_____no_output_____
###Markdown
The main function that will be called during one of the processes handling tokenization. It creates an instance of a tokenizer with `tok_func` and `tok_kwargs` at init, then iterates through the `batch` of texts, applies the `rules` to them and tokenizes them.
###Code
texts = ["this is a text", "this is another text"]
tok = TokenizeBatch(BaseTokenizer, texts.__getitem__)
test_eq([t for t in tok([0,1])],[['this', 'is', 'a', 'text'], ['this', 'is', 'another', 'text']])
#export
def tokenize1(text, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs):
"Tokenize one `text` with an instance of `tok_func` and some `rules`"
return next(iter(TokenizeBatch(tok_func, rules, post_rules, **tok_kwargs)([text])))
test_eq(tokenize1("This isn't a problem"),
[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem'])
test_eq(tokenize1("This isn't a problem", BaseTokenizer, rules=[], split_char="'"),
['This▁isn', 't▁a▁problem'])
#export
def parallel_tokenize(items, tok_func, rules, as_gen=False, n_workers=defaults.cpus, **tok_kwargs):
"Calls a potential setup on `tok_func` before launching `TokenizeBatch` in parallel"
if hasattr(tok_func, 'setup'): tok_kwargs = tok_func(**tok_kwargs).setup(items, rules)
return parallel_gen(TokenizeBatch, items, as_gen=as_gen, tok_func=tok_func,
rules=rules, n_workers=n_workers, **tok_kwargs)
###Output
_____no_output_____
###Markdown
Tokenize texts in files Preprocessing function for texts stored in files. Tokenized texts will be saved in a similar fashion in a directory suffixed with `_tok` in the parent folder of `path` (override with `output_dir`).
###Code
#export
@patch
def read(self:Path):
"Read the content of `fname`"
with self.open() as f: return f.read()
#export
@patch
def write(self:Path, txt):
"Write `txt` to `self`, creating directories as needed"
self.parent.mkdir(parents=True,exist_ok=True)
with self.open('w') as f: f.write(txt)
#export
def tokenize_folder(path, extensions=None, folders=None, output_dir=None, n_workers=defaults.cpus,
rules=None, tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize text files in `path` in parallel using `n_workers`"
path,extensions = Path(path),ifnone(extensions, ['.txt'])
fnames = get_files(path, extensions=extensions, recurse=True, folders=folders)
output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
rules = Path.read + L(ifnone(rules, defaults.text_proc_rules.copy()))
counter = Counter()
for i,tok in parallel_tokenize(fnames, tok_func, rules, as_gen=True, n_workers=n_workers, **tok_kwargs):
out = output_dir/fnames[i].relative_to(path)
out.write(' '.join(tok))
out.with_suffix('.len').write(str(len(tok)))
counter.update(tok)
pickle.dump(counter, open(output_dir/'counter.pkl','wb'))
###Output
_____no_output_____
###Markdown
The result will be in `output_dir` (defaults to a folder in the same parent directory as `path`, with `_tok` added to `path.name`) with the same structure as in `path`. Tokenized texts for a given file will be in the file having the same name in `output_dir`. Additionally, a file with a .len suffix contains the number of tokens and the count of all words is stored in `output_dir/counter.pkl`. `extensions` will default to `['.txt']` and all text files in `path` are processed unless you specify a list of folders in `folders`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer.
###Code
# TODO: test folders option
path = Path('tmp')
os.makedirs(path, exist_ok=True)
for d in ['a', 'b', 'c']:
os.makedirs(path/d, exist_ok=True)
for i in range(5):
with open(path/d/f'text{i}.txt', 'w') as f: f.write(f"This is an example of text {d} {i}")
tokenize_folder(path)
outp = Path('tmp_tok')
assert outp.is_dir()
for d in ['a', 'b', 'c']:
p = outp/d
assert p.is_dir()
for i in range(5):
assert (p/f'text{i}.txt').is_file()
assert (p/f'text{i}.len').is_file()
test_eq((p/f'text{i}.txt').read(), ' '.join([
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', d, str(i) ]))
test_eq((p/f'text{i}.len').read(), '10')
shutil.rmtree(path)
shutil.rmtree(outp)
###Output
_____no_output_____
###Markdown
Tokenize texts in a dataframe
###Code
#export
def _join_texts(df, mark_fields=False):
"Join the text columns of each row of `df`, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
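# With mark_fields=False the columns are simply joined with spaces; with mark_fields=True each
# field is prefixed by FLD and its 1-based index (illustrative check on a tiny dataframe).
_df = pd.DataFrame({'t1': ['a'], 't2': ['b']})
test_eq(_join_texts(_df)[0], 'a b')
test_eq(_join_texts(_df, mark_fields=True)[0], f'{FLD} 1 a {FLD} 2 b')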
#export
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize texts in `df[text_cols]` in parallel using `n_workers`"
text_cols = L(text_cols)
mark_fields = ifnone(mark_fields, len(text_cols) > 1)
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
texts = _join_texts(df[text_cols], mark_fields=mark_fields)
#outputs = L(parallel_tokenize(texts, tok_func, rules, n_workers=n_workers, **tok_kwargs))
outputs = L([o[1] for o in sorted(parallel_tokenize(texts, tok_func, rules, n_workers=n_workers, **tok_kwargs))])
lengths = outputs.map(len)
counter = Counter()
for o in outputs: counter.update(o)
other_cols = [c for c in df.columns if c not in text_cols]
res = df[other_cols].copy()
res['text'],res['text_lengths'] = outputs,lengths
return res,counter
###Output
_____no_output_____
###Markdown
This function returns a new dataframe with the same non-text columns, a column named text that contains the tokenized texts and a column named text_lengths that contains their respective lengths. It also returns a counter of all words seen, to quickly build a vocabulary afterward. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
###Code
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text')
test_eq(list(out.columns), ['label', 'text', 'text_lengths'])
test_eq(out['label'].values, df['label'].values)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i) ])
test_eq(out['text_lengths'][i], 9)
# hide
#With two columns of text, mark_fields defaults to True
df['text1'] = df['text'].values
out,cnt = tokenize_df(df, text_cols=['text', 'text1'])
test_eq(list(out.columns), ['label', 'text', 'text_lengths'])
test_eq(out['label'].values, df['label'].values)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, FLD, '1', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i),
FLD, '2', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i)
])
test_eq(out['text_lengths'][i], 21)
#export
#TODO: test + rework
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, header='infer', chunksize=None, **tok_kwargs):
"Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
df = pd.read_csv(fname, header=header, chunksize=chunksize)
outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
kwargs = dict(n_workers=n_workers, rules=rules, mark_fields=mark_fields, tok_func=tok_func, **tok_kwargs)
if chunksize is None:
out,cnt = tokenize_df(df, text_cols, **kwargs)
out.to_csv(outname, header=header, index=False)
else:
cnt = Counter()
for i,dfp in enumerate(df):
out,c = tokenize_df(dfp, text_cols, **kwargs)
out.to_csv(outname, header=header if i==0 else None, index=False, mode='w' if i==0 else 'a')
cnt.update(c)
pickle.dump(cnt, open(outname.parent/'counter.pkl', 'wb'))
###Output
_____no_output_____
###Markdown
The result will be written in a new csv file in `outname` (defaults to the same as `fname` with the suffix `_tok.csv`) and will have the same header as the original file, the same non-text columns, a text and a text_lengths column as described in `tokenize_df`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field. The csv file is opened with `header` and optionally with blocks of `chunksize` at a time. If this argument is passed, each chunk is processed independently and saved in the output file to save memory usage. Sentencepiece
###Code
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
#export
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
"SentencePiece tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
model_type='unigram', char_coverage=None, cache_dir='tmp'):
try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
except ImportError:
raise Exception('sentencepiece module is missing: run `pip install sentencepiece`')
self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
if sp_model is None: self.tok = None
else:
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
os.makedirs(self.cache_dir, exist_ok=True)
def _get_vocab_sz(self, raw_text_path):
cnt = Counter()
with open(raw_text_path, 'r') as f:
for line in f.readlines():
cnt.update(line.split())
if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
res = len(cnt)//4
while res%8 != 0: res+=1
return res
def train(self, raw_text_path):
"Train a SentencePiece model on the texts in `raw_text_path` and save it in `self.cache_dir`"
from sentencepiece import SentencePieceTrainer
vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
spec_tokens = ['\u2581'+s for s in self.special_toks]
SentencePieceTrainer.Train(" ".join([
f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1",
f"--user_defined_symbols={','.join(spec_tokens)}"]))
raw_text_path.unlink()
return self.cache_dir/'spm.model'
def setup(self, items, rules):
if self.tok is not None: return {'sp_model': self.sp_model}
raw_text_path = self.cache_dir/'texts.out'
with open(raw_text_path, 'w') as f:
for t in progress_bar(apply_rules(items, rules), total=len(items), leave=False):
f.write(f'{t}\n')
return {'sp_model': self.train(raw_text_path)}
def pipe(self, items):
for t in items: yield self.tok.EncodeAsPieces(t)
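# Sketch of the vocab size heuristic above (assumes sentencepiece is installed, as the example
# below needs it anyway): a quarter of the distinct whitespace-separated tokens, rounded up to a
# multiple of 8 and capped at max_vocab_sz, so 100 distinct tokens gives 100//4=25, rounded up to 32.
_vf = Path('_vocab_sz_check.txt')
_vf.write(' '.join(f'w{i}' for i in range(100)))
test_eq(SentencePieceTokenizer()._get_vocab_sz(_vf), 32)
_vf.unlink()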
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text', tok_func=SentencePieceTokenizer, vocab_sz=34)
out
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_torch_core.ipynb.
Converted 02_script.ipynb.
Converted 03_dataloader.ipynb.
Converted 04_transform.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_vision_core.ipynb.
Converted 08_pets_tutorial.ipynb.
Converted 09_vision_augment.ipynb.
Converted 11_layers.ipynb.
Converted 11a_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 15_callback_hook.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_metrics.ipynb.
Converted 21_tutorial_imagenette.ipynb.
Converted 22_vision_learner.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_index.ipynb.
Converted 95_utils_test.ipynb.
Converted 96_data_external.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Text core> Basic function to preprocess text before assembling it in a `DataBunch`.
###Code
#export
import spacy,html
from spacy.symbols import ORTH
###Output
_____no_output_____
###Markdown
Preprocessing rules The following are rules applied to texts before or after they are tokenized.
###Code
#export
#special tokens
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
#export
_all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
#export
_re_spec = re.compile(r'([/#\\])')
def spec_add_spaces(t):
"Add spaces around / and #"
return _re_spec.sub(r' \1 ', t)
test_eq(spec_add_spaces('#fastai'), ' # fastai')
test_eq(spec_add_spaces('/fastai'), ' / fastai')
test_eq(spec_add_spaces('\\fastai'), ' \\ fastai')
#export
_re_space = re.compile(' {2,}')
def rm_useless_spaces(t):
"Remove multiple spaces"
return _re_space.sub(' ', t)
test_eq(rm_useless_spaces('a b c'), 'a b c')
#export
_re_rep = re.compile(r'(\S)(\1{2,})')
def replace_rep(t):
"Replace repetitions at the character level: cccc -- TK_REP 4 c"
def _replace_rep(m):
c,cc = m.groups()
return f' {TK_REP} {len(cc)+1} {c} '
return _re_rep.sub(_replace_rep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same character or more.
###Code
test_eq(replace_rep('aa'), 'aa')
test_eq(replace_rep('aaaa'), f' {TK_REP} 4 a ')
#export
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
#hide
"""
Matches any word repeated at least three times with spaces between them
(?:\s|^) Non-Capture either a whitespace character or the beginning of text
(\w+) Capture any alphanumeric character
\s+ One or more whitespace
((?:\1\s+)+) Capture a repetition of one or more times \1 followed by one or more whitespace
\1 Occurrence of \1
(\s|\W|$) Capture last whitespace, non alphanumeric character or end of text
""";
#export
def replace_wrep(t):
"Replace word repetitions: word word word word -- TK_WREP 4 word"
def _replace_wrep(m):
c,cc,e = m.groups()
return f' {TK_WREP} {len(cc.split())+2} {c} {e}'
return _re_wrep.sub(_replace_wrep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same word or more.
###Code
test_eq(replace_wrep('ah ah'), 'ah ah')
test_eq(replace_wrep('ah ah ah'), f' {TK_WREP} 3 ah ')
test_eq(replace_wrep('ah ah ah ah'), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah '), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah.'), f' {TK_WREP} 4 ah .')
test_eq(replace_wrep('ah ah ahi'), f'ah ah ahi')
#export
def fix_html(x):
"Various messy things we've seen in documents"
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
'#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(' @-@ ','-').replace('...',' …')
return html.unescape(x)
test_eq(fix_html('#39;bli#146;'), "'bli'")
test_eq(fix_html('Sarah amp; Duck...'), 'Sarah & Duck …')
test_eq(fix_html('a nbsp; #36;'), 'a $')
test_eq(fix_html('\\" <unk>'), f'" {UNK}')
test_eq(fix_html('quot; @.@ @-@ '), "' .-")
test_eq(fix_html('<br />text\\n'), '\ntext\n')
#export
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
#hide
"""
Catches any word in all caps, even with ' or - inside
(\s|^) Capture either a whitespace or the beginning of text
([A-Z]+ Capture one capitalized letter or more...
[^a-z\s]*) ...followed by anything that's non lowercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
""";
#export
def replace_all_caps(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_all_caps(m):
tok = f'{TK_UP} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_all_caps.sub(_replace_all_caps, t)
test_eq(replace_all_caps("I'M SHOUTING"), f"{TK_UP} i'm {TK_UP} shouting")
test_eq(replace_all_caps("I'm speaking normally"), "I'm speaking normally")
test_eq(replace_all_caps("I am speaking normally"), "i am speaking normally")
#export
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
#hide
"""
Catches any capitalized word
(\s|^) Capture either a whitespace or the beginning of text
([A-Z] Capture exactly one capitalized letter...
[^A-Z\s]*) ...followed by anything that's not uppercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
""";
#export
def replace_maj(t):
"Replace tokens in Sentence Case by their lower version and add `TK_MAJ` before."
def _replace_maj(m):
tok = f'{TK_MAJ} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_maj.sub(_replace_maj, t)
test_eq(replace_maj("Jeremy Howard"), f'{TK_MAJ} jeremy {TK_MAJ} howard')
test_eq(replace_maj("I don't think there is any maj here"), ("i don't think there is any maj here"),)
#export
def lowercase(t, add_bos=True, add_eos=False):
"Converts `t` to lowercase"
return (f'{BOS} ' if add_bos else '') + t.lower().strip() + (f' {EOS}' if add_eos else '')
#export
def replace_space(t):
"Replace embedded spaces in a token with unicode line char to allow for split/join"
return t.replace(' ', '▁')
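# Quick checks of the two rules above: `lowercase` optionally wraps the text with BOS/EOS,
# and `replace_space` swaps embedded spaces for the ▁ character.
test_eq(lowercase("Hello"), f'{BOS} hello')
test_eq(lowercase("Hello", add_eos=True), f'{BOS} hello {EOS}')
test_eq(replace_space('a b'), 'a▁b')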
#export
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
###Output
_____no_output_____
###Markdown
Tokenizing A tokenizer is a class that must implement a `__call__` method. This `__call__` method receives a generator of texts and must return a generator with their tokenized versions. Here is the most basic example:
###Code
#export
class BaseTokenizer():
"Basic tokenizer that just splits on spaces"
def __init__(self, split_char=' ', **kwargs): self.split_char=split_char
def __call__(self, items): return (t.split(self.split_char) for t in items)
tok = BaseTokenizer()
for t in tok(["This is a text"]): test_eq(t, ["This", "is", "a", "text"])
tok = BaseTokenizer('x')
for t in tok(["This is a text"]): test_eq(t, ["This is a te", "t"])
#export
class SpacyTokenizer():
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, buf_sz=5000):
special_toks = ifnone(special_toks, defaults.text_spec_tok)
nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
for w in special_toks: nlp.tokenizer.add_special_case(w, [{ORTH: w}])
self.pipe,self.buf_sz = nlp.pipe,buf_sz
def __call__(self, items):
return (L(doc).attrgot('text') for doc in self.pipe(items, batch_size=self.buf_sz))
tok = SpacyTokenizer()
inp,exp = "This isn't the easiest text.",["This", "is", "n't", "the", "easiest", "text", "."]
test_eq(L(tok([inp]*5)), [exp]*5)
#export
class TokenizeBatch:
"A wrapper around `tok_func` to apply `rules` and tokenize in parallel"
def __init__(self, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs ):
self.rules = L(ifnone(rules, defaults.text_proc_rules))
self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
self.tok = tok_func(**tok_kwargs)
def __call__(self, batch):
return (L(o).map(self.post_f) for o in self.tok(maps(*self.rules, batch)))
f = TokenizeBatch()
test_eq(f(["This isn't a problem"]), [[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem']])
f = TokenizeBatch(BaseTokenizer, rules=[], split_char="'")
test_eq(f(["This isn't a problem"]), [['This▁isn', 't▁a▁problem']])
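# `post_rules` run on each token after tokenization; upper-casing every token here is purely
# illustrative (not one of the library defaults).
f = TokenizeBatch(BaseTokenizer, rules=[], post_rules=[str.upper])
test_eq(f(["hello world"]), [['HELLO', 'WORLD']])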
###Output
_____no_output_____
###Markdown
The main function that will be called during one of the processes handling tokenization. It will create an instance of a tokenizer with `tok_func` and `tok_kwargs` at init, then iterate through the `batch` of texts, apply the `rules` to them and tokenize them.
###Code
texts = ["this is a text", "this is another text"]
tok = TokenizeBatch(BaseTokenizer, texts.__getitem__)
test_eq([t for t in tok([0,1])],[['this', 'is', 'a', 'text'], ['this', 'is', 'another', 'text']])
#export
def tokenize1(text, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs):
"Tokenize one `text` with an instance of `tok_func` and some `rules`"
return next(iter(TokenizeBatch(tok_func, rules, post_rules, **tok_kwargs)([text])))
test_eq(tokenize1("This isn't a problem"),
[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem'])
test_eq(tokenize1("This isn't a problem", BaseTokenizer, rules=[], split_char="'"),
['This▁isn', 't▁a▁problem'])
#export
def parallel_tokenize(items, tok_func, rules, as_gen=False, n_workers=defaults.cpus, **tok_kwargs):
"Calls a potential setup on `tok_func` before launching `TokenizeBatch` in parallel"
if hasattr(tok_func, 'setup'): tok_kwargs = tok_func(**tok_kwargs).setup(items, rules)
return parallel_gen(TokenizeBatch, items, as_gen=as_gen, tok_func=tok_func,
rules=rules, n_workers=n_workers, **tok_kwargs)
###Output
_____no_output_____
###Markdown
Tokenize texts in files Preprocessing function for texts in filenames. Tokenized texts will be saved in a similar fashion in a directory suffixed with `_tok` in the parent folder of `path` (override with `output_dir`).
###Code
#export
fn_counter_pkl = 'counter.pkl'
#export
def tokenize_folder(path, extensions=None, folders=None, output_dir=None, n_workers=defaults.cpus,
rules=None, tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize text files in `path` in parallel using `n_workers`"
path,extensions = Path(path),ifnone(extensions, ['.txt'])
fnames = get_files(path, extensions=extensions, recurse=True, folders=folders)
output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
rules = Path.read + L(ifnone(rules, defaults.text_proc_rules.copy()))
counter = Counter()
for i,tok in parallel_tokenize(fnames, tok_func, rules, as_gen=True, n_workers=n_workers, **tok_kwargs):
out = output_dir/fnames[i].relative_to(path)
out.write(' '.join(tok))
counter.update(tok)
(output_dir/fn_counter_pkl).save(counter)
###Output
_____no_output_____
###Markdown
The result will be in `output_dir` (defaults to a folder in the same parent directory as `path`, with `_tok` added to `path.name`) with the same structure as in `path`. Tokenized texts for a given file will be in the file having the same name in `output_dir`. Additionally, the count of all words is stored in `output_dir/counter.pkl`. `extensions` will default to `['.txt']` and all text files in `path` are processed unless you specify a list of folders in `folders`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. Tokenize texts in a dataframe
###Code
#export
def _join_texts(df, mark_fields=False):
"Join the text columns of each row of `df`, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
#hide
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'text1': texts}, columns=['text', 'text1'])
col = _join_texts(df, mark_fields=True)
for i in range(len(df)):
test_eq(col[i], f'{FLD} 1 This is an example of text {i} {FLD} 2 This is an example of text {i}')
#export
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize texts in `df[text_cols]` in parallel using `n_workers`"
text_cols = L(text_cols)
#mark_fields defaults to False if there is one column of texts, True if there are multiple
if mark_fields is None: mark_fields = len(text_cols)>1
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
texts = _join_texts(df[text_cols], mark_fields=mark_fields)
outputs = L(parallel_tokenize(texts, tok_func, rules, n_workers=n_workers, **tok_kwargs)
).sorted().itemgot(1)
other_cols = df.columns[~df.columns.isin(text_cols)]
res = df[other_cols].copy()
res['text'] = outputs
return res,Counter(outputs.concat())
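# Small sketch: forcing mark_fields=True with a single text column still prefixes it with FLD 1
# (the column and values below are made up for illustration).
_df = pd.DataFrame({'text': ["some text"], 'label': [0]})
_out,_ = tokenize_df(_df, text_cols='text', mark_fields=True)
test_eq(_out['text'][0], [BOS, FLD, '1', 'some', 'text'])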
###Output
_____no_output_____
###Markdown
This function returns a new dataframe with the same non-text columns and a column named text that contains the tokenized texts. It also returns a counter of all words seen, to quickly build a vocabulary afterward. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
###Code
#export
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, header='infer', chunksize=50000, **tok_kwargs):
"Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
df = pd.read_csv(fname, header=header, chunksize=chunksize)
outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
cnt = Counter()
for i,dfp in enumerate(df):
out,c = tokenize_df(dfp, text_cols, n_workers=n_workers, rules=rules,
mark_fields=mark_fields, tok_func=tok_func, **tok_kwargs)
out.text = out.text.str.join(' ')
out.to_csv(outname, header=(None,header)[i==0], index=False, mode=('a','w')[i==0])
cnt.update(c)
outname.with_suffix('.pkl').save(cnt)
#export
def load_tokenized_csv(fname):
"Utility function to quickly load a tokenized csv and the corresponding counter"
fname = Path(fname)
out = pd.read_csv(fname)
for txt_col in out.columns[1:-1]:
out[txt_col] = out[txt_col].str.split(' ')
return out,fname.with_suffix('.pkl').load()
###Output
_____no_output_____
###Markdown
The result will be written in a new csv file in `outname` (defaults to the same as `fname` with the suffix `_tok.csv`) and will have the same header as the original file, the same non-text columns, and a text column as described in `tokenize_df`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field. The csv file is opened with `header` and optionally with blocks of `chunksize` at a time. If this argument is passed, each chunk is processed independently and saved in the output file to save memory usage.
###Code
def _prepare_texts(tmp_d):
"Prepare texts in a folder struct in tmp_d, a csv file and returns a dataframe"
path = Path(tmp_d)/'tmp'
path.mkdir()
for d in ['a', 'b', 'c']:
(path/d).mkdir()
for i in range(5):
with open(path/d/f'text{i}.txt', 'w') as f: f.write(f"This is an example of text {d} {i}")
texts = [f"This is an example of text {d} {i}" for i in range(5) for d in ['a', 'b', 'c']]
df = pd.DataFrame({'text': texts, 'label': list(range(15))}, columns=['text', 'label'])
csv_fname = tmp_d/'input.csv'
df.to_csv(csv_fname, index=False)
return path,df,csv_fname
with tempfile.TemporaryDirectory() as tmp_d:
path,df,csv_fname = _prepare_texts(Path(tmp_d))
#Tokenize as folders
tokenize_folder(path)
outp = Path(tmp_d)/'tmp_tok'
for d in ['a', 'b', 'c']:
p = outp/d
for i in range(5):
test_eq((p/f'text{i}.txt').read(), ' '.join([
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', d, str(i) ]))
cnt_a = (outp/fn_counter_pkl).load()
test_eq(cnt_a['this'], 15)
test_eq(cnt_a['a'], 5)
test_eq(cnt_a['0'], 3)
#Tokenize as a dataframe
out,cnt_b = tokenize_df(df, text_cols='text')
test_eq(list(out.columns), ['label', 'text'])
test_eq(out['label'].values, df['label'].values)
test_eq(out['text'], [(outp/d/f'text{i}.txt').read().split(' ') for i in range(5) for d in ['a', 'b', 'c']])
test_eq(cnt_a, cnt_b)
#Tokenize as a csv
out_fname = Path(tmp_d)/'output.csv'
tokenize_csv(csv_fname, text_cols='text', outname=out_fname)
test_eq((out,cnt_b), load_tokenized_csv(out_fname))
###Output
_____no_output_____
###Markdown
Sentencepiece
###Code
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
#export
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
"SentencePiece tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
model_type='unigram', char_coverage=None, cache_dir='tmp'):
try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
except ImportError:
raise Exception('sentencepiece module is missing: run `pip install sentencepiece`')
self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
if sp_model is None: self.tok = None
else:
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
os.makedirs(self.cache_dir, exist_ok=True)
def _get_vocab_sz(self, raw_text_path):
cnt = Counter()
with open(raw_text_path, 'r') as f:
for line in f.readlines():
cnt.update(line.split())
if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
res = len(cnt)//4
while res%8 != 0: res+=1
return res
def train(self, raw_text_path):
"Train a SentencePiece model on the texts in `raw_text_path` and save it in `self.cache_dir`"
from sentencepiece import SentencePieceTrainer
vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
spec_tokens = ['\u2581'+s for s in self.special_toks]
SentencePieceTrainer.Train(" ".join([
f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1",
f"--user_defined_symbols={','.join(spec_tokens)}"]))
raw_text_path.unlink()
return self.cache_dir/'spm.model'
def setup(self, items, rules):
if self.tok is not None: return {'sp_model': self.sp_model}
raw_text_path = self.cache_dir/'texts.out'
with open(raw_text_path, 'w') as f:
for t in progress_bar(maps(*rules, items), total=len(items), leave=False):
f.write(f'{t}\n')
return {'sp_model': self.train(raw_text_path)}
def __call__(self, items):
for t in items: yield self.tok.EncodeAsPieces(t)
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text', tok_func=SentencePieceTokenizer, vocab_sz=34)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_utils.ipynb.
Converted 01b_dispatch.ipynb.
Converted 01c_torch_core.ipynb.
Converted 02_script.ipynb.
Converted 03_dataloader.ipynb.
Converted 04_transform.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_vision_core.ipynb.
Converted 08_pets_tutorial.ipynb.
Converted 09_vision_augment.ipynb.
Converted 10_data_block.ipynb.
Converted 11_layers.ipynb.
Converted 11a_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_metrics.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
This cell doesn't have an export destination and was ignored:
e
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Text core> Basic function to preprocess text before assembling it in a `DataBunch`.
###Code
#export
import spacy,html
from spacy.symbols import ORTH
###Output
_____no_output_____
###Markdown
Preprocessing rules The following are rules applied to texts before or after they are tokenized.
###Code
#export
#special tokens
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
#export
_all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
#export
_re_spec = re.compile(r'([/#\\])')
def spec_add_spaces(t):
"Add spaces around / and #"
return _re_spec.sub(r' \1 ', t)
test_eq(spec_add_spaces('#fastai'), ' # fastai')
test_eq(spec_add_spaces('/fastai'), ' / fastai')
test_eq(spec_add_spaces('\\fastai'), ' \\ fastai')
#export
_re_space = re.compile(' {2,}')
def rm_useless_spaces(t):
"Remove multiple spaces"
return _re_space.sub(' ', t)
test_eq(rm_useless_spaces('a b c'), 'a b c')
#export
_re_rep = re.compile(r'(\S)(\1{2,})')
def replace_rep(t):
"Replace repetitions at the character level: cccc -- TK_REP 4 c"
def _replace_rep(m):
c,cc = m.groups()
return f' {TK_REP} {len(cc)+1} {c} '
return _re_rep.sub(_replace_rep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same character or more.
###Code
test_eq(replace_rep('aa'), 'aa')
test_eq(replace_rep('aaaa'), f' {TK_REP} 4 a ')
#export
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
#hide
"""
Matches any word repeated at least three times with spaces between them
(?:\s|^) Non-Capture either a whitespace character or the beginning of text
(\w+) Capture any alphanumeric character
\s+ One or more whitespace
((?:\1\s+)+) Capture a repetition of one or more times \1 followed by one or more whitespace
\1 Occurrence of \1
(\s|\W|$) Capture last whitespace, non alphanumeric character or end of text
""";
#export
def replace_wrep(t):
"Replace word repetitions: word word word word -- TK_WREP 4 word"
def _replace_wrep(m):
c,cc,e = m.groups()
return f' {TK_WREP} {len(cc.split())+2} {c} {e}'
return _re_wrep.sub(_replace_wrep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same word or more.
###Code
test_eq(replace_wrep('ah ah'), 'ah ah')
test_eq(replace_wrep('ah ah ah'), f' {TK_WREP} 3 ah ')
test_eq(replace_wrep('ah ah ah ah'), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah '), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah.'), f' {TK_WREP} 4 ah .')
test_eq(replace_wrep('ah ah ahi'), f'ah ah ahi')
#export
def fix_html(x):
"Various messy things we've seen in documents"
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
'#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(' @-@ ','-').replace('...',' …')
return html.unescape(x)
test_eq(fix_html('#39;bli#146;'), "'bli'")
test_eq(fix_html('Sarah amp; Duck...'), 'Sarah & Duck …')
test_eq(fix_html('a nbsp; #36;'), 'a $')
test_eq(fix_html('\\" <unk>'), f'" {UNK}')
test_eq(fix_html('quot; @.@ @-@ '), "' .-")
test_eq(fix_html('<br />text\\n'), '\ntext\n')
#export
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
#hide
"""
Catches any word in all caps, even with ' or - inside
(\s|^) Capture either a whitespace or the beginning of text
([A-Z]+ Capture one capitalized letter or more...
[^a-z\s]*) ...followed by anything that's non lowercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
""";
#export
def replace_all_caps(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_all_caps(m):
tok = f'{TK_UP} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_all_caps.sub(_replace_all_caps, t)
test_eq(replace_all_caps("I'M SHOUTING"), f"{TK_UP} i'm {TK_UP} shouting")
test_eq(replace_all_caps("I'm speaking normally"), "I'm speaking normally")
test_eq(replace_all_caps("I am speaking normally"), "i am speaking normally")
#export
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
#hide
"""
Catches any capitalized word
(\s|^) Capture either a whitespace or the beginning of text
([A-Z] Capture exactly one capitalized letter...
[^A-Z\s]*) ...followed by anything that's not uppercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
""";
#export
def replace_maj(t):
"Replace tokens in Sentence Case by their lower version and add `TK_MAJ` before."
def _replace_maj(m):
tok = f'{TK_MAJ} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_maj.sub(_replace_maj, t)
test_eq(replace_maj("Jeremy Howard"), f'{TK_MAJ} jeremy {TK_MAJ} howard')
test_eq(replace_maj("I don't think there is any maj here"), ("i don't think there is any maj here"),)
#export
def lowercase(t, add_bos=True, add_eos=False):
"Converts `t` to lowercase"
return (f'{BOS} ' if add_bos else '') + t.lower().strip() + (f' {EOS}' if add_eos else '')
#export
def replace_space(t):
"Replace embedded spaces in a token with unicode line char to allow for split/join"
return t.replace(' ', '▁')
#export
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
###Output
_____no_output_____
###Markdown
Tokenizing A tokenizer is a class that must implement a `__call__` method. This `__call__` method receives a generator of texts and must return a generator with their tokenized versions. Here is the most basic example:
###Code
#export
class BaseTokenizer():
"Basic tokenizer that just splits on spaces"
def __init__(self, split_char=' ', **kwargs): self.split_char=split_char
def __call__(self, items): return (t.split(self.split_char) for t in items)
tok = BaseTokenizer()
for t in tok(["This is a text"]): test_eq(t, ["This", "is", "a", "text"])
tok = BaseTokenizer('x')
for t in tok(["This is a text"]): test_eq(t, ["This is a te", "t"])
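# Any callable with the same "generator of texts in, generator of token lists out" shape can be
# used as a tokenizer; e.g. this hypothetical character-level tokenizer (not part of the library).
class CharTokenizer():
    "Tokenize by splitting texts into individual characters"
    def __init__(self, **kwargs): pass
    def __call__(self, items): return (list(t) for t in items)
tok = CharTokenizer()
for t in tok(["abc"]): test_eq(t, ["a", "b", "c"])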
#export
class SpacyTokenizer():
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, buf_sz=5000):
special_toks = ifnone(special_toks, defaults.text_spec_tok)
nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
for w in special_toks: nlp.tokenizer.add_special_case(w, [{ORTH: w}])
self.pipe,self.buf_sz = nlp.pipe,buf_sz
def __call__(self, items):
return (L(doc).attrgot('text') for doc in self.pipe(items, batch_size=self.buf_sz))
tok = SpacyTokenizer()
inp,exp = "This isn't the easiest text.",["This", "is", "n't", "the", "easiest", "text", "."]
test_eq(L(tok([inp]*5)), [exp]*5)
#export
class TokenizeBatch:
"A wrapper around `tok_func` to apply `rules` and tokenize in parallel"
def __init__(self, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs ):
self.rules = L(ifnone(rules, defaults.text_proc_rules))
self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
self.tok = tok_func(**tok_kwargs)
def __call__(self, batch):
return (L(o).map(self.post_f) for o in self.tok(maps(*self.rules, batch)))
f = TokenizeBatch()
test_eq(f(["This isn't a problem"]), [[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem']])
f = TokenizeBatch(BaseTokenizer, rules=[], split_char="'")
test_eq(f(["This isn't a problem"]), [['This▁isn', 't▁a▁problem']])
###Output
_____no_output_____
###Markdown
The main function that will be called during one of the processes handling tokenization. It will create an instance of a tokenizer with `tok_func` and `tok_kwargs` at init, then iterate through the `batch` of texts, apply the `rules` to them and tokenize them.
###Code
texts = ["this is a text", "this is another text"]
tok = TokenizeBatch(BaseTokenizer, texts.__getitem__)
test_eq([t for t in tok([0,1])],[['this', 'is', 'a', 'text'], ['this', 'is', 'another', 'text']])
#export
def tokenize1(text, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs):
"Tokenize one `text` with an instance of `tok_func` and some `rules`"
return next(iter(TokenizeBatch(tok_func, rules, post_rules, **tok_kwargs)([text])))
test_eq(tokenize1("This isn't a problem"),
[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem'])
test_eq(tokenize1("This isn't a problem", BaseTokenizer, rules=[], split_char="'"),
['This▁isn', 't▁a▁problem'])
#export
def parallel_tokenize(items, tok_func, rules, as_gen=False, n_workers=defaults.cpus, **tok_kwargs):
"Calls a potential setup on `tok_func` before launching `TokenizeBatch` in parallel"
if hasattr(tok_func, 'setup'): tok_kwargs = tok_func(**tok_kwargs).setup(items, rules)
return parallel_gen(TokenizeBatch, items, as_gen=as_gen, tok_func=tok_func,
rules=rules, n_workers=n_workers, **tok_kwargs)
###Output
_____no_output_____
###Markdown
Tokenize texts in files Preprocessing function for texts in filenames. Tokenized texts will be saved in a similar fashion in a directory suffixed with `_tok` in the parent folder of `path` (override with `output_dir`).
###Code
#export
fn_counter_pkl = 'counter.pkl'
#export
def tokenize_folder(path, extensions=None, folders=None, output_dir=None, n_workers=defaults.cpus,
rules=None, tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize text files in `path` in parallel using `n_workers`"
path,extensions = Path(path),ifnone(extensions, ['.txt'])
fnames = get_files(path, extensions=extensions, recurse=True, folders=folders)
output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
rules = Path.read + L(ifnone(rules, defaults.text_proc_rules.copy()))
counter = Counter()
for i,tok in parallel_tokenize(fnames, tok_func, rules, as_gen=True, n_workers=n_workers, **tok_kwargs):
out = output_dir/fnames[i].relative_to(path)
out.write(' '.join(tok))
counter.update(tok)
(output_dir/fn_counter_pkl).save(counter)
###Output
_____no_output_____
###Markdown
The result will be in `output_dir` (defaults to a folder in the same parent directory as `path`, with `_tok` added to `path.name`) with the same structure as in `path`. Tokenized texts for a given file will be in the file having the same name in `output_dir`. Additionally, the count of all words is stored in `output_dir/counter.pkl`. `extensions` will default to `['.txt']` and all text files in `path` are processed unless you specify a list of folders in `folders`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. Tokenize texts in a dataframe
###Code
#export
def _join_texts(df, mark_fields=False):
"Join the text columns of each row of `df`, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
#hide
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'text1': texts}, columns=['text', 'text1'])
col = _join_texts(df, mark_fields=True)
for i in range(len(df)):
test_eq(col[i], f'{FLD} 1 This is an example of text {i} {FLD} 2 This is an example of text {i}')
#export
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize texts in `df[text_cols]` in parallel using `n_workers`"
text_cols = L(text_cols)
#mark_fields defaults to False if there is one column of texts, True if there are multiple
if mark_fields is None: mark_fields = len(text_cols)>1
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
texts = _join_texts(df[text_cols], mark_fields=mark_fields)
outputs = L(parallel_tokenize(texts, tok_func, rules, n_workers=n_workers, **tok_kwargs)
).sorted().itemgot(1)
other_cols = df.columns[~df.columns.isin(text_cols)]
res = df[other_cols].copy()
res['text'] = outputs
return res,Counter(outputs.concat())
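# Note the difference between rules=None (use defaults.text_proc_rules) and rules=[] (no rules at
# all); a small sketch with the base tokenizer, which then just splits each text on spaces.
_df = pd.DataFrame({'text': ["a b", "c d"], 'label': [0, 1]})
_out,_cnt = tokenize_df(_df, text_cols='text', rules=[], tok_func=BaseTokenizer)
test_eq(_out['text'][0], ['a', 'b'])
test_eq(_cnt['c'], 1)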
###Output
_____no_output_____
###Markdown
This function returns a new dataframe with the same non-text columns and a column named text that contains the tokenized texts. It also returns a counter of all words seen, to quickly build a vocabulary afterward. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
###Code
#export
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, header='infer', chunksize=50000, **tok_kwargs):
"Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
df = pd.read_csv(fname, header=header, chunksize=chunksize)
outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
cnt = Counter()
for i,dfp in enumerate(df):
out,c = tokenize_df(dfp, text_cols, n_workers=n_workers, rules=rules,
mark_fields=mark_fields, tok_func=tok_func, **tok_kwargs)
out.text = out.text.str.join(' ')
out.to_csv(outname, header=(None,header)[i==0], index=False, mode=('a','w')[i==0])
cnt.update(c)
outname.with_suffix('.pkl').save(cnt)
#export
def load_tokenized_csv(fname):
"Utility function to quickly load a tokenized csv and the corresponding counter"
fname = Path(fname)
out = pd.read_csv(fname)
for txt_col in out.columns[1:-1]:
out[txt_col] = out[txt_col].str.split(' ')
return out,fname.with_suffix('.pkl').load()
###Output
_____no_output_____
###Markdown
The result will be written in a new csv file in `outname` (defaults to the same as `fname` with the suffix `_tok.csv`) and will have the same header as the original file, the same non-text columns, and a text column as described in `tokenize_df`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field. The csv file is opened with `header` and optionally with blocks of `chunksize` at a time. If this argument is passed, each chunk is processed independently and saved in the output file to save memory usage.
###Code
def _prepare_texts(tmp_d):
"Prepare texts in a folder struct in tmp_d, a csv file and returns a dataframe"
path = Path(tmp_d)/'tmp'
path.mkdir()
for d in ['a', 'b', 'c']:
(path/d).mkdir()
for i in range(5):
with open(path/d/f'text{i}.txt', 'w') as f: f.write(f"This is an example of text {d} {i}")
texts = [f"This is an example of text {d} {i}" for i in range(5) for d in ['a', 'b', 'c']]
df = pd.DataFrame({'text': texts, 'label': list(range(15))}, columns=['text', 'label'])
csv_fname = tmp_d/'input.csv'
df.to_csv(csv_fname, index=False)
return path,df,csv_fname
with tempfile.TemporaryDirectory() as tmp_d:
path,df,csv_fname = _prepare_texts(Path(tmp_d))
#Tokenize as folders
tokenize_folder(path)
outp = Path(tmp_d)/'tmp_tok'
for d in ['a', 'b', 'c']:
p = outp/d
for i in range(5):
test_eq((p/f'text{i}.txt').read(), ' '.join([
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', d, str(i) ]))
cnt_a = (outp/fn_counter_pkl).load()
test_eq(cnt_a['this'], 15)
test_eq(cnt_a['a'], 5)
test_eq(cnt_a['0'], 3)
#Tokenize as a dataframe
out,cnt_b = tokenize_df(df, text_cols='text')
test_eq(list(out.columns), ['label', 'text'])
test_eq(out['label'].values, df['label'].values)
test_eq(out['text'], [(outp/d/f'text{i}.txt').read().split(' ') for i in range(5) for d in ['a', 'b', 'c']])
test_eq(cnt_a, cnt_b)
#Tokenize as a csv
out_fname = Path(tmp_d)/'output.csv'
tokenize_csv(csv_fname, text_cols='text', outname=out_fname)
test_eq((out,cnt_b), load_tokenized_csv(out_fname))
###Output
_____no_output_____
###Markdown
Sentencepiece
###Code
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
#export
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
"SentencePiece tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
model_type='unigram', char_coverage=None, cache_dir='tmp'):
try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
except ImportError:
raise Exception('sentencepiece module is missing: run `pip install sentencepiece`')
self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
if sp_model is None: self.tok = None
else:
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
os.makedirs(self.cache_dir, exist_ok=True)
def _get_vocab_sz(self, raw_text_path):
cnt = Counter()
with open(raw_text_path, 'r') as f:
for line in f.readlines():
cnt.update(line.split())
if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
res = len(cnt)//4
while res%8 != 0: res+=1
return res
def train(self, raw_text_path):
"Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
from sentencepiece import SentencePieceTrainer
vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
spec_tokens = ['\u2581'+s for s in self.special_toks]
SentencePieceTrainer.Train(" ".join([
f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1",
f"--user_defined_symbols={','.join(spec_tokens)}"]))
raw_text_path.unlink()
return self.cache_dir/'spm.model'
def setup(self, items, rules):
if self.tok is not None: return {'sp_model': self.sp_model}
raw_text_path = self.cache_dir/'texts.out'
with open(raw_text_path, 'w') as f:
for t in progress_bar(maps(*rules, items), total=len(items), leave=False):
f.write(f'{t}\n')
return {'sp_model': self.train(raw_text_path)}
def __call__(self, items):
for t in items: yield self.tok.EncodeAsPieces(t)
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text', tok_func=SentencePieceTokenizer, vocab_sz=34)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_utils.ipynb.
Converted 01b_dispatch.ipynb.
Converted 01c_torch_core.ipynb.
Converted 02_script.ipynb.
Converted 03_dataloader.ipynb.
Converted 04_transform.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_vision_core.ipynb.
Converted 08_pets_tutorial.ipynb.
Converted 09_vision_augment.ipynb.
Converted 10_data_block.ipynb.
Converted 11_layers.ipynb.
Converted 11a_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_metrics.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
This cell doesn't have an export destination and was ignored:
e
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Text core> Basic function to preprocess text before assembling it in a `DataBunch`.
###Code
#export
import concurrent.futures
from concurrent.futures import as_completed
from multiprocessing import Process, Queue
import spacy,html
from spacy.symbols import ORTH
###Output
_____no_output_____
###Markdown
Multiprocessing
###Code
#export
class ProcessPoolExecutor(concurrent.futures.ProcessPoolExecutor):
def __init__(self, max_workers=None, mp_context=None, initializer=None, initargs=()):
self.no_workers = max_workers==0
if self.no_workers: max_workers=1
super().__init__(max_workers, mp_context, initializer=initializer, initargs=initargs)
def map(self, f, items):
return [f(o) for o in items] if self.no_workers else super().map(f, items)
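# With max_workers=0 the executor above skips multiprocessing entirely and runs the function
# inline, which keeps tracebacks readable while debugging.
with ProcessPoolExecutor(max_workers=0) as ex:
    test_eq(list(ex.map(abs, [-1, -2])), [1, 2])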
#export
def parallel(func, items, n_workers=defaults.cpus):
"Applies `func` in parallel to `items`, using `n_workers`"
with ProcessPoolExecutor(max_workers=n_workers) as ex:
return [x for x in progress_bar(ex.map(func,items), total=len(items), leave=False)]
def add_one(x):
time.sleep(random.random()/100)
return x+1
test_eq(parallel(add_one, range(100)), range(1,101))
test_eq(parallel(add_one, range(100), n_workers=1), range(1,101))
test_eq(parallel(add_one, range(100), n_workers=0), range(1,101))
#export
def parallel_gen(cls, items, n_workers=defaults.cpus, as_gen=False, **kwargs):
"Instantiate `cls` in `n_workers` procs & call each on a subset of `items` in parallel."
queue = Queue()
batches = np.array_split(items, n_workers)
idx = np.cumsum(0 + L(batches).mapped(len))
def _f(batch, start_idx):
f = cls(**kwargs)
for i,b in enumerate(f(batch)): queue.put((start_idx+i,b))
processes = [Process(target=_f, args=o) for o in zip(batches,idx)]
for p in processes: p.start()
res = (queue.get() for _ in progress_bar(items, leave=False))
try: return res if as_gen else [o[1] for o in sorted(res)]
finally:
for p in processes: p.join()
###Output
_____no_output_____
###Markdown
`cls` is any class with `__call__`. It will be passed `kwargs` when initialized. Note that `n_workers` instances of `cls` are created, one in each process. `items` are then split into `n_workers` batches and one is sent to each `cls`. The function then returns a list of all the results, matching the order of `items` (if not `as_gen`) or a generator of tuples of item indices and results (if `as_gen`).
###Code
class SleepyBatchFunc:
def __init__(self): self.a=1
def __call__(self, batch):
for k in batch:
time.sleep(random.random()/10)
yield k+self.a
x = np.linspace(0,0.99,100)
res = parallel_gen(SleepyBatchFunc, x, n_workers=2)
test_eq(res, x+1)
###Output
_____no_output_____
###Markdown
Preprocessing rules The following are rules applied to texts before or after they are tokenized.
###Code
#export
#special tokens
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
#export
_all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
#export
_re_spec = re.compile(r'([/#\\])')
def spec_add_spaces(t):
"Add spaces around / and #"
return _re_spec.sub(r' \1 ', t)
test_eq(spec_add_spaces('#fastai'), ' # fastai')
test_eq(spec_add_spaces('/fastai'), ' / fastai')
test_eq(spec_add_spaces('\\fastai'), ' \\ fastai')
#export
_re_space = re.compile(' {2,}')
def rm_useless_spaces(t):
"Remove multiple spaces"
return _re_space.sub(' ', t)
test_eq(rm_useless_spaces('a b c'), 'a b c')
#export
_re_rep = re.compile(r'(\S)(\1{2,})')
def replace_rep(t):
"Replace repetitions at the character level: cccc -- TK_REP 4 c"
def _replace_rep(m):
c,cc = m.groups()
return f' {TK_REP} {len(cc)+1} {c} '
return _re_rep.sub(_replace_rep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same character or more.
###Code
test_eq(replace_rep('aa'), 'aa')
test_eq(replace_rep('aaaa'), f' {TK_REP} 4 a ')
#export
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
#hide
"""
Matches any word repeated at least three times, with spaces between the repetitions
(?:\s|^) Non-catching group with either a whitespace character or the beginning of text
(\w+) Catching group of any alphanumeric character
\s+ One or more whitespace
((?:\1\s+)+) Catching group of a repetition of one or more times \1 followed by one or more whitespace
\1           Occurrence of \1
(\s|\W|$) Catching group of last whitespace, non alphanumeric character or end of text
""";
#export
def replace_wrep(t):
"Replace word repetitions: word word word word -- TK_WREP 4 word"
def _replace_wrep(m):
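        # group 2 (`cc`) only holds the middle repetitions; the first and last occurrence sit
        # outside it, hence the +2 on the count below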
c,cc,e = m.groups()
return f' {TK_WREP} {len(cc.split())+2} {c} {e}'
return _re_wrep.sub(_replace_wrep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same word or more.
###Code
test_eq(replace_wrep('ah ah'), 'ah ah')
test_eq(replace_wrep('ah ah ah'), f' {TK_WREP} 3 ah ')
test_eq(replace_wrep('ah ah ah ah'), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah '), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah.'), f' {TK_WREP} 4 ah .')
test_eq(replace_wrep('ah ah ahi'), f'ah ah ahi')
#export
def fix_html(x):
"Various messy things we've seen in documents"
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
'#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(' @-@ ','-').replace('...',' …')
return html.unescape(x)
test_eq(fix_html('#39;bli#146;'), "'bli'")
test_eq(fix_html('Sarah amp; Duck...'), 'Sarah & Duck …')
test_eq(fix_html('a nbsp; #36;'), 'a $')
test_eq(fix_html('\\" <unk>'), f'" {UNK}')
test_eq(fix_html('quot; @.@ @-@ '), "' .-")
test_eq(fix_html('<br />text\\n'), '\ntext\n')
#export
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
#hide
"""
Catches any word in all caps, even with ' or - inside
(\s|^) Catching group with either a whitespace or the beginning of text
([A-Z]+ Catching group with one capitalized letter or more...
[^a-z\s]*) ...followed by anything that's non lowercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
The look ahead is there to not move the pointer ahead of the next space in case we have consecutive words in all caps.
""";
#export
def replace_all_caps(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_all_caps(m):
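        # single-character words such as "I" are only lowercased; the TK_UP marker is skipped for them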
tok = f'{TK_UP} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_all_caps.sub(_replace_all_caps, t)
test_eq(replace_all_caps("I'M SHOUTING"), f"{TK_UP} i'm {TK_UP} shouting")
test_eq(replace_all_caps("I'm speaking normally"), "I'm speaking normally")
test_eq(replace_all_caps("I am speaking normally"), "i am speaking normally")
#export
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
#hide
"""
Catches any capitalized word
(\s|^) Catching group with either a whitespace or the beginning of text
([A-Z] Catching group with exactly one capitalized letter...
[^A-Z\s]*) ...followed by anything that's not uppercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
The look ahead is there to not move the pointer ahead of the next space in case we have consecutive words in all caps.
""";
#export
def replace_maj(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_maj(m):
tok = f'{TK_MAJ} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_maj.sub(_replace_maj, t)
test_eq(replace_maj("Jeremy Howard"), f'{TK_MAJ} jeremy {TK_MAJ} howard')
test_eq(replace_maj("I don't think there is any maj here"), ("i don't think there is any maj here"),)
#export
def lowercase(t, add_bos=True, add_eos=False):
"Converts `t` to lowercase"
return (f'{BOS} ' if add_bos else '') + t.lower().strip() + (f' {EOS}' if add_eos else '')
#export
def replace_space(t):
"Replace embedded spaces in a token with unicode line char to allow for split/join"
return t.replace(' ', '▁')
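# Quick sanity checks (not in the original notebook) for the two helpers above:
test_eq(lowercase('Hello'), f'{BOS} hello')
test_eq(lowercase('Hello', add_eos=True), f'{BOS} hello {EOS}')
test_eq(replace_space('a b'), 'a▁b')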
#export
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
###Output
_____no_output_____
###Markdown
Tokenizing A tokenizer is a class that must implement a `pipe` method. This `pipe` method receives a generator of texts and must return a generator with their tokenized versions. Here is the most basic example:
###Code
#export
class BaseTokenizer():
"Basic tokenizer that just splits on spaces"
def __init__(self, split_char=' ', **kwargs): self.split_char=split_char
def pipe(self, items): return (t.split(self.split_char) for t in items)
tok = BaseTokenizer()
for t in tok.pipe(["This is a text"]): test_eq(t, ["This", "is", "a", "text"])
tok = BaseTokenizer('x')
for t in tok.pipe(["This is a text"]): test_eq(t, ["This is a te", "t"])
#export
class SpacyTokenizer():
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, batch_size=5000):
special_toks = ifnone(special_toks, defaults.text_spec_tok)
self.nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
for w in special_toks: self.nlp.tokenizer.add_special_case(w, [{ORTH: w}])
self.batch_size=batch_size
def pipe(self, items):
for doc in self.nlp.pipe(items, batch_size=self.batch_size):
yield [d.text for d in doc]
tok = SpacyTokenizer()
for t in tok.pipe(["This isn't the easiest text."]):
test_eq(t, ["This", "is", "n't", "the", "easiest", "text", "."])
#export
def apply_rules(items, rules):
"Returns a generator that apply `rules` to `items`"
return map(compose(*rules), items)
for t in apply_rules(["This is a text"], [replace_maj]): test_eq(t, f"{TK_MAJ} this is a text")
#export
class TokenizeBatch:
"A wrapper around `tok_func` to apply `rules` and tokenize in parallel"
def __init__(self, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs ):
self.rules = L(ifnone(rules, defaults.text_proc_rules))
self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
self.tok = tok_func(**tok_kwargs)
def __call__(self, batch):
for o in self.tok.pipe(apply_rules(batch, self.rules)): yield L(o).mapped(self.post_f)
f = TokenizeBatch()
test_eq(f(["This isn't a problem"]), [[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem']])
f = TokenizeBatch(BaseTokenizer, rules=[], split_char="'")
test_eq(f(["This isn't a problem"]), [['This▁isn', 't▁a▁problem']])
###Output
_____no_output_____
###Markdown
The main class called inside each of the processes handling the tokenization. It creates an instance of a tokenizer with `tok_func` and `tok_kwargs` at init, then iterates through its `batch` of texts, applies the `rules` to them and tokenizes them.
###Code
texts = ["this is a text", "this is another text"]
tok = TokenizeBatch(BaseTokenizer, texts.__getitem__)
test_eq([t for t in tok([0,1])],[['this', 'is', 'a', 'text'], ['this', 'is', 'another', 'text']])
#export
def tokenize1(text, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs):
"Tokenize one `text` with an instance of `tok_func` and some `rules`"
return next(iter(TokenizeBatch(tok_func, rules, post_rules, **tok_kwargs)([text])))
test_eq(tokenize1("This isn't a problem"),
[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem'])
test_eq(tokenize1("This isn't a problem", BaseTokenizer, rules=[], split_char="'"),
['This▁isn', 't▁a▁problem'])
#export
def parallel_tokenize(items, tok_func, rules, as_gen=False, n_workers=defaults.cpus, **tok_kwargs):
"Calls a potential setup on `tok_func` before launching `TokenizeBatch` in parallel"
if hasattr(tok_func, 'setup'): tok_kwargs = tok_func(**tok_kwargs).setup(items, rules)
return parallel_gen(TokenizeBatch, items, as_gen=as_gen, tok_func=tok_func,
rules=rules, n_workers=n_workers, **tok_kwargs)
###Output
_____no_output_____
###Markdown
Tokenize texts in files Preprocessing function for texts in filenames. Tokenized texts will be saved in a similar fashion in a directory suffixed with `_tok` in the parent folder of `path` (override with `output_dir`).
###Code
#export
@patch
def read(self:Path):
"Read the content of `fname`"
with self.open() as f: return f.read()
#export
@patch
def write(self:Path, txt):
"Write `txt` to `self`, creating directories as needed"
self.parent.mkdir(parents=True,exist_ok=True)
with self.open('w') as f: f.write(txt)
#export
def tokenize_folder(path, extensions=None, folders=None, output_dir=None, n_workers=defaults.cpus,
rules=None, tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize text files in `path` in parallel using `n_workers`"
path,extensions = Path(path),ifnone(extensions, ['.txt'])
fnames = get_files(path, extensions=extensions, recurse=True, folders=folders)
output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
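    # `Path.read` is prepended to the rules so each item (a file path) is first read into its text
    # before the usual preprocessing rules run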
rules = Path.read + L(ifnone(rules, defaults.text_proc_rules.copy()))
counter = Counter()
for i,tok in parallel_tokenize(fnames, tok_func, rules, as_gen=True, n_workers=n_workers, **tok_kwargs):
out = output_dir/fnames[i].relative_to(path)
out.write(' '.join(tok))
out.with_suffix('.len').write(str(len(tok)))
counter.update(tok)
pickle.dump(counter, open(output_dir/'counter.pkl','wb'))
###Output
_____no_output_____
###Markdown
The result will be in `output_dir` (defaults to a folder in the same parent directory as `path`, with `_tok` added to `path.name`) with the same structure as in `path`. Tokenized texts for a given file will be in the file having the same name in `output_dir`. Additionally, a file with a .len suffix contains the number of tokens, and the count of all words is stored in `output_dir/counter.pkl`. `extensions` will default to `['.txt']` and all text files in `path` are treated unless you specify a list of folders in `folders`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer.
###Code
# TODO: test include option
path = Path('tmp')
os.makedirs(path, exist_ok=True)
for d in ['a', 'b', 'c']:
os.makedirs(path/d, exist_ok=True)
for i in range(5):
with open(path/d/f'text{i}.txt', 'w') as f: f.write(f"This is an example of text {d} {i}")
tokenize_folder(path)
outp = Path('tmp_tok')
assert outp.is_dir()
for d in ['a', 'b', 'c']:
p = outp/d
assert p.is_dir()
for i in range(5):
assert (p/f'text{i}.txt').is_file()
assert (p/f'text{i}.len').is_file()
test_eq((p/f'text{i}.txt').read(), ' '.join([
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', d, str(i) ]))
test_eq((p/f'text{i}.len').read(), '10')
shutil.rmtree(path)
shutil.rmtree(outp)
###Output
_____no_output_____
###Markdown
Tokenize texts in a dataframe
###Code
#export
def _join_texts(df, mark_fields=False):
"Join texts in row `idx` of `df`, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
#export
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize texts in `df[text_cols]` in parallel using `n_workers`"
text_cols = L(text_cols)
mark_fields = ifnone(mark_fields, len(text_cols) > 1)
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
texts = _join_texts(df[text_cols], mark_fields=mark_fields)
outputs = L(parallel_tokenize(texts, tok_func, rules, n_workers=n_workers, **tok_kwargs))
lengths = outputs.mapped(len)
counter = Counter()
for o in outputs: counter.update(o)
other_cols = [c for c in df.columns if c not in text_cols]
res = df[other_cols].copy()
res['text'],res['text_lengths'] = outputs,lengths
return res,counter
###Output
_____no_output_____
###Markdown
This function returns a new dataframe with the same non-text columns, a column named text that contains the tokenized texts and a column named text_lengths that contains their respective length. It also returns a counter of all words seen to quickly build a vocabulary afterward. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
###Code
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text')
test_eq(list(out.columns), ['label', 'text', 'text_lengths'])
test_eq(out['label'].values, df['label'].values)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i) ])
test_eq(out['text_lengths'][i], 9)
# hide
#With two columns of text, mark_fields defaults to True
df['text1'] = df['text'].values
out,cnt = tokenize_df(df, text_cols=['text', 'text1'])
test_eq(list(out.columns), ['label', 'text', 'text_lengths'])
test_eq(out['label'].values, df['label'].values)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, FLD, '1', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i),
FLD, '2', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i)
])
test_eq(out['text_lengths'][i], 21)
#export
#TODO: test + rework
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, header='infer', chunksize=None, **tok_kwargs):
"Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
df = pd.read_csv(fname, header=header, chunksize=chunksize)
outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
    kwargs = dict(n_workers=n_workers, rules=rules,
                  mark_fields=mark_fields, tok_func=tok_func, **tok_kwargs)
    if chunksize is None:
        out,cnt = tokenize_df(df, text_cols, **kwargs)
        out.to_csv(outname, header=header, index=False)
    else:
        cnt = Counter()
        for i,dfp in enumerate(df):
            out,c = tokenize_df(dfp, text_cols, **kwargs)
out.to_csv(outname, header=header if i==0 else None, index=False, mode='w' if i==0 else 'a')
cnt.update(c)
pickle.dump(cnt, open(outname.parent/'counter.pkl', 'wb'))
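# Hypothetical usage sketch (not from the original notebook; the file name below is made up):
# tokenizing a large csv in chunks of 10,000 rows would look like
# tokenize_csv(Path('reviews.csv'), text_cols='text', chunksize=10_000)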
###Output
_____no_output_____
###Markdown
The result will be written in a new csv file in `outname` (defaults to the same as `fname` with the suffix `_tok.csv`) and will have the same header as the original file, the same non-text columns, a text and a text_lengths column as described in `tokenize_df`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field. The csv file is opened with `header` and optionally with blocks of `chunksize` at a time. If this argument is passed, each chunk is processed independently and saved in the output file to save memory usage. Sentencepiece
###Code
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
#export
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
model_type='unigram', char_coverage=None, cache_dir='tmp'):
try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
except ImportError:
raise Exception('sentencepiece module is missing: run `pip install sentencepiece`')
self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
if sp_model is None: self.tok = None
else:
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
os.makedirs(self.cache_dir, exist_ok=True)
def _get_vocab_sz(self, raw_text_path):
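        # Heuristic: target roughly a quarter of the number of unique whitespace-separated tokens,
        # capped at `max_vocab_sz` and rounded up to a multiple of 8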
cnt = Counter()
with open(raw_text_path, 'r') as f:
for line in f.readlines():
cnt.update(line.split())
if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
res = len(cnt)//4
while res%8 != 0: res+=1
return res
def train(self, raw_text_path):
"Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
from sentencepiece import SentencePieceTrainer
vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
spec_tokens = ['\u2581'+s for s in self.special_toks]
SentencePieceTrainer.Train(" ".join([
f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1",
f"--user_defined_symbols={','.join(spec_tokens)}"]))
raw_text_path.unlink()
return self.cache_dir/'spm.model'
def setup(self, items, rules):
if self.tok is not None: return {'sp_model': self.sp_model}
raw_text_path = self.cache_dir/'texts.out'
with open(raw_text_path, 'w') as f:
for t in progress_bar(apply_rules(items, rules), total=len(items), leave=False):
f.write(f'{t}\n')
return {'sp_model': self.train(raw_text_path)}
def pipe(self, items):
for t in items: yield self.tok.EncodeAsPieces(t)
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text', tok_func=SentencePieceTokenizer, vocab_sz=34)
out
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_torch_core.ipynb.
Converted 02_script.ipynb.
Converted 03_dataloader.ipynb.
Converted 04_transform.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_vision_core.ipynb.
Converted 08_pets_tutorial.ipynb.
Converted 09_vision_augment.ipynb.
Converted 11_layers.ipynb.
Converted 11a_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 15_callback_hook.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_metrics.ipynb.
Converted 21_tutorial_imagenette.ipynb.
Converted 22_vision_learner.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_index.ipynb.
Converted 95_utils_test.ipynb.
Converted 96_data_external.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Text core> Basic function to preprocess text before assembling it in a `DataBunch`.
###Code
#export
import spacy,html
from spacy.symbols import ORTH
###Output
_____no_output_____
###Markdown
Preprocessing rules The following are rules applied to texts before or after they are tokenized.
###Code
#export
#special tokens
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
#export
_all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
#export
_re_spec = re.compile(r'([/#\\])')
def spec_add_spaces(t):
"Add spaces around / and #"
return _re_spec.sub(r' \1 ', t)
test_eq(spec_add_spaces('#fastai'), ' # fastai')
test_eq(spec_add_spaces('/fastai'), ' / fastai')
test_eq(spec_add_spaces('\\fastai'), ' \\ fastai')
#export
_re_space = re.compile(' {2,}')
def rm_useless_spaces(t):
"Remove multiple spaces"
return _re_space.sub(' ', t)
test_eq(rm_useless_spaces('a b c'), 'a b c')
#export
_re_rep = re.compile(r'(\S)(\1{2,})')
def replace_rep(t):
"Replace repetitions at the character level: cccc -- TK_REP 4 c"
def _replace_rep(m):
c,cc = m.groups()
return f' {TK_REP} {len(cc)+1} {c} '
return _re_rep.sub(_replace_rep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same character or more.
###Code
test_eq(replace_rep('aa'), 'aa')
test_eq(replace_rep('aaaa'), f' {TK_REP} 4 a ')
#export
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
#hide
"""
Matches any word repeated at least three times, with spaces between the repetitions
(?:\s|^) Non-Capture either a whitespace character or the beginning of text
(\w+) Capture any alphanumeric character
\s+ One or more whitespace
((?:\1\s+)+) Capture a repetition of one or more times \1 followed by one or more whitespace
\1           Occurrence of \1
(\s|\W|$) Capture last whitespace, non alphanumeric character or end of text
""";
#export
def replace_wrep(t):
"Replace word repetitions: word word word word -- TK_WREP 4 word"
def _replace_wrep(m):
c,cc,e = m.groups()
return f' {TK_WREP} {len(cc.split())+2} {c} {e}'
return _re_wrep.sub(_replace_wrep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same word or more.
###Code
test_eq(replace_wrep('ah ah'), 'ah ah')
test_eq(replace_wrep('ah ah ah'), f' {TK_WREP} 3 ah ')
test_eq(replace_wrep('ah ah ah ah'), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah '), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah.'), f' {TK_WREP} 4 ah .')
test_eq(replace_wrep('ah ah ahi'), f'ah ah ahi')
#export
def fix_html(x):
"Various messy things we've seen in documents"
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
'#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(' @-@ ','-').replace('...',' …')
return html.unescape(x)
test_eq(fix_html('#39;bli#146;'), "'bli'")
test_eq(fix_html('Sarah amp; Duck...'), 'Sarah & Duck …')
test_eq(fix_html('a nbsp; #36;'), 'a $')
test_eq(fix_html('\\" <unk>'), f'" {UNK}')
test_eq(fix_html('quot; @.@ @-@ '), "' .-")
test_eq(fix_html('<br />text\\n'), '\ntext\n')
#export
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
#hide
"""
Catches any word in all caps, even with ' or - inside
(\s|^) Capture either a whitespace or the beginning of text
([A-Z]+ Capture one capitalized letter or more...
[^a-z\s]*) ...followed by anything that's non lowercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
""";
#export
def replace_all_caps(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_all_caps(m):
tok = f'{TK_UP} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_all_caps.sub(_replace_all_caps, t)
test_eq(replace_all_caps("I'M SHOUTING"), f"{TK_UP} i'm {TK_UP} shouting")
test_eq(replace_all_caps("I'm speaking normally"), "I'm speaking normally")
test_eq(replace_all_caps("I am speaking normally"), "i am speaking normally")
#export
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
#hide
"""
Catches any capitalized word
(\s|^) Capture either a whitespace or the beginning of text
([A-Z] Capture exactly one capitalized letter...
[^A-Z\s]*) ...followed by anything that's not uppercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
""";
#export
def replace_maj(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_maj(m):
tok = f'{TK_MAJ} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_maj.sub(_replace_maj, t)
test_eq(replace_maj("Jeremy Howard"), f'{TK_MAJ} jeremy {TK_MAJ} howard')
test_eq(replace_maj("I don't think there is any maj here"), ("i don't think there is any maj here"),)
#export
def lowercase(t, add_bos=True, add_eos=False):
"Converts `t` to lowercase"
return (f'{BOS} ' if add_bos else '') + t.lower().strip() + (f' {EOS}' if add_eos else '')
#export
def replace_space(t):
"Replace embedded spaces in a token with unicode line char to allow for split/join"
return t.replace(' ', '▁')
#export
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
###Output
_____no_output_____
###Markdown
Tokenizing A tokenizer is a class that is called directly on a collection of texts: its `__call__` receives a generator of texts and must return a generator with their tokenized versions. Here is the most basic example:
###Code
#export
class BaseTokenizer():
"Basic tokenizer that just splits on spaces"
def __init__(self, split_char=' ', **kwargs): self.split_char=split_char
def __call__(self, items): return (t.split(self.split_char) for t in items)
tok = BaseTokenizer()
for t in tok(["This is a text"]): test_eq(t, ["This", "is", "a", "text"])
tok = BaseTokenizer('x')
for t in tok(["This is a text"]): test_eq(t, ["This is a te", "t"])
#export
class SpacyTokenizer():
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, buf_sz=5000):
special_toks = ifnone(special_toks, defaults.text_spec_tok)
nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
for w in special_toks: nlp.tokenizer.add_special_case(w, [{ORTH: w}])
self.pipe,self.buf_sz = nlp.pipe,buf_sz
def __call__(self, items):
return (L(doc).attrgot('text') for doc in self.pipe(items, batch_size=self.buf_sz))
tok = SpacyTokenizer()
inp,exp = "This isn't the easiest text.",["This", "is", "n't", "the", "easiest", "text", "."]
test_eq(L(tok([inp]*5)), [exp]*5)
#export
class TokenizeBatch:
"A wrapper around `tok_func` to apply `rules` and tokenize in parallel"
def __init__(self, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs ):
self.rules = L(ifnone(rules, defaults.text_proc_rules))
self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
self.tok = tok_func(**tok_kwargs)
def __call__(self, batch):
return (L(o).map(self.post_f) for o in self.tok(maps(*self.rules, batch)))
f = TokenizeBatch()
test_eq(f(["This isn't a problem"]), [[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem']])
f = TokenizeBatch(BaseTokenizer, rules=[], split_char="'")
test_eq(f(["This isn't a problem"]), [['This▁isn', 't▁a▁problem']])
###Output
_____no_output_____
###Markdown
The main class called inside each of the processes handling the tokenization. It creates an instance of a tokenizer with `tok_func` and `tok_kwargs` at init, then iterates through its `batch` of texts, applies the `rules` to them and tokenizes them.
###Code
texts = ["this is a text", "this is another text"]
tok = TokenizeBatch(BaseTokenizer, texts.__getitem__)
test_eq([t for t in tok([0,1])],[['this', 'is', 'a', 'text'], ['this', 'is', 'another', 'text']])
#export
def tokenize1(text, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs):
"Tokenize one `text` with an instance of `tok_func` and some `rules`"
return first(TokenizeBatch(tok_func, rules, post_rules, **tok_kwargs)([text]))
test_eq(tokenize1("This isn't a problem"),
[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem'])
test_eq(tokenize1("This isn't a problem", BaseTokenizer, rules=[], split_char="'"),
['This▁isn', 't▁a▁problem'])
#export
def parallel_tokenize(items, tok_func, rules, as_gen=False, n_workers=defaults.cpus, **tok_kwargs):
"Calls a potential setup on `tok_func` before launching `TokenizeBatch` in parallel"
if hasattr(tok_func, 'setup'): tok_kwargs = tok_func(**tok_kwargs).setup(items, rules)
return parallel_gen(TokenizeBatch, items, as_gen=as_gen, tok_func=tok_func,
rules=rules, n_workers=n_workers, **tok_kwargs)
###Output
_____no_output_____
###Markdown
Tokenize texts in files Preprocessing function for texts in filenames. Tokenized texts will be saved in a similar fashion in a directory suffixed with `_tok` in the parent folder of `path` (override with `output_dir`).
###Code
#export
fn_counter_pkl = 'counter.pkl'
#export
def tokenize_folder(path, extensions=None, folders=None, output_dir=None, n_workers=defaults.cpus,
rules=None, tok_func=SpacyTokenizer, encoding='utf8', **tok_kwargs):
"Tokenize text files in `path` in parallel using `n_workers`"
path,extensions = Path(path),ifnone(extensions, ['.txt'])
fnames = get_files(path, extensions=extensions, recurse=True, folders=folders)
output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
rules = partial(Path.read, encoding=encoding) + L(ifnone(rules, defaults.text_proc_rules.copy()))
counter = Counter()
for i,tok in parallel_tokenize(fnames, tok_func, rules, as_gen=True, n_workers=n_workers, **tok_kwargs):
out = output_dir/fnames[i].relative_to(path)
out.write(' '.join(tok))
counter.update(tok)
(output_dir/fn_counter_pkl).save(counter)
###Output
_____no_output_____
###Markdown
The result will be in `output_dir` (defaults to a folder in the same parent directory as `path`, with `_tok` added to `path.name`) with the same structure as in `path`. Tokenized texts for a given file will be in the file having the same name in `output_dir`, and the count of all words is stored in `output_dir/counter.pkl`. `extensions` will default to `['.txt']` and all text files in `path` are treated unless you specify a list of folders in `folders`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. Tokenize texts in a dataframe
###Code
#export
def _join_texts(df, mark_fields=False):
"Join texts in row `idx` of `df`, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
#hide
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'text1': texts}, columns=['text', 'text1'])
col = _join_texts(df, mark_fields=True)
for i in range(len(df)):
test_eq(col[i], f'{FLD} 1 This is an example of text {i} {FLD} 2 This is an example of text {i}')
#export
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize texts in `df[text_cols]` in parallel using `n_workers`"
text_cols = L(text_cols)
#mark_fields defaults to False if there is one column of texts, True if there are multiple
if mark_fields is None: mark_fields = len(text_cols)>1
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
texts = _join_texts(df[text_cols], mark_fields=mark_fields)
outputs = L(parallel_tokenize(texts, tok_func, rules, n_workers=n_workers, **tok_kwargs)
).sorted().itemgot(1)
other_cols = df.columns[~df.columns.isin(text_cols)]
res = df[other_cols].copy()
res['text'] = outputs
return res,Counter(outputs.concat())
###Output
_____no_output_____
###Markdown
This function returns a new dataframe with the same non-text columns and a column named text that contains the tokenized texts. It also returns a counter of all words seen to quickly build a vocabulary afterward. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
###Code
#export
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, header='infer', chunksize=50000, **tok_kwargs):
"Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
df = pd.read_csv(fname, header=header, chunksize=chunksize)
outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
cnt = Counter()
for i,dfp in enumerate(df):
out,c = tokenize_df(dfp, text_cols, n_workers=n_workers, rules=rules,
mark_fields=mark_fields, tok_func=tok_func, **tok_kwargs)
out.text = out.text.str.join(' ')
out.to_csv(outname, header=(None,header)[i==0], index=False, mode=('a','w')[i==0])
cnt.update(c)
outname.with_suffix('.pkl').save(cnt)
#export
def load_tokenized_csv(fname):
"Utility function to quickly load a tokenized csv ans the corresponding counter"
fname = Path(fname)
out = pd.read_csv(fname)
for txt_col in out.columns[1:-1]:
out[txt_col] = out[txt_col].str.split(' ')
return out,fname.with_suffix('.pkl').load()
###Output
_____no_output_____
###Markdown
The result will be written in a new csv file in `outname` (defaults to the same as `fname` with the suffix `_tok.csv`) and will have the same header as the original file, the same non-text columns, and a text column as described in `tokenize_df`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field. The csv file is opened with `header` and in blocks of `chunksize` at a time; each chunk is processed independently and appended to the output file to limit memory usage.
###Code
def _prepare_texts(tmp_d):
"Prepare texts in a folder struct in tmp_d, a csv file and returns a dataframe"
path = Path(tmp_d)/'tmp'
path.mkdir()
for d in ['a', 'b', 'c']:
(path/d).mkdir()
for i in range(5):
with open(path/d/f'text{i}.txt', 'w') as f: f.write(f"This is an example of text {d} {i}")
texts = [f"This is an example of text {d} {i}" for i in range(5) for d in ['a', 'b', 'c']]
df = pd.DataFrame({'text': texts, 'label': list(range(15))}, columns=['text', 'label'])
csv_fname = tmp_d/'input.csv'
df.to_csv(csv_fname, index=False)
return path,df,csv_fname
with tempfile.TemporaryDirectory() as tmp_d:
path,df,csv_fname = _prepare_texts(Path(tmp_d))
#Tokenize as folders
tokenize_folder(path)
outp = Path(tmp_d)/'tmp_tok'
for d in ['a', 'b', 'c']:
p = outp/d
for i in range(5):
test_eq((p/f'text{i}.txt').read(), ' '.join([
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', d, str(i) ]))
cnt_a = (outp/fn_counter_pkl).load()
test_eq(cnt_a['this'], 15)
test_eq(cnt_a['a'], 5)
test_eq(cnt_a['0'], 3)
#Tokenize as a dataframe
out,cnt_b = tokenize_df(df, text_cols='text')
test_eq(list(out.columns), ['label', 'text'])
test_eq(out['label'].values, df['label'].values)
test_eq(out['text'], [(outp/d/f'text{i}.txt').read().split(' ') for i in range(5) for d in ['a', 'b', 'c']])
test_eq(cnt_a, cnt_b)
#Tokenize as a csv
out_fname = Path(tmp_d)/'output.csv'
tokenize_csv(csv_fname, text_cols='text', outname=out_fname)
test_eq((out,cnt_b), load_tokenized_csv(out_fname))
###Output
_____no_output_____
###Markdown
Sentencepiece
###Code
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
#export
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
model_type='unigram', char_coverage=None, cache_dir='tmp'):
try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
except ImportError:
raise Exception('sentencepiece module is missing: run `pip install sentencepiece`')
self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
if sp_model is None: self.tok = None
else:
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
os.makedirs(self.cache_dir, exist_ok=True)
def _get_vocab_sz(self, raw_text_path):
cnt = Counter()
with open(raw_text_path, 'r') as f:
for line in f.readlines():
cnt.update(line.split())
if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
res = len(cnt)//4
while res%8 != 0: res+=1
return res
def train(self, raw_text_path):
"Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
from sentencepiece import SentencePieceTrainer
vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
spec_tokens = ['\u2581'+s for s in self.special_toks]
SentencePieceTrainer.Train(" ".join([
f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1",
f"--user_defined_symbols={','.join(spec_tokens)}"]))
raw_text_path.unlink()
return self.cache_dir/'spm.model'
def setup(self, items, rules):
if self.tok is not None: return {'sp_model': self.sp_model}
raw_text_path = self.cache_dir/'texts.out'
with open(raw_text_path, 'w') as f:
for t in progress_bar(maps(*rules, items), total=len(items), leave=False):
f.write(f'{t}\n')
return {'sp_model': self.train(raw_text_path)}
def __call__(self, items):
for t in items: yield self.tok.EncodeAsPieces(t)
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text', tok_func=SentencePieceTokenizer, vocab_sz=34)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core_foundation.ipynb.
Converted 01a_core_utils.ipynb.
Converted 01b_core_dispatch.ipynb.
Converted 01c_core_transform.ipynb.
Converted 02_core_script.ipynb.
Converted 03_torchcore.ipynb.
Converted 03a_layers.ipynb.
Converted 04_data_load.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 09a_vision_data.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 70_callback_wandb.ipynb.
Converted 71_callback_tensorboard.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Text core> Basic function to preprocess text before assembling it in a `DataBunch`.
###Code
#export
import spacy,html
from spacy.symbols import ORTH
###Output
_____no_output_____
###Markdown
Preprocessing rules The following are rules applied to texts before or after they are tokenized.
###Code
#export
#special tokens
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
#export
_all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
#export
_re_spec = re.compile(r'([/#\\])')
def spec_add_spaces(t):
"Add spaces around / and #"
return _re_spec.sub(r' \1 ', t)
test_eq(spec_add_spaces('#fastai'), ' # fastai')
test_eq(spec_add_spaces('/fastai'), ' / fastai')
test_eq(spec_add_spaces('\\fastai'), ' \\ fastai')
#export
_re_space = re.compile(' {2,}')
def rm_useless_spaces(t):
"Remove multiple spaces"
return _re_space.sub(' ', t)
test_eq(rm_useless_spaces('a b c'), 'a b c')
#export
_re_rep = re.compile(r'(\S)(\1{2,})')
def replace_rep(t):
"Replace repetitions at the character level: cccc -- TK_REP 4 c"
def _replace_rep(m):
c,cc = m.groups()
return f' {TK_REP} {len(cc)+1} {c} '
return _re_rep.sub(_replace_rep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same character or more.
###Code
test_eq(replace_rep('aa'), 'aa')
test_eq(replace_rep('aaaa'), f' {TK_REP} 4 a ')
#export
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
#hide
"""
Matches any word repeated at least three times, with spaces between the repetitions
(?:\s|^) Non-Capture either a whitespace character or the beginning of text
(\w+) Capture any alphanumeric character
\s+ One or more whitespace
((?:\1\s+)+) Capture a repetition of one or more times \1 followed by one or more whitespace
\1           Occurrence of \1
(\s|\W|$) Capture last whitespace, non alphanumeric character or end of text
""";
#export
def replace_wrep(t):
"Replace word repetitions: word word word word -- TK_WREP 4 word"
def _replace_wrep(m):
c,cc,e = m.groups()
return f' {TK_WREP} {len(cc.split())+2} {c} {e}'
return _re_wrep.sub(_replace_wrep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same word or more.
###Code
test_eq(replace_wrep('ah ah'), 'ah ah')
test_eq(replace_wrep('ah ah ah'), f' {TK_WREP} 3 ah ')
test_eq(replace_wrep('ah ah ah ah'), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah '), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah.'), f' {TK_WREP} 4 ah .')
test_eq(replace_wrep('ah ah ahi'), f'ah ah ahi')
#export
def fix_html(x):
"Various messy things we've seen in documents"
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
'#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(' @-@ ','-').replace('...',' …')
return html.unescape(x)
test_eq(fix_html('#39;bli#146;'), "'bli'")
test_eq(fix_html('Sarah amp; Duck...'), 'Sarah & Duck …')
test_eq(fix_html('a nbsp; #36;'), 'a $')
test_eq(fix_html('\\" <unk>'), f'" {UNK}')
test_eq(fix_html('quot; @.@ @-@ '), "' .-")
test_eq(fix_html('<br />text\\n'), '\ntext\n')
#export
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
#hide
"""
Catches any word in all caps, even with ' or - inside
(\s|^) Capture either a whitespace or the beginning of text
([A-Z]+ Capture one capitalized letter or more...
[^a-z\s]*) ...followed by anything that's non lowercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
""";
#export
def replace_all_caps(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_all_caps(m):
tok = f'{TK_UP} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_all_caps.sub(_replace_all_caps, t)
test_eq(replace_all_caps("I'M SHOUTING"), f"{TK_UP} i'm {TK_UP} shouting")
test_eq(replace_all_caps("I'm speaking normally"), "I'm speaking normally")
test_eq(replace_all_caps("I am speaking normally"), "i am speaking normally")
#export
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
#hide
"""
Catches any capitalized word
(\s|^) Capture either a whitespace or the beginning of text
([A-Z] Capture exactly one capitalized letter...
[^A-Z\s]*) ...followed by anything that's not uppercase or whitespace
(?=(\s|$)) Look ahead for a space or end of text
""";
#export
def replace_maj(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_maj(m):
tok = f'{TK_MAJ} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_maj.sub(_replace_maj, t)
test_eq(replace_maj("Jeremy Howard"), f'{TK_MAJ} jeremy {TK_MAJ} howard')
test_eq(replace_maj("I don't think there is any maj here"), ("i don't think there is any maj here"),)
#export
def lowercase(t, add_bos=True, add_eos=False):
"Converts `t` to lowercase"
return (f'{BOS} ' if add_bos else '') + t.lower().strip() + (f' {EOS}' if add_eos else '')
#export
def replace_space(t):
"Replace embedded spaces in a token with unicode line char to allow for split/join"
return t.replace(' ', '▁')
#export
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
###Output
_____no_output_____
###Markdown
Tokenizing A tokenizer is a class that is called directly on a collection of texts: its `__call__` receives a generator of texts and must return a generator with their tokenized versions. Here is the most basic example:
###Code
#export
class BaseTokenizer():
"Basic tokenizer that just splits on spaces"
def __init__(self, split_char=' ', **kwargs): self.split_char=split_char
def __call__(self, items): return (t.split(self.split_char) for t in items)
tok = BaseTokenizer()
for t in tok(["This is a text"]): test_eq(t, ["This", "is", "a", "text"])
tok = BaseTokenizer('x')
for t in tok(["This is a text"]): test_eq(t, ["This is a te", "t"])
#export
class SpacyTokenizer():
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, buf_sz=5000):
special_toks = ifnone(special_toks, defaults.text_spec_tok)
nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
for w in special_toks: nlp.tokenizer.add_special_case(w, [{ORTH: w}])
self.pipe,self.buf_sz = nlp.pipe,buf_sz
def __call__(self, items):
return (L(doc).attrgot('text') for doc in self.pipe(items, batch_size=self.buf_sz))
tok = SpacyTokenizer()
inp,exp = "This isn't the easiest text.",["This", "is", "n't", "the", "easiest", "text", "."]
test_eq(L(tok([inp]*5)), [exp]*5)
#export
class TokenizeBatch:
"A wrapper around `tok_func` to apply `rules` and tokenize in parallel"
def __init__(self, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs ):
self.rules = L(ifnone(rules, defaults.text_proc_rules))
self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
self.tok = tok_func(**tok_kwargs)
def __call__(self, batch):
return (L(o).map(self.post_f) for o in self.tok(maps(*self.rules, batch)))
f = TokenizeBatch()
test_eq(f(["This isn't a problem"]), [[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem']])
f = TokenizeBatch(BaseTokenizer, rules=[], split_char="'")
test_eq(f(["This isn't a problem"]), [['This▁isn', 't▁a▁problem']])
###Output
_____no_output_____
###Markdown
The main class called inside each of the processes handling the tokenization. It creates an instance of a tokenizer with `tok_func` and `tok_kwargs` at init, then iterates through its `batch` of texts, applies the `rules` to them and tokenizes them.
###Code
texts = ["this is a text", "this is another text"]
tok = TokenizeBatch(BaseTokenizer, texts.__getitem__)
test_eq([t for t in tok([0,1])],[['this', 'is', 'a', 'text'], ['this', 'is', 'another', 'text']])
#export
def tokenize1(text, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs):
"Tokenize one `text` with an instance of `tok_func` and some `rules`"
return next(iter(TokenizeBatch(tok_func, rules, post_rules, **tok_kwargs)([text])))
test_eq(tokenize1("This isn't a problem"),
[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem'])
test_eq(tokenize1("This isn't a problem", BaseTokenizer, rules=[], split_char="'"),
['This▁isn', 't▁a▁problem'])
#export
def parallel_tokenize(items, tok_func, rules, as_gen=False, n_workers=defaults.cpus, **tok_kwargs):
"Calls a potential setup on `tok_func` before launching `TokenizeBatch` in parallel"
if hasattr(tok_func, 'setup'): tok_kwargs = tok_func(**tok_kwargs).setup(items, rules)
return parallel_gen(TokenizeBatch, items, as_gen=as_gen, tok_func=tok_func,
rules=rules, n_workers=n_workers, **tok_kwargs)
###Output
_____no_output_____
###Markdown
Tokenize texts in files Preprocessing function for texts in filenames. Tokenized texts will be saved in a similar fashion in a directory suffixed with `_tok` in the parent folder of `path` (override with `output_dir`).
###Code
#export
@patch
def read(self:Path):
"Read the content of `fname`"
with self.open() as f: return f.read()
#export
@patch
def write(self:Path, txt):
"Write `txt` to `self`, creating directories as needed"
self.parent.mkdir(parents=True,exist_ok=True)
with self.open('w') as f: f.write(txt)
#export
def tokenize_folder(path, extensions=None, folders=None, output_dir=None, n_workers=defaults.cpus,
rules=None, tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize text files in `path` in parallel using `n_workers`"
path,extensions = Path(path),ifnone(extensions, ['.txt'])
fnames = get_files(path, extensions=extensions, recurse=True, folders=folders)
output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
rules = Path.read + L(ifnone(rules, defaults.text_proc_rules.copy()))
counter = Counter()
for i,tok in parallel_tokenize(fnames, tok_func, rules, as_gen=True, n_workers=n_workers, **tok_kwargs):
out = output_dir/fnames[i].relative_to(path)
out.write(' '.join(tok))
out.with_suffix('.len').write(str(len(tok)))
counter.update(tok)
pickle.dump(counter, open(output_dir/'counter.pkl','wb'))
###Output
_____no_output_____
###Markdown
The result will be in `output_dir` (defaults to a folder in the same parent directory as `path`, with `_tok` added to `path.name`) with the same structure as in `path`. Tokenized texts for a given file will be in the file having the same name in `output_dir`. Additionally, a file with a .len suffix contains the number of tokens, and the count of all words is stored in `output_dir/counter.pkl`. `extensions` will default to `['.txt']` and all text files in `path` are treated unless you specify a list of folders in `folders`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer.
###Code
# TODO: test include option
import tempfile
with tempfile.TemporaryDirectory() as tmp_d:
path = Path(tmp_d)/'tmp'
path.mkdir()
for d in ['a', 'b', 'c']:
(path/d).mkdir()
for i in range(5):
with open(path/d/f'text{i}.txt', 'w') as f: f.write(f"This is an example of text {d} {i}")
tokenize_folder(path)
outp = Path(tmp_d)/'tmp_tok'
assert outp.is_dir()
for d in ['a', 'b', 'c']:
p = outp/d
assert p.is_dir()
for i in range(5):
assert (p/f'text{i}.txt').is_file()
assert (p/f'text{i}.len').is_file()
test_eq((p/f'text{i}.txt').read(), ' '.join([
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', d, str(i) ]))
test_eq((p/f'text{i}.len').read(), '10')
###Output
_____no_output_____
###Markdown
Tokenize texts in a dataframe
###Code
#export
def _join_texts(df, mark_fields=False):
"Join texts in row `idx` of `df`, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
#export
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize texts in `df[text_cols]` in parallel using `n_workers`"
text_cols = L(text_cols)
if mark_fields is None: mark_fields = len(text_cols)>1
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
texts = _join_texts(df[text_cols], mark_fields=mark_fields)
outputs = L(parallel_tokenize(texts, tok_func, rules, n_workers=n_workers, **tok_kwargs)
).sorted().itemgot(1)
other_cols = df.columns[~df.columns.isin(text_cols)]
res = df[other_cols].copy()
res['text'],res['text_lengths'] = outputs,outputs.map(len)
return res,Counter(outputs.concat())
###Output
_____no_output_____
###Markdown
This function returns a new dataframe with the same non-text columns, a column named text that contains the tokenized texts and a column named text_lengths that contains their respective length. It also returns a counter of all words seen to quickly build a vocabulary afterward. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
###Code
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text')
test_eq(list(out.columns), ['label', 'text', 'text_lengths'])
test_eq(out['label'].values, df['label'].values)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i) ])
test_eq(out['text_lengths'][i], 9)
print(cnt)
print(out)
out.text[0]
# hide
#With two columns of text, mark_fields defaults to True
df['text1'] = df['text'].values
out,cnt = tokenize_df(df, text_cols=['text', 'text1'])
test_eq(list(out.columns), ['label', 'text', 'text_lengths'])
test_eq(out['label'].values, df['label'].values)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, FLD, '1', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i),
FLD, '2', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i)
])
test_eq(out['text_lengths'][i], 21)
#export
#TODO: test + rework
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, header='infer', chunksize=50000, **tok_kwargs):
"Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
df = pd.read_csv(fname, header=header, chunksize=chunksize)
outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
cnt = Counter()
for i,dfp in enumerate(df):
out,c = tokenize_df(dfp, text_cols, n_workers=n_workers, rules=rules,
mark_fields=mark_fields, tok_func=tok_func, **tok_kwargs)
out.to_csv(outname, header=(None,header)[i==0], index=False, mode=('a','w')[i==0])
cnt.update(c)
pickle.dump(cnt, open(outname.parent/'counter.pkl', 'wb'))
def load_tokenized_csv(fname):
    "Load a csv written by `tokenize_csv` and the corresponding counter"
    fname = Path(fname)
    cnt = pickle.load(open(fname.parent/'counter.pkl', 'rb'))
    out = pd.read_csv(fname)
    out['text'] = out.text.apply(eval)
    return out, cnt
###Output
_____no_output_____
###Markdown
The result will be written in a new csv file in `outname` (defaults to the same as `fname` with the suffix `_tok.csv`) and will have the same header as the original file, the same non-text columns, a text and a text_lengths column as described in `tokenize_df`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field. The csv file is opened with `header` and in blocks of `chunksize` at a time; each chunk is processed independently and appended to the output file to limit memory usage.
###Code
with tempfile.TemporaryDirectory() as tmp_d:
d = Path(tmp_d)
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
in_fname = d/'input.csv'
df.to_csv(in_fname, index=False)
out_fname = d/'output.csv'
tokenize_csv(in_fname, text_cols='text', outname=out_fname)
out, cnt = load_tokenized_csv(out_fname)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i) ])
test_eq(out['text_lengths'][i], 9)
# hide
#With two columns of text, mark_fields defaults to True
with tempfile.TemporaryDirectory() as tmp_d:
d = Path(tmp_d)
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
df['text1'] = df['text'].values
in_fname = d/'input.csv'
df.to_csv(in_fname, index=False)
out_fname = d/'output.csv'
tokenize_csv(in_fname, text_cols=['text', 'text1'], outname=out_fname)
out, cnt = load_tokenized_csv(out_fname)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, FLD, '1', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i),
FLD, '2', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i)
])
test_eq(out['text_lengths'][i], 21)
###Output
_____no_output_____
###Markdown
Sentencepiece
###Code
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
#export
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
model_type='unigram', char_coverage=None, cache_dir='tmp'):
try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
except ImportError:
raise Exception('sentencepiece module is missing: run `pip install sentencepiece`')
self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
if sp_model is None: self.tok = None
else:
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
os.makedirs(self.cache_dir, exist_ok=True)
def _get_vocab_sz(self, raw_text_path):
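        # Heuristic: use roughly a quarter of the number of distinct whitespace-separated
        # words, rounded up to a multiple of 8 and capped at `max_vocab_sz`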
cnt = Counter()
with open(raw_text_path, 'r') as f:
for line in f.readlines():
cnt.update(line.split())
if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
res = len(cnt)//4
while res%8 != 0: res+=1
return res
def train(self, raw_text_path):
"Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
from sentencepiece import SentencePieceTrainer
vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
spec_tokens = ['\u2581'+s for s in self.special_toks]
SentencePieceTrainer.Train(" ".join([
f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1",
f"--user_defined_symbols={','.join(spec_tokens)}"]))
raw_text_path.unlink()
return self.cache_dir/'spm.model'
def setup(self, items, rules):
if self.tok is not None: return {'sp_model': self.sp_model}
raw_text_path = self.cache_dir/'texts.out'
with open(raw_text_path, 'w') as f:
for t in progress_bar(maps(*rules, items), total=len(items), leave=False):
f.write(f'{t}\n')
return {'sp_model': self.train(raw_text_path)}
def pipe(self, items):
for t in items: yield self.tok.EncodeAsPieces(t)
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text', tok_func=SentencePieceTokenizer, vocab_sz=34)
out
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
_____no_output_____
###Markdown
Text core> Basic function to preprocess text before assembling it in a `DataBunch`.
###Code
#export
import concurrent.futures
from concurrent.futures import as_completed
from multiprocessing import Process, Queue
import spacy,html
from spacy.symbols import ORTH
###Output
_____no_output_____
###Markdown
Multiprocessing
###Code
#export
class ProcessPoolExecutor(concurrent.futures.ProcessPoolExecutor):
def __init__(self, max_workers=None, mp_context=None, initializer=None, initargs=()):
self.no_workers = max_workers==0
if self.no_workers: max_workers=1
        super().__init__(max_workers, mp_context, initializer=initializer, initargs=initargs)
def map(self, f, items):
return [f(o) for o in items] if self.no_workers else super().map(f, items)
#export
def parallel(func, items, n_workers=defaults.cpus):
"Applies `func` in parallel to `items`, using `n_workers`"
with ProcessPoolExecutor(max_workers=n_workers) as ex:
return [x for x in progress_bar(ex.map(func,items), total=len(items), leave=False)]
def add_one(x):
time.sleep(random.random()/100)
return x+1
test_eq(parallel(add_one, range(100)), range(1,101))
test_eq(parallel(add_one, range(100), n_workers=1), range(1,101))
test_eq(parallel(add_one, range(100), n_workers=0), range(1,101))
#export
def parallel_gen(cls, items, n_workers=defaults.cpus, as_gen=False, **kwargs):
"Instantiate `cls` in `n_workers` procs & call each on a subset of `items` in parallel."
queue = Queue()
batches = np.array_split(items, n_workers)
idx = np.cumsum(0 + L(batches).mapped(len))
def _f(batch, start_idx):
f = cls(**kwargs)
for i,b in enumerate(f(batch)): queue.put((start_idx+i,b))
processes = [Process(target=_f, args=o) for o in zip(batches,idx)]
for p in processes: p.start()
res = (queue.get() for _ in progress_bar(items, leave=False))
try: return res if as_gen else [o[1] for o in sorted(res)]
finally:
for p in processes: p.join()
###Output
_____no_output_____
###Markdown
`cls` is any class with `__call__`. It will be passed `kwargs` when initialized. Note that `n_workers` instances of `cls` are created, one in each process. `items` are then split into `n_workers` batches and one is sent to each `cls`. The function then returns a list of all the results, matching the order of `items` (if not `as_gen`) or a generator of tuples of item indices and results (if `as_gen`).
###Code
class SleepyBatchFunc:
def __init__(self): self.a=1
def __call__(self, batch):
for k in batch:
time.sleep(random.random()/10)
yield k+self.a
x = np.linspace(0,0.99,100)
res = parallel_gen(SleepyBatchFunc, x, n_workers=2)
test_eq(res, x+1)
###Output
_____no_output_____
###Markdown
Preprocessing rules The following rules are applied to texts before or after they are tokenized.
###Code
#export
#special tokens
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
#export
_all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
#export
_re_spec = re.compile(r'([/#\\])')
def spec_add_spaces(t):
"Add spaces around / and #"
return _re_spec.sub(r' \1 ', t)
test_eq(spec_add_spaces('#fastai'), ' # fastai')
test_eq(spec_add_spaces('/fastai'), ' / fastai')
test_eq(spec_add_spaces('\\fastai'), ' \\ fastai')
#export
_re_space = re.compile(' {2,}')
def rm_useless_spaces(t):
"Remove multiple spaces"
return _re_space.sub(' ', t)
test_eq(rm_useless_spaces('a b c'), 'a b c')
#export
_re_rep = re.compile(r'(\S)(\1{2,})')
def replace_rep(t):
"Replace repetitions at the character level: cccc -- TK_REP 4 c"
def _replace_rep(m):
c,cc = m.groups()
return f' {TK_REP} {len(cc)+1} {c} '
return _re_rep.sub(_replace_rep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same character or more.
###Code
test_eq(replace_rep('aa'), 'aa')
test_eq(replace_rep('aaaa'), f' {TK_REP} 4 a ')
#export
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
#hide
"""
Matches any word repeated at least three times with spaces between them
(?:\s|^)       Non-catching group with either a whitespace character or the beginning of text
(\w+)          Catching group of one or more alphanumeric characters
\s+            One or more whitespace
((?:\1\s+)+)   Catching group of a repetition of one or more times \1 followed by one or more whitespace
\1             Occurrence of \1
(\s|\W|$) Catching group of last whitespace, non alphanumeric character or end of text
""";
#export
def replace_wrep(t):
"Replace word repetitions: word word word word -- TK_WREP 4 word"
def _replace_wrep(m):
c,cc,e = m.groups()
return f' {TK_WREP} {len(cc.split())+2} {c} {e}'
return _re_wrep.sub(_replace_wrep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same word or more.
###Code
test_eq(replace_wrep('ah ah'), 'ah ah')
test_eq(replace_wrep('ah ah ah'), f' {TK_WREP} 3 ah ')
test_eq(replace_wrep('ah ah ah ah'), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah '), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah.'), f' {TK_WREP} 4 ah .')
test_eq(replace_wrep('ah ah ahi'), f'ah ah ahi')
#export
def fix_html(x):
"Various messy things we've seen in documents"
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
'#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(' @-@ ','-').replace('...',' …')
return html.unescape(x)
test_eq(fix_html('#39;bli#146;'), "'bli'")
test_eq(fix_html('Sarah amp; Duck...'), 'Sarah & Duck …')
test_eq(fix_html('a nbsp; #36;'), 'a $')
test_eq(fix_html('\\" <unk>'), f'" {UNK}')
test_eq(fix_html('quot; @.@ @-@ '), "' .-")
test_eq(fix_html('<br />text\\n'), '\ntext\n')
#export
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
#hide
"""
Catches any word in all caps, even with ' or - inside
(\s|^) Catching group with either a whitespace or the beginning of text
([A-Z]+ Catching group with one capitalized letter or more...
[^a-z\s]*) ...followed by anything that's non lowercase or whitespace
(?=(\s|$))     Look ahead for a space or end of text
The look ahead is there to not move the pointer ahead of the next space in case we have consecutive words in all caps.
""";
#export
def replace_all_caps(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_all_caps(m):
tok = f'{TK_UP} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_all_caps.sub(_replace_all_caps, t)
test_eq(replace_all_caps("I'M SHOUTING"), f"{TK_UP} i'm {TK_UP} shouting")
test_eq(replace_all_caps("I'm speaking normally"), "I'm speaking normally")
test_eq(replace_all_caps("I am speaking normally"), "i am speaking normally")
#export
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
#hide
"""
Catches any capitalized word
(\s|^) Catching group with either a whitespace or the beginning of text
([A-Z] Catching group with exactly one capitalized letter...
[^A-Z\s]*) ...followed by anything that's not uppercase or whitespace
(?=(\s|$))     Look ahead for a space or end of text
The look ahead is there to not move the pointer ahead of the next space in case we have consecutive words in all caps.
""";
#export
def replace_maj(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_maj(m):
tok = f'{TK_MAJ} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_maj.sub(_replace_maj, t)
test_eq(replace_maj("Jeremy Howard"), f'{TK_MAJ} jeremy {TK_MAJ} howard')
test_eq(replace_maj("I don't think there is any maj here"), ("i don't think there is any maj here"),)
#export
def lowercase(t, add_bos=True, add_eos=False):
"Converts `t` to lowercase"
return (f'{BOS} ' if add_bos else '') + t.lower().strip() + (f' {EOS}' if add_eos else '')
#export
def replace_space(t):
"Replace embedded spaces in a token with unicode line char to allow for split/join"
return t.replace(' ', '▁')
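# A couple of quick illustrative checks of `lowercase` and `replace_space`
test_eq(lowercase('Hello World'), f'{BOS} hello world')
test_eq(lowercase('Hello World', add_eos=True), f'{BOS} hello world {EOS}')
test_eq(replace_space('New York'), 'New▁York')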
#export
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
###Output
_____no_output_____
###Markdown
Tokenizing A tokenizer is a class that must implement a `pipe` method. This `pipe` method receives a generator of texts and must return a generator with their tokenized versions. Here is the most basic example:
###Code
#export
class BaseTokenizer():
"Basic tokenizer that just splits on spaces"
def __init__(self, split_char=' ', **kwargs): self.split_char=split_char
def pipe(self, items): return (t.split(self.split_char) for t in items)
tok = BaseTokenizer()
for t in tok.pipe(["This is a text"]): test_eq(t, ["This", "is", "a", "text"])
tok = BaseTokenizer('x')
for t in tok.pipe(["This is a text"]): test_eq(t, ["This is a te", "t"])
#export
class SpacyTokenizer():
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, batch_size=5000):
special_toks = ifnone(special_toks, defaults.text_spec_tok)
self.nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
for w in special_toks: self.nlp.tokenizer.add_special_case(w, [{ORTH: w}])
self.batch_size=batch_size
def pipe(self, items):
for doc in self.nlp.pipe(items, batch_size=self.batch_size):
yield [d.text for d in doc]
tok = SpacyTokenizer()
for t in tok.pipe(["This isn't the easiest text."]):
test_eq(t, ["This", "is", "n't", "the", "easiest", "text", "."])
#export
def apply_rules(items, rules):
"Returns a generator that apply `rules` to `items`"
return map(compose(*rules), items)
for t in apply_rules(["This is a text"], [replace_maj]): test_eq(t, f"{TK_MAJ} this is a text")
#export
class TokenizeBatch:
"A wrapper around `tok_func` to apply `rules` and tokenize in parallel"
def __init__(self, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs ):
self.rules = L(ifnone(rules, defaults.text_proc_rules))
self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
self.tok = tok_func(**tok_kwargs)
def __call__(self, batch):
for o in self.tok.pipe(apply_rules(batch, self.rules)): yield L(o).mapped(self.post_f)
f = TokenizeBatch()
test_eq(f(["This isn't a problem"]), [[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem']])
f = TokenizeBatch(BaseTokenizer, rules=[], split_char="'")
test_eq(f(["This isn't a problem"]), [['This▁isn', 't▁a▁problem']])
###Output
_____no_output_____
###Markdown
The main wrapper that will be called in each of the processes handling tokenization. It will create an instance of a tokenizer with `tok_func` and `tok_kwargs` at init, then iterate through the `batch` of texts, apply `rules` to them and tokenize them.
###Code
texts = ["this is a text", "this is another text"]
tok = TokenizeBatch(BaseTokenizer, texts.__getitem__)
test_eq([t for t in tok([0,1])],[['this', 'is', 'a', 'text'], ['this', 'is', 'another', 'text']])
#export
def tokenize1(text, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs):
"Tokenize one `text` with an instance of `tok_func` and some `rules`"
return next(iter(TokenizeBatch(tok_func, rules, post_rules, **tok_kwargs)([text])))
test_eq(tokenize1("This isn't a problem"),
[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem'])
test_eq(tokenize1("This isn't a problem", BaseTokenizer, rules=[], split_char="'"),
['This▁isn', 't▁a▁problem'])
#export
def parallel_tokenize(items, tok_func, rules, as_gen=False, n_workers=defaults.cpus, **tok_kwargs):
"Calls a potential setup on `tok_func` before launching `TokenizeBatch` in parallel"
if hasattr(tok_func, 'setup'): tok_kwargs = tok_func(**tok_kwargs).setup(items, rules)
return parallel_gen(TokenizeBatch, items, as_gen=as_gen, tok_func=tok_func,
rules=rules, n_workers=n_workers, **tok_kwargs)
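# A minimal illustration of calling `parallel_tokenize` directly (it is normally used
# through `tokenize_folder`/`tokenize_df`), here with the simple `BaseTokenizer` and an
# explicit list of rules
_toks = parallel_tokenize(["This is a text", "This is another text"], BaseTokenizer,
                          rules=[replace_maj, lowercase], n_workers=1)
test_eq(_toks[0], [BOS, TK_MAJ, 'this', 'is', 'a', 'text'])
test_eq(_toks[1], [BOS, TK_MAJ, 'this', 'is', 'another', 'text'])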
###Output
_____no_output_____
###Markdown
Tokenize texts in files Preprocessing function for texts in filenames. Tokenized texts will be saved in a similar fashion in a directory suffixed with `_tok` in the parent folder of `path` (override with `output_dir`).
###Code
#export
@patch
def read(self:Path):
"Read the content of `fname`"
with self.open() as f: return f.read()
#export
@patch
def write(self:Path, txt):
"Write `txt` to `self`, creating directories as needed"
self.parent.mkdir(parents=True,exist_ok=True)
with self.open('w') as f: f.write(txt)
#export
def tokenize_folder(path, extensions=None, include=None, output_dir=None, n_workers=defaults.cpus,
rules=None, tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize text files in `path` in parallel using `n_workers`"
path,extensions = Path(path),ifnone(extensions, ['.txt'])
fnames = get_files(path, extensions=extensions, recurse=True, include=include)
output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
rules = Path.read + L(ifnone(rules, defaults.text_proc_rules.copy()))
counter = Counter()
for i,tok in parallel_tokenize(fnames, tok_func, rules, as_gen=True, n_workers=n_workers, **tok_kwargs):
out = output_dir/fnames[i].relative_to(path)
out.write(' '.join(tok))
out.with_suffix('.len').write(str(len(tok)))
counter.update(tok)
pickle.dump(counter, open(output_dir/'counter.pkl','wb'))
###Output
_____no_output_____
###Markdown
The result will be in `output_dir` (defaults to a folder in the same parent directory as `path`, with `_tok` added to `path.name`) with the same structure as in `path`. Tokenized texts for a given file will be in the file having the same name in `output_dir`. Additionally, a file with a .len suffix contains the number of tokens and the count of all words is stored in `output_dir/counter.pkl`.`extensions` will default to `['.txt']` and all text files in `path` are processed unless you specify a list of folders in `include`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (which default to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer.
###Code
# TODO: test include option
path = Path('tmp')
os.makedirs(path, exist_ok=True)
for d in ['a', 'b', 'c']:
os.makedirs(path/d, exist_ok=True)
for i in range(5):
with open(path/d/f'text{i}.txt', 'w') as f: f.write(f"This is an example of text {d} {i}")
tokenize_folder(path)
outp = Path('tmp_tok')
assert outp.is_dir()
for d in ['a', 'b', 'c']:
p = outp/d
assert p.is_dir()
for i in range(5):
assert (p/f'text{i}.txt').is_file()
assert (p/f'text{i}.len').is_file()
test_eq((p/f'text{i}.txt').read(), ' '.join([
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', d, str(i) ]))
test_eq((p/f'text{i}.len').read(), '10')
shutil.rmtree(path)
shutil.rmtree(outp)
###Output
_____no_output_____
###Markdown
Tokenize texts in a dataframe
###Code
#export
def _join_texts(df, mark_fields=False):
"Join texts in row `idx` of `df`, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
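# Illustrative check of `_join_texts` (example added for clarity): with `mark_fields=True`
# each column is prefixed by `FLD` and its 1-based index
_tst_df = pd.DataFrame({'a': ['hello'], 'b': ['world']})
test_eq(list(_join_texts(_tst_df, mark_fields=True)),  [f'{FLD} 1 hello {FLD} 2 world'])
test_eq(list(_join_texts(_tst_df, mark_fields=False)), ['hello world'])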
#export
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize texts in `df[text_cols]` in parallel using `n_workers`"
text_cols = L(text_cols)
mark_fields = ifnone(mark_fields, len(text_cols) > 1)
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
texts = _join_texts(df[text_cols], mark_fields=mark_fields)
outputs = L(parallel_tokenize(texts, tok_func, rules, n_workers=n_workers, **tok_kwargs))
lengths = outputs.mapped(len)
counter = Counter()
for o in outputs: counter.update(o)
other_cols = [c for c in df.columns if c not in text_cols]
res = df[other_cols].copy()
res['text'],res['text_lengths'] = outputs,lengths
return res,counter
###Output
_____no_output_____
###Markdown
This function returns a new dataframe with the same non-text columns, a column named text that contains the tokenized texts and a column named text_lengths that contains their respective lengths. It also returns a counter of all words seen, to quickly build a vocabulary afterward.`tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (which default to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
###Code
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text')
test_eq(list(out.columns), ['label', 'text', 'text_lengths'])
test_eq(out['label'].values, df['label'].values)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i) ])
test_eq(out['text_lengths'][i], 9)
# hide
#With two columns of text, mark_fields defaults to True
df['text1'] = df['text'].values
out,cnt = tokenize_df(df, text_cols=['text', 'text1'])
test_eq(list(out.columns), ['label', 'text', 'text_lengths'])
test_eq(out['label'].values, df['label'].values)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, FLD, '1', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i),
FLD, '2', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i)
])
test_eq(out['text_lengths'][i], 21)
#export
#TODO: test + rework
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, header='infer', chunksize=None, **tok_kwargs):
"Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
df = pd.read_csv(fname, header=header, chunksize=chunksize)
outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
    kwargs = dict(n_workers=n_workers, rules=rules,
                  mark_fields=mark_fields, tok_func=tok_func, **tok_kwargs)
    if chunksize is None:
        out,cnt = tokenize_df(df, text_cols, **kwargs)
out.to_csv(outname, header=header, index=False)
else:
cnt = Counter()
for i,dfp in enumerate(df):
            out,c = tokenize_df(dfp, text_cols, **kwargs)
out.to_csv(outname, header=header if i==0 else None, index=False, mode='w' if i==0 else 'a')
cnt.update(c)
pickle.dump(cnt, open(outname.parent/'counter.pkl', 'wb'))
###Output
_____no_output_____
###Markdown
The result will be written in a new csv file in `outname` (defaults to the same as `fname` with the suffix `_tok.csv`) and will have the same header as the original file, the same non-text columns, and a text and a text_lengths column as described in `tokenize_df`.`tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (which default to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.The csv file is opened with `header` and optionally read in blocks of `chunksize` rows at a time. If this argument is passed, each chunk is processed independently and saved in the output file, to save memory usage. Sentencepiece
###Code
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
#export
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
model_type='unigram', char_coverage=None, cache_dir='tmp'):
try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
except ImportError:
raise Exception('sentencepiece module is missing: run `pip install sentencepiece`')
self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
if sp_model is None: self.tok = None
else:
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
os.makedirs(self.cache_dir, exist_ok=True)
def _get_vocab_sz(self, raw_text_path):
cnt = Counter()
with open(raw_text_path, 'r') as f:
for line in f.readlines():
cnt.update(line.split())
if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
res = len(cnt)//4
while res%8 != 0: res+=1
return res
def train(self, raw_text_path):
"Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
from sentencepiece import SentencePieceTrainer
vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
spec_tokens = ['\u2581'+s for s in self.special_toks]
SentencePieceTrainer.Train(" ".join([
f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1",
f"--user_defined_symbols={','.join(spec_tokens)}"]))
raw_text_path.unlink()
return self.cache_dir/'spm.model'
def setup(self, items, rules):
if self.tok is not None: return {'sp_model': self.sp_model}
raw_text_path = self.cache_dir/'texts.out'
with open(raw_text_path, 'w') as f:
for t in progress_bar(apply_rules(items, rules), total=len(items), leave=False):
f.write(f'{t}\n')
return {'sp_model': self.train(raw_text_path)}
def pipe(self, items):
for t in items: yield self.tok.EncodeAsPieces(t)
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text', tok_func=SentencePieceTokenizer, vocab_sz=34)
out
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_torch_core.ipynb.
Converted 01b_script.ipynb.
Converted 01c_dataloader.ipynb.
Converted 02_data_transforms.ipynb.
Converted 03_data_pipeline.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_source.ipynb.
Converted 07_vision_core.ipynb.
Converted 08_pets_tutorial.ipynb.
Converted 09_vision_augment.ipynb.
Converted 11_layers.ipynb.
Converted 11a_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 15_callback_hook.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_metrics.ipynb.
Converted 21_tutorial_imagenette.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_test_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 50_data_block.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_index.ipynb.
Converted 95_utils_test.ipynb.
Converted 96_data_external.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Text core> Basic function to preprocess text before assembling it in a `DataBunch`.
###Code
#export
import concurrent.futures
from concurrent.futures import as_completed
from multiprocessing import Process, Queue
import spacy,html
from spacy.symbols import ORTH
###Output
_____no_output_____
###Markdown
Multiprocessing
###Code
#export
class ProcessPoolExecutor(concurrent.futures.ProcessPoolExecutor):
def __init__(self, max_workers=None, mp_context=None, initializer=None, initargs=()):
self.no_workers = max_workers==0
if self.no_workers: max_workers=1
        super().__init__(max_workers, mp_context, initializer=initializer, initargs=initargs)
def map(self, f, items):
return [f(o) for o in items] if self.no_workers else super().map(f, items)
#export
def parallel(func, items, n_workers=defaults.cpus):
"Applies `func` in parallel to `items`, using `n_workers`"
with ProcessPoolExecutor(max_workers=n_workers) as ex:
return [x for x in progress_bar(ex.map(func,items), total=len(items), leave=False)]
def add_one(x):
time.sleep(random.random()/100)
return x+1
test_eq(parallel(add_one, range(100)), range(1,101))
test_eq(parallel(add_one, range(100), n_workers=1), range(1,101))
test_eq(parallel(add_one, range(100), n_workers=0), range(1,101))
#export
def parallel_gen(cls, items, n_workers=defaults.cpus, as_gen=False, **kwargs):
"Instantiate `cls` in `n_workers` procs & call each on a subset of `items` in parallel."
queue = Queue()
batches = np.array_split(items, n_workers)
idx = np.cumsum(0 + L(batches).mapped(len))
def _f(batch, start_idx):
f = cls(**kwargs)
for i,b in enumerate(f(batch)): queue.put((start_idx+i,b))
processes = [Process(target=_f, args=o) for o in zip(batches,idx)]
for p in processes: p.start()
res = (queue.get() for _ in progress_bar(items, leave=False))
try: return res if as_gen else [o[1] for o in sorted(res)]
finally:
for p in processes: p.join()
###Output
_____no_output_____
###Markdown
`cls` is any class with `__call__`. It will be passed `kwargs` when initialized. Note that `n_workers` instances of `cls` are created, one in each process. `items` are then split into `n_workers` batches and one is sent to each `cls`. The function then returns a list of all the results, matching the order of `items` (if not `as_gen`) or a generator of tuples of item indices and results (if `as_gen`).
###Code
class SleepyBatchFunc:
def __init__(self): self.a=1
def __call__(self, batch):
for k in batch:
time.sleep(random.random()/10)
yield k+self.a
x = np.linspace(0,0.99,100)
res = parallel_gen(SleepyBatchFunc, x, n_workers=2)
test_eq(res, x+1)
###Output
_____no_output_____
###Markdown
Preprocessing rules The following rules are applied to texts before or after they are tokenized.
###Code
#export
#special tokens
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
#export
_all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
#export
_re_spec = re.compile(r'([/#\\])')
def spec_add_spaces(t):
"Add spaces around / and #"
return _re_spec.sub(r' \1 ', t)
test_eq(spec_add_spaces('#fastai'), ' # fastai')
test_eq(spec_add_spaces('/fastai'), ' / fastai')
test_eq(spec_add_spaces('\\fastai'), ' \\ fastai')
#export
_re_space = re.compile(' {2,}')
def rm_useless_spaces(t):
"Remove multiple spaces"
return _re_space.sub(' ', t)
test_eq(rm_useless_spaces('a b c'), 'a b c')
#export
_re_rep = re.compile(r'(\S)(\1{2,})')
def replace_rep(t):
"Replace repetitions at the character level: cccc -- TK_REP 4 c"
def _replace_rep(m):
c,cc = m.groups()
return f' {TK_REP} {len(cc)+1} {c} '
return _re_rep.sub(_replace_rep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same character or more.
###Code
test_eq(replace_rep('aa'), 'aa')
test_eq(replace_rep('aaaa'), f' {TK_REP} 4 a ')
#export
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
#hide
"""
Matches any word repeated at least three times with spaces between them
(?:\s|^)       Non-catching group with either a whitespace character or the beginning of text
(\w+)          Catching group of one or more alphanumeric characters
\s+            One or more whitespace
((?:\1\s+)+)   Catching group of a repetition of one or more times \1 followed by one or more whitespace
\1             Occurrence of \1
(\s|\W|$) Catching group of last whitespace, non alphanumeric character or end of text
""";
#export
def replace_wrep(t):
"Replace word repetitions: word word word word -- TK_WREP 4 word"
def _replace_wrep(m):
c,cc,e = m.groups()
return f' {TK_WREP} {len(cc.split())+2} {c} {e}'
return _re_wrep.sub(_replace_wrep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same word or more.
###Code
test_eq(replace_wrep('ah ah'), 'ah ah')
test_eq(replace_wrep('ah ah ah'), f' {TK_WREP} 3 ah ')
test_eq(replace_wrep('ah ah ah ah'), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah '), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah.'), f' {TK_WREP} 4 ah .')
test_eq(replace_wrep('ah ah ahi'), f'ah ah ahi')
#export
def fix_html(x):
"Various messy things we've seen in documents"
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
'#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(' @-@ ','-').replace('...',' …')
return html.unescape(x)
test_eq(fix_html('#39;bli#146;'), "'bli'")
test_eq(fix_html('Sarah amp; Duck...'), 'Sarah & Duck …')
test_eq(fix_html('a nbsp; #36;'), 'a $')
test_eq(fix_html('\\" <unk>'), f'" {UNK}')
test_eq(fix_html('quot; @.@ @-@ '), "' .-")
test_eq(fix_html('<br />text\\n'), '\ntext\n')
#export
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
#hide
"""
Catches any word in all caps, even with ' or - inside
(\s|^) Catching group with either a whitespace or the beginning of text
([A-Z]+ Catching group with one capitalized letter or more...
[^a-z\s]*) ...followed by anything that's non lowercase or whitespace
(?=(\s|$))     Look ahead for a space or end of text
The look ahead is there to not move the pointer ahead of the next space in case we have consecutive words in all caps.
""";
#export
def replace_all_caps(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_all_caps(m):
tok = f'{TK_UP} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_all_caps.sub(_replace_all_caps, t)
test_eq(replace_all_caps("I'M SHOUTING"), f"{TK_UP} i'm {TK_UP} shouting")
test_eq(replace_all_caps("I'm speaking normally"), "I'm speaking normally")
test_eq(replace_all_caps("I am speaking normally"), "i am speaking normally")
#export
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
#hide
"""
Catches any capitalized word
(\s|^) Catching group with either a whitespace or the beginning of text
([A-Z] Catching group with exactly one capitalized letter...
[^A-Z\s]*) ...followed by anything that's not uppercase or whitespace
(?=(\s|$))     Look ahead for a space or end of text
The look ahead is there to not move the pointer ahead of the next space in case we have consecutive words in all caps.
""";
#export
def replace_maj(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_maj(m):
tok = f'{TK_MAJ} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_maj.sub(_replace_maj, t)
test_eq(replace_maj("Jeremy Howard"), f'{TK_MAJ} jeremy {TK_MAJ} howard')
test_eq(replace_maj("I don't think there is any maj here"), ("i don't think there is any maj here"),)
#export
def lowercase(t, add_bos=True, add_eos=False):
"Converts `t` to lowercase"
return (f'{BOS} ' if add_bos else '') + t.lower().strip() + (f' {EOS}' if add_eos else '')
#export
def replace_space(t):
"Replace embedded spaces in a token with unicode line char to allow for split/join"
return t.replace(' ', '▁')
#export
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
###Output
_____no_output_____
###Markdown
Tokenizing A tokenizer is a class that must implement a `pipe` method. This `pipe` method receives a generator of texts and must return a generator with their tokenized versions. Here is the most basic example:
###Code
#export
class BaseTokenizer():
"Basic tokenizer that just splits on spaces"
def __init__(self, split_char=' ', **kwargs): self.split_char=split_char
def pipe(self, items): return (t.split(self.split_char) for t in items)
tok = BaseTokenizer()
for t in tok.pipe(["This is a text"]): test_eq(t, ["This", "is", "a", "text"])
tok = BaseTokenizer('x')
for t in tok.pipe(["This is a text"]): test_eq(t, ["This is a te", "t"])
#export
class SpacyTokenizer():
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, batch_size=5000):
special_toks = ifnone(special_toks, defaults.text_spec_tok)
self.nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
for w in special_toks: self.nlp.tokenizer.add_special_case(w, [{ORTH: w}])
self.batch_size=batch_size
def pipe(self, items):
for doc in self.nlp.pipe(items, batch_size=self.batch_size):
yield [d.text for d in doc]
tok = SpacyTokenizer()
for t in tok.pipe(["This isn't the easiest text."]):
test_eq(t, ["This", "is", "n't", "the", "easiest", "text", "."])
#export
def apply_rules(items, rules):
"Returns a generator that apply `rules` to `items`"
return map(compose(*rules), items)
for t in apply_rules(["This is a text"], [replace_maj]): test_eq(t, f"{TK_MAJ} this is a text")
#export
class TokenizeBatch:
"A wrapper around `tok_func` to apply `rules` and tokenize in parallel"
def __init__(self, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs ):
self.rules = L(ifnone(rules, defaults.text_proc_rules))
self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
self.tok = tok_func(**tok_kwargs)
def __call__(self, batch):
for o in self.tok.pipe(apply_rules(batch, self.rules)): yield L(o).mapped(self.post_f)
f = TokenizeBatch()
test_eq(f(["This isn't a problem"]), [[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem']])
f = TokenizeBatch(BaseTokenizer, rules=[], split_char="'")
test_eq(f(["This isn't a problem"]), [['This▁isn', 't▁a▁problem']])
###Output
_____no_output_____
###Markdown
The main wrapper that will be called in each of the processes handling tokenization. It will create an instance of a tokenizer with `tok_func` and `tok_kwargs` at init, then iterate through the `batch` of texts, apply `rules` to them and tokenize them.
###Code
texts = ["this is a text", "this is another text"]
tok = TokenizeBatch(BaseTokenizer, texts.__getitem__)
test_eq([t for t in tok([0,1])],[['this', 'is', 'a', 'text'], ['this', 'is', 'another', 'text']])
#export
def tokenize1(text, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs):
"Tokenize one `text` with an instance of `tok_func` and some `rules`"
return next(iter(TokenizeBatch(tok_func, rules, post_rules, **tok_kwargs)([text])))
test_eq(tokenize1("This isn't a problem"),
[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem'])
test_eq(tokenize1("This isn't a problem", BaseTokenizer, rules=[], split_char="'"),
['This▁isn', 't▁a▁problem'])
#export
def parallel_tokenize(items, tok_func, rules, as_gen=False, n_workers=defaults.cpus, **tok_kwargs):
"Calls a potential setup on `tok_func` before launching `TokenizeBatch` in parallel"
if hasattr(tok_func, 'setup'): tok_kwargs = tok_func(**tok_kwargs).setup(items, rules)
return parallel_gen(TokenizeBatch, items, as_gen=as_gen, tok_func=tok_func,
rules=rules, n_workers=n_workers, **tok_kwargs)
###Output
_____no_output_____
###Markdown
Tokenize texts in files Preprocessing function for texts in filenames. Tokenized texts will be saved in a similar fashion in a directory suffixed with `_tok` in the parent folder of `path` (override with `output_dir`).
###Code
#export
@patch
def read(self:Path):
"Read the content of `fname`"
with self.open() as f: return f.read()
#export
@patch
def write(self:Path, txt):
"Write `txt` to `self`, creating directories as needed"
self.parent.mkdir(parents=True,exist_ok=True)
with self.open('w') as f: f.write(txt)
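# Illustrative round-trip for the two patched methods (uses the same `tmp` scratch folder
# as the `tokenize_folder` test below)
_p = Path('tmp')/'rw_check.txt'
_p.write('hello')
test_eq(_p.read(), 'hello')
_p.unlink()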
#export
def tokenize_folder(path, extensions=None, folders=None, output_dir=None, n_workers=defaults.cpus,
rules=None, tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize text files in `path` in parallel using `n_workers`"
path,extensions = Path(path),ifnone(extensions, ['.txt'])
fnames = get_files(path, extensions=extensions, recurse=True, folders=folders)
output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
rules = Path.read + L(ifnone(rules, defaults.text_proc_rules.copy()))
counter = Counter()
for i,tok in parallel_tokenize(fnames, tok_func, rules, as_gen=True, n_workers=n_workers, **tok_kwargs):
out = output_dir/fnames[i].relative_to(path)
out.write(' '.join(tok))
out.with_suffix('.len').write(str(len(tok)))
counter.update(tok)
pickle.dump(counter, open(output_dir/'counter.pkl','wb'))
###Output
_____no_output_____
###Markdown
The result will be in `output_dir` (defaults to a folder in the same parent directory as `path`, with `_tok` added to `path.name`) with the same structure as in `path`. Tokenized texts for a given file will be in the file having the same name in `output_dir`. Additionally, a file with a .len suffix contains the number of tokens and the count of all words is stored in `output_dir/counter.pkl`.`extensions` will default to `['.txt']` and all text files in `path` are processed unless you specify a list of folders in `folders`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (which default to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer.
###Code
# TODO: test include option
path = Path('tmp')
os.makedirs(path, exist_ok=True)
for d in ['a', 'b', 'c']:
os.makedirs(path/d, exist_ok=True)
for i in range(5):
with open(path/d/f'text{i}.txt', 'w') as f: f.write(f"This is an example of text {d} {i}")
tokenize_folder(path)
outp = Path('tmp_tok')
assert outp.is_dir()
for d in ['a', 'b', 'c']:
p = outp/d
assert p.is_dir()
for i in range(5):
assert (p/f'text{i}.txt').is_file()
assert (p/f'text{i}.len').is_file()
test_eq((p/f'text{i}.txt').read(), ' '.join([
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', d, str(i) ]))
test_eq((p/f'text{i}.len').read(), '10')
shutil.rmtree(path)
shutil.rmtree(outp)
###Output
_____no_output_____
###Markdown
Tokenize texts in a dataframe
###Code
#export
def _join_texts(df, mark_fields=False):
"Join texts in row `idx` of `df`, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
#export
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize texts in `df[text_cols]` in parallel using `n_workers`"
text_cols = L(text_cols)
mark_fields = ifnone(mark_fields, len(text_cols) > 1)
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
texts = _join_texts(df[text_cols], mark_fields=mark_fields)
outputs = L(parallel_tokenize(texts, tok_func, rules, n_workers=n_workers, **tok_kwargs))
lengths = outputs.mapped(len)
counter = Counter()
for o in outputs: counter.update(o)
other_cols = [c for c in df.columns if c not in text_cols]
res = df[other_cols].copy()
res['text'],res['text_lengths'] = outputs,lengths
return res,counter
###Output
_____no_output_____
###Markdown
This function returns a new dataframe with the same non-text columns, a column named text that contains the tokenized texts and a column named text_lengths that contains their respective lengths. It also returns a counter of all words seen, to quickly build a vocabulary afterward.`tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (which default to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
###Code
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text')
test_eq(list(out.columns), ['label', 'text', 'text_lengths'])
test_eq(out['label'].values, df['label'].values)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i) ])
test_eq(out['text_lengths'][i], 9)
# hide
#With two columns of text, mark_fields defaults to True
df['text1'] = df['text'].values
out,cnt = tokenize_df(df, text_cols=['text', 'text1'])
test_eq(list(out.columns), ['label', 'text', 'text_lengths'])
test_eq(out['label'].values, df['label'].values)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, FLD, '1', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i),
FLD, '2', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i)
])
test_eq(out['text_lengths'][i], 21)
#export
#TODO: test + rework
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, header='infer', chunksize=None, **tok_kwargs):
"Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
df = pd.read_csv(fname, header=header, chunksize=chunksize)
outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
    kwargs = dict(n_workers=n_workers, rules=rules,
                  mark_fields=mark_fields, tok_func=tok_func, **tok_kwargs)
    if chunksize is None:
        out,cnt = tokenize_df(df, text_cols, **kwargs)
out.to_csv(outname, header=header, index=False)
else:
cnt = Counter()
for i,dfp in enumerate(df):
            out,c = tokenize_df(dfp, text_cols, **kwargs)
out.to_csv(outname, header=header if i==0 else None, index=False, mode='w' if i==0 else 'a')
cnt.update(c)
pickle.dump(cnt, open(outname.parent/'counter.pkl', 'wb'))
###Output
_____no_output_____
###Markdown
The result will be written in a new csv file in `outname` (defaults to the same as `fname` with the suffix `_tok.csv`) and will have the same header as the original file, the same non-text columns, and a text and a text_lengths column as described in `tokenize_df`.`tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (which default to `defaults.text_proc_rules`) are applied to each text before going into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.The csv file is opened with `header` and optionally read in blocks of `chunksize` rows at a time. If this argument is passed, each chunk is processed independently and saved in the output file, to save memory usage. Sentencepiece
###Code
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
#export
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
model_type='unigram', char_coverage=None, cache_dir='tmp'):
try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
except ImportError:
raise Exception('sentencepiece module is missing: run `pip install sentencepiece`')
self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
if sp_model is None: self.tok = None
else:
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
os.makedirs(self.cache_dir, exist_ok=True)
def _get_vocab_sz(self, raw_text_path):
cnt = Counter()
with open(raw_text_path, 'r') as f:
for line in f.readlines():
cnt.update(line.split())
if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
res = len(cnt)//4
while res%8 != 0: res+=1
return res
def train(self, raw_text_path):
"Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
from sentencepiece import SentencePieceTrainer
vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
spec_tokens = ['\u2581'+s for s in self.special_toks]
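        # bos/eos/pad ids are disabled (-1) because fastai adds its own special tokens,
        # which are passed to sentencepiece as user-defined symbols instead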
SentencePieceTrainer.Train(" ".join([
f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1",
f"--user_defined_symbols={','.join(spec_tokens)}"]))
raw_text_path.unlink()
return self.cache_dir/'spm.model'
def setup(self, items, rules):
if self.tok is not None: return {'sp_model': self.sp_model}
raw_text_path = self.cache_dir/'texts.out'
with open(raw_text_path, 'w') as f:
for t in progress_bar(apply_rules(items, rules), total=len(items), leave=False):
f.write(f'{t}\n')
return {'sp_model': self.train(raw_text_path)}
def pipe(self, items):
for t in items: yield self.tok.EncodeAsPieces(t)
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text', tok_func=SentencePieceTokenizer, vocab_sz=34)
out
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_torch_core.ipynb.
Converted 01b_script.ipynb.
Converted 01c_dataloader.ipynb.
Converted 02_data_transforms.ipynb.
Converted 03_data_pipeline.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_source.ipynb.
Converted 07_vision_core.ipynb.
Converted 08_pets_tutorial.ipynb.
Converted 09_vision_augment.ipynb.
Converted 11_layers.ipynb.
Converted 11a_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 15_callback_hook.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_metrics.ipynb.
Converted 21_tutorial_imagenette.ipynb.
Converted 22_vision_learner.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_index.ipynb.
Converted 95_utils_test.ipynb.
Converted 96_data_external.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Text core> Basic function to preprocess text before assembling it in a `DataBunch`.
###Code
#export
import concurrent.futures
from concurrent.futures import as_completed
from multiprocessing import Process, Queue
import spacy,html
from spacy.symbols import ORTH
###Output
_____no_output_____
###Markdown
Multiprocessing
###Code
#export
class ProcessPoolExecutor(concurrent.futures.ProcessPoolExecutor):
def __init__(self, max_workers=None, mp_context=None, initializer=None, initargs=()):
self.no_workers = max_workers==0
if self.no_workers: max_workers=1
        super().__init__(max_workers, mp_context, initializer=initializer, initargs=initargs)
def map(self, f, items):
return [f(o) for o in items] if self.no_workers else super().map(f, items)
#export
def parallel(func, items, n_workers=defaults.cpus):
"Applies `func` in parallel to `items`, using `n_workers`"
with ProcessPoolExecutor(max_workers=n_workers) as ex:
return [x for x in progress_bar(ex.map(func,items), total=len(items), leave=False)]
def add_one(x):
time.sleep(random.random()/100)
return x+1
test_eq(parallel(add_one, range(100)), range(1,101))
test_eq(parallel(add_one, range(100), n_workers=1), range(1,101))
test_eq(parallel(add_one, range(100), n_workers=0), range(1,101))
#export
def parallel_gen(cls, items, n_workers=defaults.cpus, as_gen=False, **kwargs):
"Instantiate `cls` in `n_workers` procs & call each on a subset of `items` in parallel."
queue = Queue()
batches = np.array_split(items, n_workers)
idx = np.cumsum(0 + L(batches).mapped(len))
def _f(batch, start_idx):
f = cls(**kwargs)
for i,b in enumerate(f(batch)): queue.put((start_idx+i,b))
processes = [Process(target=_f, args=o) for o in zip(batches,idx)]
for p in processes: p.start()
res = (queue.get() for _ in progress_bar(items, leave=False))
try: return res if as_gen else [o[1] for o in sorted(res)]
finally:
for p in processes: p.join()
###Output
_____no_output_____
###Markdown
`cls` is any class with `__call__`. It will be passed `kwargs` when initialized. Note that `n_workers` instances of `cls` are created, one in each process. `items` are then split into `n_workers` batches and one is sent to each `cls`. The function then returns a list of all the results, matching the order of `items` (if not `as_gen`) or a generator of tuples of item indices and results (if `as_gen`).
###Code
class SleepyBatchFunc:
def __init__(self): self.a=1
def __call__(self, batch):
for k in batch:
time.sleep(random.random()/10)
yield k+self.a
x = np.linspace(0,0.99,100)
res = parallel_gen(SleepyBatchFunc, x, n_workers=2)
test_eq(res, x+1)
###Output
_____no_output_____
###Markdown
Preprocessing rules The following rules are applied to texts before or after they are tokenized.
###Code
#export
#special tokens
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
_all_ = ['UNK', 'PAD', 'BOS', 'EOS', 'FLD', 'TK_REP', 'TK_WREP', 'TK_UP', 'TK_MAJ']
#export
_re_spec = re.compile(r'([/#\\])')
def spec_add_spaces(t):
"Add spaces around / and #"
return _re_spec.sub(r' \1 ', t)
test_eq(spec_add_spaces('#fastai'), ' # fastai')
test_eq(spec_add_spaces('/fastai'), ' / fastai')
test_eq(spec_add_spaces('\\fastai'), ' \\ fastai')
#export
_re_space = re.compile(' {2,}')
def rm_useless_spaces(t):
"Remove multiple spaces"
return _re_space.sub(' ', t)
test_eq(rm_useless_spaces('a b c'), 'a b c')
#export
_re_rep = re.compile(r'(\S)(\1{2,})')
def replace_rep(t):
"Replace repetitions at the character level: cccc -> TK_REP 4 c"
def _replace_rep(m):
c,cc = m.groups()
return f' {TK_REP} {len(cc)+1} {c} '
return _re_rep.sub(_replace_rep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same character or more.
###Code
test_eq(replace_rep('aa'), 'aa')
test_eq(replace_rep('aaaa'), f' {TK_REP} 4 a ')
#export
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
#hide
"""
Matches any word repeated at least three times with spaces between them
(?:\s|^)       Non-catching group with either a whitespace character or the beginning of text
(\w+)          Catching group of one or more alphanumeric characters
\s+            One or more whitespace
((?:\1\s+)+)   Catching group of a repetition of one or more times \1 followed by one or more whitespace
\1             Occurrence of \1
(\s|\W|$) Catching group of last whitespace, non alphanumeric character or end of text
""";
#export
def replace_wrep(t):
"Replace word repetitions: word word word word -> TK_WREP 4 word"
def _replace_wrep(m):
c,cc,e = m.groups()
return f' {TK_WREP} {len(cc.split())+2} {c} {e}'
return _re_wrep.sub(_replace_wrep, t)
###Output
_____no_output_____
###Markdown
It starts replacing at 3 repetitions of the same word or more.
###Code
test_eq(replace_wrep('ah ah'), 'ah ah')
test_eq(replace_wrep('ah ah ah'), f' {TK_WREP} 3 ah ')
test_eq(replace_wrep('ah ah ah ah'), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah '), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah.'), f' {TK_WREP} 4 ah .')
test_eq(replace_wrep('ah ah ahi'), f'ah ah ahi')
#export
def fix_html(x):
"Various messy things we've seen in documents"
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
'#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(' @-@ ','-').replace('...',' …')
return html.unescape(x)
test_eq(fix_html('#39;bli#146;'), "'bli'")
test_eq(fix_html('Sarah amp; Duck...'), 'Sarah & Duck …')
test_eq(fix_html('a nbsp; #36;'), 'a $')
test_eq(fix_html('\\" <unk>'), f'" {UNK}')
test_eq(fix_html('quot; @.@ @-@ '), "' .-")
test_eq(fix_html('<br />text\\n'), '\ntext\n')
#export
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
#hide
"""
Catches any word in all caps, even with ' or - inside
(\s|^) Catching group with either a whitespace or the beginning of text
([A-Z]+ Catching group with one capitalized letter or more...
[^a-z\s]*) ...followed by anything that's non lowercase or whitespace
(?=(\s|$)) Look ahead for a space or the end of text
The look ahead is used so the pointer is not moved past the next space when there are consecutive words in all caps.
""";
#export
def replace_all_caps(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_all_caps(m):
tok = f'{TK_UP} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_all_caps.sub(_replace_all_caps, t)
test_eq(replace_all_caps("I'M SHOUTING"), f"{TK_UP} i'm {TK_UP} shouting")
test_eq(replace_all_caps("I'm speaking normally"), "I'm speaking normally")
test_eq(replace_all_caps("I am speaking normally"), "i am speaking normally")
#export
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
#hide
"""
Catches any capitalized word
(\s|^) Catching group with either a whitespace or the beginning of text
([A-Z] Catching group with exactly one capitalized letter...
[^A-Z\s]*) ...followed by anything that's not uppercase or whitespace
(?=(\s|$)) Look ahead for a space or the end of text
The look ahead is used so the pointer is not moved past the next space when there are consecutive capitalized words.
""";
#export
def replace_maj(t):
    "Replace tokens that start with a capital letter by their lower version and add `TK_MAJ` before."
def _replace_maj(m):
tok = f'{TK_MAJ} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_maj.sub(_replace_maj, t)
test_eq(replace_maj("Jeremy Howard"), f'{TK_MAJ} jeremy {TK_MAJ} howard')
test_eq(replace_maj("I don't think there is any maj here"), ("i don't think there is any maj here"),)
#export
def lowercase(t, add_bos=True, add_eos=False):
    "Converts `t` to lowercase, optionally adding `BOS` and `EOS` tokens"
return (f'{BOS} ' if add_bos else '') + t.lower().strip() + (f' {EOS}' if add_eos else '')
#export
def replace_space(t):
"Replace embedded spaces in a token with unicode line char to allow for split/join"
return t.replace(' ', '▁')
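# Quick sanity checks for the two rules above (illustrative, mirroring the tests
# used for the other rules).
test_eq(lowercase('Hello'), f'{BOS} hello')
test_eq(lowercase('Hello', add_eos=True), f'{BOS} hello {EOS}')
test_eq(replace_space('hello world'), 'hello▁world')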
#export
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
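# Illustrative check (not part of the library code): apply the default preprocessing
# rules, in order, to a raw string. `compose` is the same helper `apply_rules` uses below.
sample_text = "I LOVED this movie soooo much <br />"
print(compose(*defaults.text_proc_rules)(sample_text))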
###Output
_____no_output_____
###Markdown
Tokenizing A tokenizer is a class that must implement a `pipe` method. This `pipe` method receives a generator of texts and must return a generator with their tokenized versions. Here is the most basic example:
###Code
#export
class BaseTokenizer():
"Basic tokenizer that just splits on spaces"
def __init__(self, split_char=' ', **kwargs): self.split_char=split_char
def pipe(self, items): return (t.split(self.split_char) for t in items)
tok = BaseTokenizer()
for t in tok.pipe(["This is a text"]): test_eq(t, ["This", "is", "a", "text"])
tok = BaseTokenizer('x')
for t in tok.pipe(["This is a text"]): test_eq(t, ["This is a te", "t"])
#export
class SpacyTokenizer():
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, batch_size=5000):
special_toks = ifnone(special_toks, defaults.text_spec_tok)
self.nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
for w in special_toks: self.nlp.tokenizer.add_special_case(w, [{ORTH: w}])
self.batch_size=batch_size
def pipe(self, items):
for doc in self.nlp.pipe(items, batch_size=self.batch_size):
yield [d.text for d in doc]
tok = SpacyTokenizer()
for t in tok.pipe(["This isn't the easiest text."]):
test_eq(t, ["This", "is", "n't", "the", "easiest", "text", "."])
#export
def apply_rules(items, rules):
    "Returns a generator that applies `rules` to `items`"
return map(compose(*rules), items)
for t in apply_rules(["This is a text"], [replace_maj]): test_eq(t, f"{TK_MAJ} this is a text")
#export
class TokenizeBatch:
"A wrapper around `tok_func` to apply `rules` and tokenize in parallel"
def __init__(self, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs ):
self.rules = L(ifnone(rules, defaults.text_proc_rules))
self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
self.tok = tok_func(**tok_kwargs)
def __call__(self, batch):
for o in self.tok.pipe(apply_rules(batch, self.rules)): yield L(o).mapped(self.post_f)
f = TokenizeBatch()
test_eq(f(["This isn't a problem"]), [[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem']])
f = TokenizeBatch(BaseTokenizer, rules=[], split_char="'")
test_eq(f(["This isn't a problem"]), [['This▁isn', 't▁a▁problem']])
###Output
_____no_output_____
###Markdown
The main function that will be called during one of the processes handling tokenization. It will create an instance of a tokenizer with `tok_func` and `tok_kwargs` at init, then iterate through the `batch` of texts, apply `rules` to them and tokenize them.
###Code
texts = ["this is a text", "this is another text"]
tok = TokenizeBatch(BaseTokenizer, texts.__getitem__)
test_eq([t for t in tok([0,1])],[['this', 'is', 'a', 'text'], ['this', 'is', 'another', 'text']])
#export
def tokenize1(text, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs):
"Tokenize one `text` with an instance of `tok_func` and some `rules`"
return next(iter(TokenizeBatch(tok_func, rules, post_rules, **tok_kwargs)([text])))
test_eq(tokenize1("This isn't a problem"),
[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem'])
test_eq(tokenize1("This isn't a problem", BaseTokenizer, rules=[], split_char="'"),
['This▁isn', 't▁a▁problem'])
#export
def parallel_tokenize(items, tok_func, rules, as_gen=False, n_workers=defaults.cpus, **tok_kwargs):
"Calls a potential setup on `tok_func` before launching `TokenizeBatch` in parallel"
if hasattr(tok_func, 'setup'): tok_kwargs = tok_func(**tok_kwargs).setup(items, rules)
return parallel_gen(TokenizeBatch, items, as_gen=as_gen, tok_func=tok_func,
rules=rules, n_workers=n_workers, **tok_kwargs)
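# Illustrative usage (not in the original notebook): tokenize two short texts in
# parallel with the default rules and the Spacy-based tokenizer.
toks = parallel_tokenize(["This is a text", "This is another text"],
                         SpacyTokenizer, defaults.text_proc_rules, n_workers=2)
test_eq(len(toks), 2)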
###Output
_____no_output_____
###Markdown
Tokenize texts in files Preprocessing function for texts in filenames. Tokenized texts will be saved in a similar fashion in a directory suffixed with `_tok` in the parent folder of `path` (override with `output_dir`).
###Code
#export
@patch
def read(self:Path):
"Read the content of `fname`"
with self.open() as f: return f.read()
#export
@patch
def write(self:Path, txt):
"Write `txt` to `self`, creating directories as needed"
self.parent.mkdir(parents=True,exist_ok=True)
with self.open('w') as f: f.write(txt)
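# Quick round-trip check of the two patched helpers above (illustrative; uses a
# throwaway file that is removed afterwards).
tmp_file = Path('tmp_rw_check/example.txt')
tmp_file.write('hello')
test_eq(tmp_file.read(), 'hello')
shutil.rmtree('tmp_rw_check')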
#export
def tokenize_folder(path, extensions=None, include=None, output_dir=None, n_workers=defaults.cpus,
rules=None, tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize text files in `path` in parallel using `n_workers`"
path,extensions = Path(path),ifnone(extensions, ['.txt'])
fnames = get_files(path, extensions=extensions, recurse=True, include=include)
output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
rules = Path.read + L(ifnone(rules, defaults.text_proc_rules.copy()))
counter = Counter()
for i,tok in parallel_tokenize(fnames, tok_func, rules, as_gen=True, n_workers=n_workers, **tok_kwargs):
out = output_dir/fnames[i].relative_to(path)
out.write(' '.join(tok))
out.with_suffix('.len').write(str(len(tok)))
counter.update(tok)
pickle.dump(counter, open(output_dir/'counter.pkl','wb'))
###Output
_____no_output_____
###Markdown
The result will be in `output_dir` (defaults to a folder in the same parent directory as `path`, with `_tok` added to `path.name`) with the same structure as in `path`. Tokenized texts for a given file will be in the file having the same name in `output_dir`. Additionally, a file with a `.len` suffix contains the number of tokens, and the counts of all words are stored in `output_dir/counter.pkl`. `extensions` defaults to `['.txt']` and all text files in `path` are processed unless you specify a list of folders in `include`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (which default to `defaults.text_proc_rules`) are applied to each text before it goes into the tokenizer.
###Code
# TODO: test include option
path = Path('tmp')
os.makedirs(path, exist_ok=True)
for d in ['a', 'b', 'c']:
os.makedirs(path/d, exist_ok=True)
for i in range(5):
with open(path/d/f'text{i}.txt', 'w') as f: f.write(f"This is an example of text {d} {i}")
tokenize_folder(path)
outp = Path('tmp_tok')
assert outp.is_dir()
for d in ['a', 'b', 'c']:
p = outp/d
assert p.is_dir()
for i in range(5):
assert (p/f'text{i}.txt').is_file()
assert (p/f'text{i}.len').is_file()
test_eq((p/f'text{i}.txt').read(), ' '.join([
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', d, str(i) ]))
test_eq((p/f'text{i}.len').read(), '10')
shutil.rmtree(path)
shutil.rmtree(outp)
###Output
_____no_output_____
###Markdown
Tokenize texts in a dataframe
###Code
#export
def _join_texts(df, mark_fields=False):
    "Join the texts across the columns of `df`, row by row, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
#export
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, **tok_kwargs):
"Tokenize texts in `df[text_cols]` in parallel using `n_workers`"
text_cols = L(text_cols)
mark_fields = ifnone(mark_fields, len(text_cols) > 1)
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
texts = _join_texts(df[text_cols], mark_fields=mark_fields)
outputs = L(parallel_tokenize(texts, tok_func, rules, n_workers=n_workers, **tok_kwargs))
lengths = outputs.mapped(len)
counter = Counter()
for o in outputs: counter.update(o)
other_cols = [c for c in df.columns if c not in text_cols]
res = df[other_cols].copy()
res['text'],res['text_lengths'] = outputs,lengths
return res,counter
###Output
_____no_output_____
###Markdown
This function returns a new dataframe with the same non-text columns, a column named text that contains the tokenized texts and a column named text_lengths that contains their respective lengths. It also returns a counter of all words seen, to quickly build a vocabulary afterward. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (which default to `defaults.text_proc_rules`) are applied to each text before it goes into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
###Code
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text')
out.text[0]
test_eq(list(out.columns), ['label', 'text', 'text_lengths'])
test_eq(out['label'].values, df['label'].values)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i) ])
test_eq(out['text_lengths'][i], 9)
# hide
#With two columns of text, mark_fields defaults to True
df['text1'] = df['text'].values
out,cnt = tokenize_df(df, text_cols=['text', 'text1'])
test_eq(list(out.columns), ['label', 'text', 'text_lengths'])
test_eq(out['label'].values, df['label'].values)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, FLD, '1', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i),
FLD, '2', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i)
])
test_eq(out['text_lengths'][i], 21)
#export
#TODO: test + rework
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
tok_func=SpacyTokenizer, header='infer', chunksize=None, **tok_kwargs):
"Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
    fname = Path(fname)
    df = pd.read_csv(fname, header=header, chunksize=chunksize)
    outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
    kwargs = dict(n_workers=n_workers, rules=rules, mark_fields=mark_fields,
                  tok_func=tok_func, **tok_kwargs)
    if chunksize is None:
        out,cnt = tokenize_df(df, text_cols, **kwargs)
        out.to_csv(outname, header=header, index=False)
    else:
        cnt = Counter()
        for i,dfp in enumerate(df):
            out,c = tokenize_df(dfp, text_cols, **kwargs)
            out.to_csv(outname, header=header if i==0 else None, index=False, mode='w' if i==0 else 'a')
            cnt.update(c)
    pickle.dump(cnt, open(outname.parent/'counter.pkl', 'wb'))
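# Illustrative usage sketch (the function above is still marked TODO, so this call is
# untested and the file names are hypothetical):
# tokenize_csv('texts.csv', text_cols='text', outname='texts_tok.csv', chunksize=5000)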
###Output
_____no_output_____
###Markdown
The result will be written in a new csv file in `outname` (defaults to the same as `fname` with the suffix `_tok.csv`) and will have the same header as the original file, the same non-text columns, and a text and a text_lengths column as described in `tokenize_df`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (which default to `defaults.text_proc_rules`) are applied to each text before it goes into the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field. The csv file is opened with `header` and optionally in blocks of `chunksize` rows at a time. If this argument is passed, each chunk is processed independently and saved to the output file as it goes, to limit memory usage. Sentencepiece
###Code
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
#export
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
    "SentencePiece tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
model_type='unigram', char_coverage=None, cache_dir='tmp'):
try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
except ImportError:
raise Exception('sentencepiece module is missing: run `pip install sentencepiece`')
self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
if sp_model is None: self.tok = None
else:
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
os.makedirs(self.cache_dir, exist_ok=True)
def _get_vocab_sz(self, raw_text_path):
cnt = Counter()
with open(raw_text_path, 'r') as f:
for line in f.readlines():
cnt.update(line.split())
if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
res = len(cnt)//4
while res%8 != 0: res+=1
return res
    def train(self, raw_text_path):
        "Train a SentencePiece model on the texts in `raw_text_path` and save it in `self.cache_dir`"
from sentencepiece import SentencePieceTrainer
vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
spec_tokens = ['\u2581'+s for s in self.special_toks]
SentencePieceTrainer.Train(" ".join([
f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1",
f"--user_defined_symbols={','.join(spec_tokens)}"]))
raw_text_path.unlink()
return self.cache_dir/'spm.model'
def setup(self, items, rules):
if self.tok is not None: return {'sp_model': self.sp_model}
raw_text_path = self.cache_dir/'texts.out'
with open(raw_text_path, 'w') as f:
for t in progress_bar(apply_rules(items, rules), total=len(items), leave=False):
f.write(f'{t}\n')
return {'sp_model': self.train(raw_text_path)}
def pipe(self, items):
for t in items: yield self.tok.EncodeAsPieces(t)
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text', tok_func=SentencePieceTokenizer, vocab_sz=34)
out
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_dataloader.ipynb.
Converted 01a_script.ipynb.
Converted 02_transforms.ipynb.
Converted 03_pipeline.ipynb.
Converted 04_data_external.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_source.ipynb.
Converted 07_vision_core.ipynb.
Converted 08_pets_tutorial.ipynb.
Converted 09_vision_augment.ipynb.
Converted 09a_rect_augment.ipynb.
Converted 10_data_block.ipynb.
Converted 11_layers.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 15_callback_hook.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_metrics.ipynb.
Converted 21_tutorial_imagenette.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_test_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 60_vision_models_xresnet.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_index.ipynb.
Converted 95_synth_learner.ipynb.
Converted notebook2jekyll.ipynb.
|
Logistic Modeling of COVID-19 Confirmed Cases.ipynb | ###Markdown
Logistic Modeling of COVID-19 Confirmed Cases I compared the accuracy of two sigmoid models: the [simple logistic function](https://en.wikipedia.org/wiki/Logistic_function) and the [Gompertz function](https://en.wikipedia.org/wiki/Gompertz_function), and found the Gompertz function to be a fairly accurate short-term predictor of future confirmed cases. Defining our parameters and loading the data I looked at the confirmed and fatal cases for Korea through March 18, since Kenya had only just reported its first case and therefore had little data available.
###Code
ESTIMATE_DAYS = 3
data_key = 'KR'
date_limit = '2020-03-18'
import pandas as pd
import seaborn as sns
sns.set()
df = pd.read_csv(f'https://storage.googleapis.com/covid19-open-data/v2/{data_key}/main.csv').set_index('date')
###Output
_____no_output_____
###Markdown
Looking at the outbreak An *outbreak* is defined here as whenever the number of cases exceeds a certain threshold. In this case, I used 10.
###Code
def get_outbreak_mask(data: pd.DataFrame, threshold: int = 10):
''' Returns a mask for > N confirmed cases '''
return data['total_confirmed'] > threshold
cols = ['total_confirmed', 'total_deceased']
# Get data only for the columns we care about
df = df[cols]
# Get data only for the selected dates
df = df[df.index <= date_limit]
# Get data only after the outbreak begun
df = df[get_outbreak_mask(df)]
###Output
_____no_output_____
###Markdown
Plotting the data
###Code
df.plot(kind='bar', figsize=(16, 8));
###Output
_____no_output_____
###Markdown
Modeling the data By the time the outbreak is discovered, there are many undiagnosed (and even asymptomatic) cases, which leads to very rapid initial growth; later on, after a combination of aggressive measures to avoid further spread and immunity developed by potential hosts, the growth becomes much slower.
###Code
import math
import numpy as np
from scipy import optimize
def logistic_function(x: float, a: float, b: float, c: float):
    ''' a / (1 + e^(-b * (x - c))) '''
return a / (1.0 + np.exp(-b * (x - c)))
X, y = list(range(len(df))), df['total_confirmed'].tolist()
# Providing a reasonable initial guess is crucial for this model
params, _ = optimize.curve_fit(logistic_function, X, y, maxfev=int(1E5), p0=[max(y), 1, np.median(X)])
print('Estimated function: {0:.3f} / (1 + e^(-{1:.3f} * (X - {2:.3f})))'.format(*params))
confirmed = df[['total_confirmed']].rename(columns={'total_confirmed': 'Ground Truth'})
ax = confirmed.plot(kind='bar', figsize=(16, 8))
estimate = [logistic_function(x, *params) for x in X]
ax.plot(df.index, estimate, color='red', label='Estimate')
ax.legend();
###Output
_____no_output_____
###Markdown
Gompertz function The [Gompertz function](https://en.wikipedia.org/wiki/Gompertz_function) is an asymmetric logistic function whose growth decays more slowly before the curve goes flat over time. Let's take a look at using this new function to find the best parameters that fit the data:
###Code
# Note: this intentionally redefines logistic_function with the Gompertz form;
# the cells below use this version.
def logistic_function(x: float, a: float, b: float, c: float):
''' a * e^(-b * e^(-cx)) '''
return a * np.exp(-b * np.exp(-c * x))
X, y = list(range(len(df))), df['total_confirmed'].tolist()
# Providing a reasonable initial guess is crucial for this model
params, _ = optimize.curve_fit(logistic_function, X, y, maxfev=int(1E5), p0=[max(y), np.median(X), .1])
print('Estimated function: {0:.3f} * e^(-{1:.3f} * e^(-{2:.3f}X))'.format(*params))
confirmed = df[['total_confirmed']].rename(columns={'total_confirmed': 'Ground Truth'})
ax = confirmed.plot(kind='bar', figsize=(16, 8))
estimate = [logistic_function(x, *params) for x in X]
ax.plot(df.index, estimate, color='red', label='Estimate')
ax.legend();
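# Illustrative addition (not in the original notebook): a rough in-sample error for
# the Gompertz fit, to help quantify the comparison with the simple logistic model.
gompertz_mae = np.mean(np.abs(np.array(estimate) - np.array(y)))
print('Gompertz in-sample mean absolute error: {0:.1f} cases'.format(gompertz_mae))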
###Output
_____no_output_____
###Markdown
Evaluating the model That curve looks like a very good fit! However, when a model follows the known data too closely, *overfitting* occurs, which diminishes the model's ability to make predictions. In other words, the model may appear to be able to perfectly follow known data, but when asked to make a prediction about future data it will likely be wrong. This is one of the main reasons why machine learning is not a good tool for this task, since there is not enough data to avoid overfitting a model. Validating the model I fitted the model without looking at the last 3 days of data. Then, I estimated the missing days using our model, and verified whether the results still hold by comparing what the model thought was going to happen with the actual data.
###Code
params_validate, _ = optimize.curve_fit(logistic_function, X[:-ESTIMATE_DAYS], y[:-ESTIMATE_DAYS])
# Project zero for all values except for the last ESTIMATE_DAYS
projected = [0] * len(X[:-ESTIMATE_DAYS]) + [logistic_function(x, *params_validate) for x in X[-ESTIMATE_DAYS:]]
projected = pd.Series(projected, index=df.index, name='Projected')
confirmed = pd.DataFrame({'Ground Truth': df['total_confirmed'], 'Projected': projected})
ax = confirmed.plot(kind='bar', figsize=(16, 8))
estimate = [logistic_function(x, *params_validate) for x in X]
ax.plot(df.index, estimate, color='red', label='Estimate')
ax.legend();
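# Illustrative addition (not in the original notebook): quantify the 3-day holdout
# error of the fitted curve as a mean absolute percentage error.
holdout_true = np.array(y[-ESTIMATE_DAYS:])
holdout_pred = np.array([logistic_function(x, *params_validate) for x in X[-ESTIMATE_DAYS:]])
mape = np.mean(np.abs(holdout_pred - holdout_true) / holdout_true) * 100
print('Validation MAPE over the last {0} days: {1:.2f}%'.format(ESTIMATE_DAYS, mape))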
###Output
_____no_output_____
###Markdown
Projecting future data It looks like our logistic model slightly underestimates the confirmed cases. This indicates that the model is optimistic about the slowdown of new cases being reported. A number of factors could affect this, like wider availability of tests. Ultimately, it is also possible that the logistic model is not an appropriate function to use. However, the predictions are close enough to the real data that this is probably a good starting point for a rough estimate over a short time horizon. Now, let's use the model we fitted earlier, which used all the data, and try to predict what the next 3 days will look like.
###Code
import datetime
# Append N new days to our indices
date_format = '%Y-%m-%d'
date_range = [datetime.datetime.strptime(date, date_format) for date in df.index]
for _ in range(ESTIMATE_DAYS): date_range.append(date_range[-1] + datetime.timedelta(days=1))
date_range = [datetime.datetime.strftime(date, date_format) for date in date_range]
# Perform projection with the previously estimated parameters
projected = [0] * len(X) + [logistic_function(x, *params) for x in range(len(X), len(X) + ESTIMATE_DAYS)]
projected = pd.Series(projected, index=date_range, name='Projected')
df_ = pd.DataFrame({'Confirmed': df['total_confirmed'], 'Projected': projected})
ax = df_.plot(kind='bar', figsize=(16, 8))
estimate = [logistic_function(x, *params) for x in range(len(date_range))]
ax.plot(date_range, estimate, color='red', label='Estimate')
ax.legend();
###Output
_____no_output_____ |
notebooks/20210330__First_trial_of_making_new_z-update-Copy1.ipynb | ###Markdown
Proof of concept of new "composable" ADMM formulation 3/30/21 This notebook is a proof of concept and understanding of the new ADMM formulation, based on grouping the quadratic terms and linear constraints together with the global equality constraint.
###Code
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy import signal
from time import time
import seaborn as sns
import cvxpy as cvx
sns.set_style('darkgrid')
import sys
sys.path.append('..')
from osd import Problem
from osd.components import GaussNoise, SmoothSecondDifference, PiecewiseConstant, SparseFirstDiffConvex, LaplaceNoise, Blank
from osd.generators import proj_l2_d2, make_pwc_data
from osd.utilities import progress
###Output
_____no_output_____
###Markdown
Problem data generation
###Code
T = 1000
X_real = np.zeros((3, T))
X_real[0] = 0.1 * np.random.randn(T)
X_real[1] = proj_l2_d2(np.random.randn(T), theta=5e2) * 2
X_real[2] = make_pwc_data(T, segments=4)
y = np.sum(X_real, axis=0)
fig, ax = plt.subplots(nrows=3, sharex=True, figsize=(14, 7))
ax[0].set_title('Smooth component')
ax[0].plot(X_real[1])
ax[1].set_title('PWC component')
ax[1].plot(X_real[2])
ax[2].set_title('Observed signal')
ax[2].plot(y, linewidth=1, marker='.')
# ax[2].plot(signal1 + signal2, label='true signal minus noise', ls='--')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Example 1: Quadratically smooth, plus Gaussian noise
###Code
y = np.sum(X_real[:2], axis=0)
fig, ax = plt.subplots(nrows=3, sharex=True, figsize=(14, 7))
ax[0].set_title('Noise component')
ax[0].plot(X_real[0])
ax[1].set_title('Smooth component')
ax[1].plot(X_real[1])
ax[2].set_title('Observed signal')
ax[2].plot(y, linewidth=1, marker='.')
# ax[2].plot(signal1 + signal2, label='true signal minus noise', ls='--')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Solve with CVXPY + MOSEK
###Code
c1 = GaussNoise()
c2 = SmoothSecondDifference(theta=1e2)
components = [c1, c2]
problem = Problem(y, components)
problem.weights.value = [c.theta for c in problem.components]
problem.decompose(admm=False, solver='MOSEK')
K = len(components)
fig, ax = plt.subplots(nrows=K, sharex=True, figsize=(10,8))
for k in range(K):
true = X_real[k]
est = problem.estimates[k]
ax[k].plot(true, label='true')
ax[k].plot(est, label='estimated (mean adj.)')
ax[k].set_title('Component {}'.format(k))
ax[k].legend()
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Solve with new ADMM formulation This problem consists of quadratic terms and linear constraints, so everything goes in the z-update. We have $$ g(z,\tilde{z};\theta) = \left\lVert z_1 \right\rVert_2^2 + \theta\left\lVert \tilde{z}_2\right\rVert_2^2, $$ with $$ \mathbf{dom}\,g = \left\{z,\tilde{z}\mid\sum_{k} z_k = y,\; D^2 z_2 = \tilde{z}_2\right\}. $$ Let $\hat{z} = \left[ \begin{matrix} z_1^T & z_2^T & \tilde{z}_2^T\end{matrix}\right]^T$. Then $g$ can be rewritten as $$ g(\hat{z};P) = \hat{z}^T P \hat{z}, $$ where $P\in\mathbf{R}^{(3T-2)\times(3T-2)}$ is a diagonal matrix with the first T entries equal to 1, the second T entries equal to 0, and the final T-2 entries equal to $\theta$.
###Code
# Initialization
X = np.zeros((K, T))
X[0] = y
X_tilde = np.zeros(T-2)
Z = np.copy(X)
Z_tilde = np.copy(X_tilde)
U = np.zeros_like(X)
U_tilde = np.zeros_like(X_tilde)
d = np.zeros(3 * T - 2)
d[:T] = 1
d[2*T:] = 1e2
P = np.diag(d)
I = np.eye(T)
D = np.diff(I, axis=0, n=2)
F_1 = np.block([np.eye(T), np.eye(T), np.zeros((T, T-2))])
F_2 = np.block([np.zeros((T-2, T)), D, -1 * np.eye(T-2)])
F = np.block([[F_1], [F_2]])
A = np.block([
[P + np.eye(3*T - 2), F.T],
[F, np.zeros((F.shape[0], F.shape[0]))]
])
A.shape
v = np.random.randn(3 * T - 2)
g = np.block([y, np.zeros(T - 2)])
vp = np.block([v, g])
out_test = np.linalg.solve(A, vp)
x = cvx.Variable(3 * T - 2)
objective = cvx.Minimize(
cvx.sum_squares(x[:T]) + 1e2 * cvx.sum_squares(x[2*T:]) + 0.5 * cvx.sum_squares(x - v)
)
constraints = [
x[:T] + x[T:2*T] == y,
cvx.diff(x[T:2*T], k=2) == x[2*T:]
]
problem = cvx.Problem(objective, constraints)
problem.solve()
(cvx.sum_squares(out_test[:T]) + 1e2 * cvx.sum_squares(out_test[2*T:3*T-2]) + 0.5 * cvx.sum_squares(out_test[:3*T-2] - v)).value
plt.plot(x.value)
plt.plot(out_test[:3*T-2])
np.alltrue([
np.alltrue(np.isclose(out_test[:T] + out_test[T:2*T], y)),
np.alltrue(np.isclose(x.value[:T] + x.value[T:2*T], y))
])
np.alltrue([
np.alltrue(np.isclose(np.diff(out_test[T:2*T], n=2), out_test[2*T:3*T-2])),
np.alltrue(np.isclose(np.diff(x.value[T:2*T], n=2), x.value[2*T:]))
])
from scipy import sparse as sp
A_s = sp.csc_matrix(A)
A_s
%timeit sp.csc_matrix(A)
%timeit out_test = np.linalg.solve(A, vp)
%timeit A_factored = sp.linalg.splu(A_s)
A_factored = sp.linalg.splu(A_s)
%timeit sparse_test = A_factored.solve(vp)
A_factored = sp.linalg.splu(A_s)
sparse_test = A_factored.solve(vp)
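# Sanity check (illustrative): the sparse LU solve should agree with the dense solve,
# and its primal block should match the CVXPY solution found above.
print(np.allclose(sparse_test, out_test))
print(np.max(np.abs(sparse_test[:3*T-2] - x.value)))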
plt.plot(x.value)
plt.plot(out_test[:3*T-2])
plt.plot(sparse_test[:3*T-2])
def make_consensus_prob(y, X, U):
Z_var = cvx.Variable(X.shape)
X_param = cvx.Parameter(X.shape, value=X, name='X')
U_param = cvx.Parameter(X.shape, value=U, name='U')
y_param = cvx.Parameter(len(y), value=y, name='y')
objective = cvx.Minimize(cvx.norm(Z_var - U_param - X_param, 'fro'))
constraints = [
cvx.sum(Z_var[:-1, :], axis=0) == y_param,
cvx.diff(Z_var[2, :], k=1) == Z_var[3, :-1]
]
problem = cvx.Problem(objective, constraints)
return problem
def calc_obj(y, X, components, use_ix):
"""
Calculate the current objective value of the problem
:param y: numpy array containing problem data
:param X: current estimate of decomposed signal components from ADMM
:param use_ix: the known index set (Boolean array)
:return: the scalar problem objective value
"""
K = len(components)
X_tilde = make_estimate(y, X, use_ix)
obj_val = 0
for k in range(K):
try:
cost = components[k].cost(X_tilde[k]).value.item()
except AttributeError:
cost = components[k].cost(X_tilde[k])
theta = components[k].theta
obj_val += theta * cost
return obj_val
def make_estimate(y, X, use_ix):
"""
After any given iteration of the ADMM algorithm, generate an estimate that
is feasible with respect to the global equality constraint by making x0
equal to the residual between the input data y and the rest of the
component estimates
:param y: numpy array containing problem data
:param X: current estimate of decomposed signal components from ADMM
:param use_ix: the known index set (Boolean array)
:return: the estimate with the first component replaced by the residuals
"""
X_tilde = np.copy(X)
X_tilde[0, use_ix] = y - np.sum(X[1:, use_ix], axis=0)
X_tilde[0, ~use_ix] = 0
return X_tilde
def run_admm(data, components, num_iter=50, rho=1., use_ix=None, verbose=True,
randomize_start=False, X_init=None):
"""
Serial implementation of SD ADMM algorithm.
:param data: numpy array containing problem data
:param components: list of osd.components class objects
:param num_iter: (int) the number of ADMM iterations to perform
:param rho: (float) the ADMM learning rate
:param use_ix: (None or Boolean array) the set of known index values
:param verbose: (Boolean) print progress to screen
:param randomize_start: (Boolean) Randomize initialization of components
:return:
"""
y = data
T = len(data)
K = len(components)
if use_ix is None:
use_ix = np.ones_like(data, dtype=bool)
if X_init is None:
X = np.zeros((K, T))
if not randomize_start:
X[0, use_ix] = y[use_ix]
else:
X[1:, :] = np.random.randn(K-1, T)
X[0, use_ix] = y[use_ix] - np.sum(X[1:, use_ix], axis=0)
elif X_init.shape == (K, T):
X = np.copy(X_init)
else:
        m1 = 'An initial value was given for X that does not match the problem shape.'
print(m1)
return
Z = np.copy(X)
U = np.zeros_like(X)
residuals = []
obj_vals = []
ti = time()
consensus_problem = make_consensus_prob(y, X, U)
    parameters = {p.name(): p for p in consensus_problem.parameters()}
best = {
'X': None,
'u': None,
'it': None,
'obj_val': np.inf
}
try:
for it in range(num_iter):
if verbose:
td = time() - ti
progress(it, num_iter, '{:.2f} sec'.format(td))
# Apply proximal operators for each signal class
for k in range(K):
prox = components[k].prox_op
theta = components[k].theta
X[k, :] = prox(Z[k, :] - U[k, :], theta, rho)
# Consensus step
parameters['X'].value = X
parameters['U'].value = U
consensus_problem.solve(solver='MOSEK')
Z = consensus_problem.variables()[0].value
# U-update
U += X - Z
# mean-square-error
error = np.sum(X[:-1, use_ix], axis=0) - y[use_ix]
mse = np.sum(np.power(error, 2)) / error.size
residuals.append(mse)
obj_val = calc_obj(y, X, components, use_ix)
obj_vals.append(obj_val)
if obj_val < best['obj_val']:
X_tilde = make_estimate(y, X, use_ix)
best = {
'X': X_tilde,
'u': U,
'it': it,
'obj_val': obj_val
}
    except Exception as e:
print("something failed :(")
print(e)
if verbose:
td = time() - ti
progress(it + 1, num_iter, '{:.2f} sec\n'.format(td))
outdict = {
'X': best['X'],
'u': best['u'],
'it': best['it'],
'residuals': residuals,
'obj_vals': obj_vals,
'best_obj': best['obj_val']
}
return outdict
c1 = GaussNoise()
c2 = SmoothSecondDifference(theta=1e2)
c3 = Blank()
c4 = LaplaceNoise()
results = run_admm(y, [c1, c2, c3, c4], rho=5, num_iter=500)
plt.plot(results['obj_vals'])
plt.axvline(results['it'], color='red', ls='--')
plt.yscale('log')
K = 3
fig, ax = plt.subplots(nrows=K, sharex=True, figsize=(10,8))
for k in range(K):
if k > 0:
true = X_real[k]
est = results['X'][k]
ax[k].plot(true, label='true')
ax[k].plot(est, label='estimated (mean adj.)')
ax[k].set_title('Component {}'.format(k))
else:
ax[k].plot(np.sum(X_real, axis=0), label='observed', linewidth=1, alpha=0.3, marker='.', color='green')
ax[k].plot(np.sum(X_real[1:], axis=0), label='true')
ax[k].plot(np.sum(results['X'][1:-1], axis=0), label='estimated')
ax[k].set_title('Composed Signal')
ax[k].legend()
plt.tight_layout()
plt.plot(results['X'][2])
###Output
_____no_output_____
###Markdown
Cardinality-constrained formulation Solve with ADMM
###Code
c1 = GaussNoise()
c2 = SmoothSecondDifference(theta=1e2)
c3 = PiecewiseConstant(num_segments=4)
components = [c1, c2, c3]
problem = Problem(y, components)
problem.decompose(admm=True, rho=1, num_iter=100)
plt.plot(problem.admm_result['obj_vals'])
plt.axvline(problem.admm_result['it'], color='red', ls='--')
plt.yscale('log')
K = len(components)
fig, ax = plt.subplots(nrows=K, sharex=True, figsize=(10,8))
for k in range(K):
if k > 0:
true = X_real[k]
est = problem.estimates[k]
ax[k].plot(true, label='true')
ax[k].plot(est - np.average(est) + np.average(true), label='estimated (mean adj.)')
ax[k].set_title('Component {}'.format(k))
else:
ax[k].plot(np.sum(X_real, axis=0), label='observed', linewidth=1, alpha=0.3, marker='.', color='green')
ax[k].plot(np.sum(X_real[1:], axis=0), label='true')
ax[k].plot(np.sum(problem.estimates[1:], axis=0), label='estimated')
ax[k].set_title('Composed Signal')
ax[k].legend()
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Solve with CVXPY
###Code
problem.decompose(admm=False)
print([c.is_convex for c in problem.components])
###Output
[True, True, False]
###Markdown
$\ell_1$-penalized formulation Solve with ADMM
###Code
c1 = GaussNoise()
c2 = SmoothSecondDifference(theta=1e2)
c3 = SparseFirstDiffConvex(theta=1e0)
components = [c1, c2, c3]
problem = Problem(y, components)
problem.decompose(admm=True, rho=1, num_iter=100)
plt.plot(problem.admm_result['obj_vals'])
plt.axvline(problem.admm_result['it'], color='red', ls='--')
plt.yscale('log')
K = len(components)
fig, ax = plt.subplots(nrows=K, sharex=True, figsize=(10,8))
for k in range(K):
if k > 0:
true = X_real[k]
est = problem.estimates[k]
ax[k].plot(true, label='true')
ax[k].plot(est - np.average(est) + np.average(true), label='estimated (mean adj.)')
ax[k].set_title('Component {}'.format(k))
else:
ax[k].plot(np.sum(X_real, axis=0), label='observed', linewidth=1, alpha=0.3, marker='.', color='green')
ax[k].plot(np.sum(X_real[1:], axis=0), label='true')
ax[k].plot(np.sum(problem.estimates[1:], axis=0), label='estimated')
ax[k].set_title('Composed Signal')
ax[k].legend()
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Solve with CVXPY
###Code
problem.weights.value = [c.theta for c in problem.components]
problem.decompose(admm=False, solver='MOSEK')
K = len(components)
fig, ax = plt.subplots(nrows=K, sharex=True, figsize=(10,8))
for k in range(K):
if k > 0:
true = X_real[k]
est = problem.estimates[k]
ax[k].plot(true, label='true')
ax[k].plot(est - np.average(est) + np.average(true), label='estimated (mean adj.)')
ax[k].set_title('Component {}'.format(k))
else:
ax[k].plot(np.sum(X_real, axis=0), label='observed', linewidth=1, alpha=0.3, marker='.', color='green')
ax[k].plot(np.sum(X_real[1:], axis=0), label='true')
ax[k].plot(np.sum(problem.estimates[1:], axis=0), label='estimated')
ax[k].set_title('Composed Signal')
ax[k].legend()
plt.tight_layout()
###Output
_____no_output_____ |
examples/analyze_multiple_files.ipynb | ###Markdown
Analyzing data from multiple files
###Code
import glob
import numpy as np
import matplotlib.pyplot as plt
filepath = '../dat/series2d/inflammation*.csv'
print(glob.glob(filepath))
filenames = sorted(glob.glob('../dat/series2d/inflammation*.csv'))
filenames = filenames[:3]
count = 1
for filename in filenames:
data = np.loadtxt(fname = filename, delimiter = ',')
fig = plt.figure(figsize=(10.0, 3.0))
axes1 = fig.add_subplot(1, 3, 1)
axes2 = fig.add_subplot(1, 3, 2)
axes3 = fig.add_subplot(1, 3, 3)
axes1.set_ylabel('average')
axes2.set_ylabel('maximum')
axes3.set_ylabel('minimum')
axes1.plot(np.mean(data, axis = 0))
axes2.plot(np.max(data, axis = 0))
axes3.plot(np.min(data, axis = 0))
plt.tight_layout()
plt.savefig('../figures/inflammation-' + str(count) + '.png')
count = count + 1
###Output
_____no_output_____ |
2_Elimination_of_outliers/Calculation_and_Visualization_of_refined_SoH.ipynb | ###Markdown
0. Import Packages
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
import os
import glob
import seaborn as sns
###Output
_____no_output_____
###Markdown
1. Load Dataset
###Code
dir = 'refined_dataset'
listdir = os.listdir(dir)
print(listdir)
print("The number of dataset :", len(listdir))
num = ['B05', 'B07', 'B18', 'B33', 'B34', 'B46', 'B47', 'B48']
# Load each battery's discharge log, keep only the columns we need,
# and store the result in a variable named data_<battery id>.
for i in range(len(listdir)):
    path = os.path.join(os.getcwd(), 'refined_dataset/', num[i] + '_discharge_soh.csv')
    csv = pd.read_csv(path)
    df = pd.DataFrame(csv)
    vec = df[['cycle', 'capacity', 'SOH']]
    globals()['data_{}'.format(num[i])] = vec
data = pd.read_csv('refined_dataset/B05_discharge_soh.csv')
df = pd.DataFrame(data)
df
data_B05 ## result
for i in range(len(listdir)):
print("Shape of data :", np.shape(globals()['data_{}'.format(num[i])]))
###Output
Shape of data : (50285, 3)
Shape of data : (50285, 3)
Shape of data : (34866, 3)
Shape of data : (42200, 3)
Shape of data : (45909, 3)
Shape of data : (23478, 3)
Shape of data : (23478, 3)
Shape of data : (23478, 3)
###Markdown
3. Visualization of SoH
###Code
for i in range(len(listdir)) :
dff = globals()['data_{}'.format(num[i])]
sns.set_style("darkgrid")
plt.figure(figsize=(12, 8))
plt.scatter(dff['cycle'], dff['SOH'])
# plt.plot(dff['cycle'], len(dff['cycle'])*[0.7], color = 'red')
plt.ylabel('SoH', fontsize = 15)
plt.xlabel('cycle', fontsize = 15)
plt.title('Discharge_' + num[i], fontsize = 15)
plt.savefig('fig/refined_SoH_' + num[i] + '.jpg')
plt.show()
###Output
_____no_output_____
###Markdown
3-1. group_A
###Code
# Group A
sns.set_style("darkgrid")
plt.figure(figsize=(12, 8))
plt.scatter(data_B05['cycle'], data_B05['SOH'],label='B05')
plt.scatter(data_B07['cycle'], data_B07['SOH'],label='B07')
plt.scatter(data_B18['cycle'], data_B18['SOH'],label='B18')
plt.legend(prop={'size': 16})
plt.ylabel('SoH', fontsize = 15)
plt.xlabel('Discharge cycle', fontsize = 15)
plt.title('SoH of group A', fontsize = 15)
plt.savefig('fig/A_group.jpg')
plt.show()
# Group B
sns.set_style("darkgrid")
plt.figure(figsize=(12, 8))
plt.scatter(data_B33['cycle'], data_B33['SOH'],label='B33')
plt.scatter(data_B34['cycle'], data_B34['SOH'],label='B34')
plt.legend(prop={'size': 16})
plt.ylabel('SoH', fontsize = 15)
plt.xlabel('Discharge cycle', fontsize = 15)
plt.title('SoH of group B', fontsize = 15)
plt.savefig('fig/B_group.jpg')
plt.show()
# Group C
sns.set_style("darkgrid")
plt.figure(figsize=(12, 8))
plt.scatter(data_B46['cycle'], data_B46['SOH'],label='B46')
plt.scatter(data_B47['cycle'], data_B47['SOH'],label='B47')
plt.scatter(data_B48['cycle'], data_B48['SOH'],label='B48')
plt.legend(prop={'size': 16})
plt.ylabel('SoH', fontsize = 15)
plt.xlabel('Discharge cycle', fontsize = 15)
plt.title('SoH of group C', fontsize = 15)
plt.savefig('fig/C_group.jpg')
plt.show()
###Output
_____no_output_____ |
site/en/tutorials/text/word2vec.ipynb | ###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Word2Vec Word2Vec is not a singular algorithm, rather, it is a family of model architectures and optimizations that can be used to learn word embeddings from large datasets. Embeddings learned through Word2Vec have proven to be successful on a variety of downstream natural language processing tasks.Note: This tutorial is based on [Efficient Estimation of Word Representations in Vector Space](https://arxiv.org/pdf/1301.3781.pdf) and[DistributedRepresentations of Words and Phrases and their Compositionality](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). It is not an exact implementation of the papers. Rather, it is intended to illustrate the key ideas.These papers proposed two methods for learning representations of words: * **Continuous Bag-of-Words Model** which predicts the middle word based on surrounding context words. The context consists of a few words before and after the current (middle) word. This architecture is called a bag-of-words model as the order of words in the context is not important.* **Continuous Skip-gram Model** which predict words within a certain range before and after the current word in the same sentence. A worked example of this is given below.You'll use the skip-gram approach in this tutorial. First, you'll explore skip-grams and other concepts using a single sentence for illustration. Next, you'll train your own Word2Vec model on a small dataset. This tutorial also contains code to export the trained embeddings and visualize them in the [TensorFlow Embedding Projector](http://projector.tensorflow.org/). Skip-gram and Negative Sampling While a bag-of-words model predicts a word given the neighboring context, a skip-gram model predicts the context (or neighbors) of a word, given the word itself. The model is trained on skip-grams, which are n-grams that allow tokens to be skipped (see the diagram below for an example). The context of a word can be represented through a set of skip-gram pairs of `(target_word, context_word)` where `context_word` appears in the neighboring context of `target_word`. Consider the following sentence of 8 words.> The wide road shimmered in the hot sun. The context words for each of the 8 words of this sentence are defined by a window size. The window size determines the span of words on either side of a `target_word` that can be considered `context word`. Take a look at this table of skip-grams for target words based on different window sizes. Note: For this tutorial, a window size of *n* implies n words on each side with a total window span of 2*n+1 words across a word.  The training objective of the skip-gram model is to maximize the probability of predicting context words given the target word. For a sequence of words *w1, w2, ... wT*, the objective can be written as the average log probability  where `c` is the size of the training context. The basic skip-gram formulation defines this probability using the softmax function.  where *v* and *v'* are target and context vector representations of words and *W* is vocabulary size. Computing the denominator of this formulation involves performing a full softmax over the entire vocabulary words which is often large (105-107) terms. The [Noise Contrastive Estimation](https://www.tensorflow.org/api_docs/python/tf/nn/nce_loss) loss function is an efficient approximation for a full softmax. 
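In symbols, the average log probability objective and the full softmax referred to above, as given in the papers cited earlier, are $$\frac{1}{T}\sum_{t=1}^{T}\;\sum_{-c \le j \le c,\, j \ne 0} \log p(w_{t+j} \mid w_t) \qquad \text{and} \qquad p(w_O \mid w_I) = \frac{\exp\left({v'_{w_O}}^{\top} v_{w_I}\right)}{\sum_{w=1}^{W} \exp\left({v'_{w}}^{\top} v_{w_I}\right)}.$$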
With an objective to learn word embeddings instead of modelling the word distribution, NCE loss can be [simplified](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) to use negative sampling. The simplified negative sampling objective for a target word is to distinguish the context word from *num_ns* negative samples drawn from noise distribution *Pn(w)* of words. More precisely, an efficient approximation of full softmax over the vocabulary is, for a skip-gram pair, to pose the loss for a target word as a classification problem between the context word and *num_ns* negative samples. A negative sample is defined as a (target_word, context_word) pair such that the context_word does not appear in the `window_size` neighborhood of the target_word. For the example sentence, these are few potential negative samples (when `window_size` is 2).```(hot, shimmered)(wide, hot)(wide, sun)``` In the next section, you'll generate skip-grams and negative samples for a single sentence. You'll also learn about subsampling techniques and train a classification model for positive and negative training examples later in the tutorial. Setup
###Code
!pip install tqdm
import io
import itertools
import numpy as np
import os
import re
import string
import tensorflow as tf
import tqdm
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Activation, Dense, Dot, Embedding, Flatten, GlobalAveragePooling1D, Reshape
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
SEED = 42
AUTOTUNE = tf.data.AUTOTUNE
###Output
_____no_output_____
###Markdown
Vectorize an example sentence Consider the following sentence: `The wide road shimmered in the hot sun.`Tokenize the sentence:
###Code
sentence = "The wide road shimmered in the hot sun"
tokens = list(sentence.lower().split())
print(len(tokens))
###Output
_____no_output_____
###Markdown
Create a vocabulary to save mappings from tokens to integer indices.
###Code
vocab, index = {}, 1 # start indexing from 1
vocab['<pad>'] = 0 # add a padding token
for token in tokens:
if token not in vocab:
vocab[token] = index
index += 1
vocab_size = len(vocab)
print(vocab)
###Output
_____no_output_____
###Markdown
Create an inverse vocabulary to save mappings from integer indices to tokens.
###Code
inverse_vocab = {index: token for token, index in vocab.items()}
print(inverse_vocab)
###Output
_____no_output_____
###Markdown
Vectorize your sentence.
###Code
example_sequence = [vocab[word] for word in tokens]
print(example_sequence)
###Output
_____no_output_____
###Markdown
Generate skip-grams from one sentence The `tf.keras.preprocessing.sequence` module provides useful functions that simplify data preparation for Word2Vec. You can use the `tf.keras.preprocessing.sequence.skipgrams` to generate skip-gram pairs from the `example_sequence` with a given `window_size` from tokens in the range `[0, vocab_size)`.Note: `negative_samples` is set to `0` here as batching negative samples generated by this function requires a bit of code. You will use another function to perform negative sampling in the next section.
###Code
window_size = 2
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
example_sequence,
vocabulary_size=vocab_size,
window_size=window_size,
negative_samples=0)
print(len(positive_skip_grams))
###Output
_____no_output_____
###Markdown
Take a look at few positive skip-grams.
###Code
for target, context in positive_skip_grams[:5]:
print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
###Output
_____no_output_____
###Markdown
Negative sampling for one skip-gram The `skipgrams` function returns all positive skip-gram pairs by sliding over a given window span. To produce additional skip-gram pairs that would serve as negative samples for training, you need to sample random words from the vocabulary. Use the `tf.random.log_uniform_candidate_sampler` function to sample `num_ns` number of negative samples for a given target word in a window. You can call the function on one skip-gram's target word and pass the context word as the true class to exclude it from being sampled. Key point: *num_ns* (number of negative samples per positive context word) between [5, 20] is [shown to work](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) best for smaller datasets, while *num_ns* between [2,5] suffices for larger datasets.
###Code
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class, # class that should be sampled as 'positive'
num_true=1, # each positive skip-gram has 1 positive context class
num_sampled=num_ns, # number of negative context words to sample
unique=True, # all the negative samples should be unique
range_max=vocab_size, # pick index of the samples from [0, vocab_size]
seed=SEED, # seed for reproducibility
name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
###Output
_____no_output_____
###Markdown
Construct one training example For a given positive `(target_word, context_word)` skip-gram, you now also have `num_ns` negative sampled context words that do not appear in the window size neighborhood of `target_word`. Batch the `1` positive `context_word` and `num_ns` negative context words into one tensor. This produces a set of positive skip-grams (labelled as `1`) and negative samples (labelled as `0`) for each target word.
###Code
# Add a dimension so you can use concatenation (on the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concat positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label first context word as 1 (positive) followed by num_ns 0s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape target to shape (1,) and context and label to (num_ns+1,).
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
###Output
_____no_output_____
###Markdown
Take a look at the context and the corresponding labels for the target word from the skip-gram example above.
###Code
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
###Output
_____no_output_____
###Markdown
A tuple of `(target, context, label)` tensors constitutes one training example for training your skip-gram negative sampling Word2Vec model. Notice that the target is of shape `(1,)` while the context and label are of shape `(1+num_ns,)`
###Code
print(f"target :", target)
print(f"context :", context )
print(f"label :", label )
###Output
_____no_output_____
###Markdown
Summary This picture summarizes the procedure of generating a training example from a sentence.  Compile all steps into one function Skip-gram Sampling table A large dataset means a larger vocabulary with a higher number of frequent words such as stopwords. Training examples obtained from sampling commonly occurring words (such as `the`, `is`, `on`) don't add much useful information for the model to learn from. [Mikolov et al.](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) suggest subsampling of frequent words as a helpful practice to improve embedding quality. The `tf.keras.preprocessing.sequence.skipgrams` function accepts a sampling table argument to encode probabilities of sampling any token. You can use `tf.keras.preprocessing.sequence.make_sampling_table` to generate a word-frequency rank based probabilistic sampling table and pass it to the `skipgrams` function. Take a look at the sampling probabilities for a `vocab_size` of 10.
###Code
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
###Output
_____no_output_____
###Markdown
`sampling_table[i]` denotes the probability of sampling the i-th most common word in a dataset. The function assumes a [Zipf's distribution](https://en.wikipedia.org/wiki/Zipf%27s_law) of the word frequencies for sampling. Key point: The `tf.random.log_uniform_candidate_sampler` already assumes that the vocabulary frequency follows a log-uniform (Zipf's) distribution. Using these distribution weighted sampling also helps approximate the Noise Contrastive Estimation (NCE) loss with simpler loss functions for training a negative sampling objective. Generate training data Compile all the steps described above into a function that can be called on a list of vectorized sentences obtained from any text dataset. Notice that the sampling table is built before sampling skip-gram word pairs. You will use this function in the later sections.
###Code
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
# Elements of each training example are appended to these lists.
targets, contexts, labels = [], [], []
# Build the sampling table for vocab_size tokens.
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
# Iterate over all sequences (sentences) in dataset.
for sequence in tqdm.tqdm(sequences):
# Generate positive skip-gram pairs for a sequence (sentence).
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
sequence,
vocabulary_size=vocab_size,
sampling_table=sampling_table,
window_size=window_size,
negative_samples=0)
# Iterate over each positive skip-gram pair to produce training examples
# with positive context word and negative samples.
for target_word, context_word in positive_skip_grams:
context_class = tf.expand_dims(
tf.constant([context_word], dtype="int64"), 1)
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class,
num_true=1,
num_sampled=num_ns,
unique=True,
range_max=vocab_size,
seed=SEED,
name="negative_sampling")
# Build context and label vectors (for one target word)
negative_sampling_candidates = tf.expand_dims(
negative_sampling_candidates, 1)
context = tf.concat([context_class, negative_sampling_candidates], 0)
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Append each element from the training example to global lists.
targets.append(target_word)
contexts.append(context)
labels.append(label)
return targets, contexts, labels
###Output
_____no_output_____
###Markdown
Prepare training data for Word2Vec With an understanding of how to work with one sentence for a skip-gram negative sampling based Word2Vec model, you can proceed to generate training examples from a larger list of sentences! Download text corpus You will use a text file of Shakespeare's writing for this tutorial. Change the following line to run this code on your own data.
###Code
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
###Output
_____no_output_____
###Markdown
Read text from the file and take a look at the first few lines.
###Code
with open(path_to_file) as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
###Output
_____no_output_____
###Markdown
Use the non-empty lines to construct a `tf.data.TextLineDataset` object for the next steps.
###Code
text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
###Output
_____no_output_____
###Markdown
Vectorize sentences from the corpus You can use the `TextVectorization` layer to vectorize sentences from the corpus. Learn more about using this layer in this [Text Classification](https://www.tensorflow.org/tutorials/keras/text_classification) tutorial. Notice from the first few sentences above that the text needs to be in one case and punctuation needs to be removed. To do this, define a `custom_standardization function` that can be used in the TextVectorization layer.
###Code
# We create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
return tf.strings.regex_replace(lowercase,
'[%s]' % re.escape(string.punctuation), '')
# Define the vocabulary size and number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the text vectorization layer to normalize, split, and map strings to
# integers. Set output_sequence_length length to pad all samples to same length.
vectorize_layer = TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
###Output
_____no_output_____
###Markdown
Call `adapt` on the text dataset to create vocabulary.
###Code
vectorize_layer.adapt(text_ds.batch(1024))
###Output
_____no_output_____
###Markdown
Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with `get_vocabulary()`. This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
###Code
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
###Output
_____no_output_____
###Markdown
The vectorize_layer can now be used to generate vectors for each element in the `text_ds`.
###Code
def vectorize_text(text):
text = tf.expand_dims(text, -1)
return tf.squeeze(vectorize_layer(text))
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
###Output
_____no_output_____
###Markdown
Obtain sequences from the dataset You now have a `tf.data.Dataset` of integer encoded sentences. To prepare the dataset for training a Word2Vec model, flatten the dataset into a list of sentence vector sequences. This step is required as you would iterate over each sentence in the dataset to produce positive and negative examples. Note: Since the `generate_training_data()` defined earlier uses non-TF python/numpy functions, you could also use a `tf.py_function` or `tf.numpy_function` with `tf.data.Dataset.map()`.
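As a minimal sketch of that alternative (the helper `sequence_length_py` and the dataset name `lengths_ds` are made up for illustration), a plain Python/NumPy function can be wrapped with `tf.py_function` and used inside `Dataset.map()`:
```python
import numpy as np

def sequence_length_py(seq):
    # Runs as eager Python inside the tf.data pipeline.
    return np.int64(len(seq))

lengths_ds = text_vector_ds.map(
    lambda seq: tf.py_function(func=sequence_length_py, inp=[seq], Tout=tf.int64))
```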
###Code
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
###Output
_____no_output_____
###Markdown
Take a look at a few examples from `sequences`.
###Code
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
###Output
_____no_output_____
###Markdown
Generate training examples from sequences `sequences` is now a list of int-encoded sentences. Just call the `generate_training_data()` function defined earlier to generate training examples for the Word2Vec model. To recap, the function iterates over each word from each sequence to collect positive and negative context words. The lengths of `targets`, `contexts`, and `labels` should be the same, representing the total number of training examples.
###Code
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
print(len(targets), len(contexts), len(labels))
###Output
_____no_output_____
###Markdown
Configure the dataset for performance To perform efficient batching for the potentially large number of training examples, use the `tf.data.Dataset` API. After this step, you would have a `tf.data.Dataset` object of `(target_word, context_word), (label)` elements to train your Word2Vec model!
###Code
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
###Output
_____no_output_____
###Markdown
Add `cache()` and `prefetch()` to improve performance.
###Code
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
###Output
_____no_output_____
###Markdown
Model and Training The Word2Vec model can be implemented as a classifier to distinguish between true context words from skip-grams and false context words obtained through negative sampling. You can perform a dot product between the embeddings of target and context words to obtain predictions for labels and compute loss against true labels in the dataset. Subclassed Word2Vec Model Use the [Keras Subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models) to define your Word2Vec model with the following layers:
* `target_embedding`: A `tf.keras.layers.Embedding` layer, which looks up the embedding of a word when it appears as a target word. The number of parameters in this layer is `(vocab_size * embedding_dim)`.
* `context_embedding`: Another `tf.keras.layers.Embedding` layer, which looks up the embedding of a word when it appears as a context word. The number of parameters in this layer is the same as in `target_embedding`, i.e. `(vocab_size * embedding_dim)`.
* `dots`: A `tf.keras.layers.Dot` layer that computes the dot product of target and context embeddings from a training pair.
* `flatten`: A `tf.keras.layers.Flatten` layer to flatten the results of the `dots` layer into logits.

With the subclassed model, you can define the `call()` function that accepts `(target, context)` pairs which can then be passed into their corresponding embedding layer. Reshape the `context_embedding` to perform a dot product with `target_embedding` and return the flattened result. Key point: The `target_embedding` and `context_embedding` layers can be shared as well. You could also use a concatenation of both embeddings as the final Word2Vec embedding.
###Code
class Word2Vec(Model):
def __init__(self, vocab_size, embedding_dim):
super(Word2Vec, self).__init__()
self.target_embedding = Embedding(vocab_size,
embedding_dim,
input_length=1,
name="w2v_embedding", )
self.context_embedding = Embedding(vocab_size,
embedding_dim,
input_length=num_ns+1)
self.dots = Dot(axes=(3,2))
self.flatten = Flatten()
def call(self, pair):
target, context = pair
we = self.target_embedding(target)
ce = self.context_embedding(context)
dots = self.dots([ce, we])
return self.flatten(dots)
###Output
_____no_output_____
###Markdown
Define loss function and compile model For simplicity, you can use `tf.keras.losses.CategoricalCrossentropy` as an alternative to the negative sampling loss. If you would like to write your own custom loss function, you can also do so as follows:
```python
def custom_loss(x_logit, y_true):
      return tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=y_true)
```
It's time to build your model! Instantiate your Word2Vec class with an embedding dimension of 128 (you could experiment with different values). Compile the model with the `tf.keras.optimizers.Adam` optimizer.
###Code
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Also define a callback to log training statistics for tensorboard.
###Code
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
###Output
_____no_output_____
###Markdown
Train the model with `dataset` prepared above for some number of epochs.
###Code
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
###Output
_____no_output_____
###Markdown
TensorBoard now shows the Word2Vec model's accuracy and loss.
```python
%tensorboard --logdir logs
```
Embedding lookup and analysis Obtain the weights from the model using `get_layer()` and `get_weights()`. The `get_vocabulary()` function provides the vocabulary to build a metadata file with one token per line.
###Code
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
###Output
_____no_output_____
###Markdown
Create and save the vectors and metadata file.
###Code
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
if index == 0: continue # skip 0, it's padding.
vec = weights[index]
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_m.write(word + "\n")
out_v.close()
out_m.close()
###Output
_____no_output_____
###Markdown
Download the `vectors.tsv` and `metadata.tsv` to analyze the obtained embeddings in the [Embedding Projector](https://projector.tensorflow.org/).
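Alternatively, as a quick local sanity check before (or instead of) the Embedding Projector, you could run a cosine-similarity lookup directly on `weights` and `vocab`. This is an optional sketch; the query word `'king'` is only an assumption about what the learned vocabulary contains.
```python
import numpy as np

def nearest_tokens(query, k=5):
    idx = vocab.index(query)
    norms = np.linalg.norm(weights, axis=1) + 1e-9
    sims = (weights @ weights[idx]) / (norms * norms[idx])
    # The closest entry is the query itself, so skip index 0 of the ranking.
    return [vocab[i] for i in np.argsort(-sims)[1:k + 1]]

print(nearest_tokens('king'))
```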
###Code
try:
from google.colab import files
files.download('vectors.tsv')
files.download('metadata.tsv')
except Exception as e:
pass
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Word2Vec Word2Vec is not a singular algorithm, rather, it is a family of model architectures and optimizations that can be used to learn word embeddings from large datasets. Embeddings learned through Word2Vec have proven to be successful on a variety of downstream natural language processing tasks.

Note: This tutorial is based on [Efficient Estimation of Word Representations in Vector Space](https://arxiv.org/pdf/1301.3781.pdf) and [Distributed Representations of Words and Phrases and their Compositionality](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). It is not an exact implementation of the papers. Rather, it is intended to illustrate the key ideas.

These papers proposed two methods for learning representations of words:
* **Continuous Bag-of-Words Model**, which predicts the middle word based on surrounding context words. The context consists of a few words before and after the current (middle) word. This architecture is called a bag-of-words model as the order of words in the context is not important.
* **Continuous Skip-gram Model**, which predicts words within a certain range before and after the current word in the same sentence. A worked example of this is given below.

You'll use the skip-gram approach in this tutorial. First, you'll explore skip-grams and other concepts using a single sentence for illustration. Next, you'll train your own Word2Vec model on a small dataset. This tutorial also contains code to export the trained embeddings and visualize them in the [TensorFlow Embedding Projector](http://projector.tensorflow.org/). Skip-gram and Negative Sampling While a bag-of-words model predicts a word given the neighboring context, a skip-gram model predicts the context (or neighbors) of a word, given the word itself. The model is trained on skip-grams, which are n-grams that allow tokens to be skipped (see the diagram below for an example). The context of a word can be represented through a set of skip-gram pairs of `(target_word, context_word)` where `context_word` appears in the neighboring context of `target_word`. Consider the following sentence of 8 words.

> The wide road shimmered in the hot sun.

The context words for each of the 8 words of this sentence are defined by a window size. The window size determines the span of words on either side of a `target_word` that can be considered a `context word`. Take a look at this table of skip-grams for target words based on different window sizes. Note: For this tutorial, a window size of *n* implies n words on each side with a total window span of 2*n+1 words across a word. The training objective of the skip-gram model is to maximize the probability of predicting context words given the target word. For a sequence of words *w1, w2, ... wT*, the objective can be written as the average log probability

$$\frac{1}{T}\sum_{t=1}^{T} \sum_{-c \leq j \leq c,\ j \neq 0} \log p(w_{t+j} \mid w_t)$$

where `c` is the size of the training context. The basic skip-gram formulation defines this probability using the softmax function

$$p(w_O \mid w_I) = \frac{\exp\left({v'_{w_O}}^{\top} v_{w_I}\right)}{\sum_{w=1}^{W} \exp\left({v'_{w}}^{\top} v_{w_I}\right)}$$

where *v* and *v'* are target and context vector representations of words and *W* is vocabulary size. Computing the denominator of this formulation involves performing a full softmax over the entire vocabulary, which is often large (10<sup>5</sup>-10<sup>7</sup> terms). The [Noise Contrastive Estimation](https://www.tensorflow.org/api_docs/python/tf/nn/nce_loss) loss function is an efficient approximation for a full softmax.
With an objective to learn word embeddings instead of modelling the word distribution, NCE loss can be [simplified](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) to use negative sampling. The simplified negative sampling objective for a target word is to distinguish the context word from *num_ns* negative samples drawn from the noise distribution *P<sub>n</sub>(w)* of words. More precisely, an efficient approximation of full softmax over the vocabulary is, for a skip-gram pair, to pose the loss for a target word as a classification problem between the context word and *num_ns* negative samples. A negative sample is defined as a (target_word, context_word) pair such that the context_word does not appear in the `window_size` neighborhood of the target_word. For the example sentence, here are a few potential negative samples (when `window_size` is 2).
```
(hot, shimmered)
(wide, hot)
(wide, sun)
```
In the next section, you'll generate skip-grams and negative samples for a single sentence. You'll also learn about subsampling techniques and train a classification model for positive and negative training examples later in the tutorial. Setup
###Code
!pip install tqdm
import io
import itertools
import numpy as np
import os
import re
import string
import tensorflow as tf
import tqdm
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Activation, Dense, Dot, Embedding, Flatten, GlobalAveragePooling1D, Reshape
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
SEED = 42
AUTOTUNE = tf.data.AUTOTUNE
###Output
_____no_output_____
###Markdown
Vectorize an example sentence Consider the following sentence: `The wide road shimmered in the hot sun.`Tokenize the sentence:
###Code
sentence = "The wide road shimmered in the hot sun"
tokens = list(sentence.lower().split())
print(len(tokens))
###Output
_____no_output_____
###Markdown
Create a vocabulary to save mappings from tokens to integer indices.
###Code
vocab, index = {}, 1 # start indexing from 1
vocab['<pad>'] = 0 # add a padding token
for token in tokens:
if token not in vocab:
vocab[token] = index
index += 1
vocab_size = len(vocab)
print(vocab)
###Output
_____no_output_____
###Markdown
Create an inverse vocabulary to save mappings from integer indices to tokens.
###Code
inverse_vocab = {index: token for token, index in vocab.items()}
print(inverse_vocab)
###Output
_____no_output_____
###Markdown
Vectorize your sentence.
###Code
example_sequence = [vocab[word] for word in tokens]
print(example_sequence)
###Output
_____no_output_____
###Markdown
Generate skip-grams from one sentence The `tf.keras.preprocessing.sequence` module provides useful functions that simplify data preparation for Word2Vec. You can use the `tf.keras.preprocessing.sequence.skipgrams` to generate skip-gram pairs from the `example_sequence` with a given `window_size` from tokens in the range `[0, vocab_size)`.Note: `negative_samples` is set to `0` here as batching negative samples generated by this function requires a bit of code. You will use another function to perform negative sampling in the next section.
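For intuition, here is a rough pure-Python sketch of the positive pairs this utility produces (using words instead of integer indices, and without any subsampling); the actual `skipgrams` call follows below.
```python
words = "the wide road shimmered in the hot sun".split()
window = 2
pairs = []
for i, target in enumerate(words):
    # Every word within `window` positions of the target (excluding itself) is a context word.
    for j in range(max(0, i - window), min(len(words), i + window + 1)):
        if j != i:
            pairs.append((target, words[j]))
print(pairs[:6])
```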
###Code
window_size = 2
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
example_sequence,
vocabulary_size=vocab_size,
window_size=window_size,
negative_samples=0)
print(len(positive_skip_grams))
###Output
_____no_output_____
###Markdown
Take a look at a few positive skip-grams.
###Code
for target, context in positive_skip_grams[:5]:
print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
###Output
_____no_output_____
###Markdown
Negative sampling for one skip-gram The `skipgrams` function returns all positive skip-gram pairs by sliding over a given window span. To produce additional skip-gram pairs that would serve as negative samples for training, you need to sample random words from the vocabulary. Use the `tf.random.log_uniform_candidate_sampler` function to sample `num_ns` negative samples for a given target word in a window. You can call the function on one skip-gram's target word and pass the context word as the true class to exclude it from being sampled. Key point: *num_ns* (number of negative samples per positive context word) between [5, 20] is [shown to work](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) best for smaller datasets, while *num_ns* between [2, 5] suffices for larger datasets.
###Code
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class, # class that should be sampled as 'positive'
num_true=1, # each positive skip-gram has 1 positive context class
num_sampled=num_ns, # number of negative context words to sample
unique=True, # all the negative samples should be unique
range_max=vocab_size, # pick indices of the samples from [0, vocab_size)
seed=SEED, # seed for reproducibility
name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
###Output
_____no_output_____
###Markdown
Construct one training example For a given positive `(target_word, context_word)` skip-gram, you now also have `num_ns` negative sampled context words that do not appear in the window size neighborhood of `target_word`. Batch the `1` positive `context_word` and `num_ns` negative context words into one tensor. This produces a set of positive skip-grams (labelled as `1`) and negative samples (labelled as `0`) for each target word.
###Code
# Add a dimension so you can use concatenation (on the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concat positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label first context word as 1 (positive) followed by num_ns 0s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape target to shape (1,) and context and label to (num_ns+1,).
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
###Output
_____no_output_____
###Markdown
Take a look at the context and the corresponding labels for the target word from the skip-gram example above.
###Code
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
###Output
_____no_output_____
###Markdown
A tuple of `(target, context, label)` tensors constitutes one training example for training your skip-gram negative sampling Word2Vec model. Notice that the target is of shape `(1,)` while the context and label are of shape `(1+num_ns,)`
###Code
print(f"target :", target)
print(f"context :", context )
print(f"label :", label )
###Output
_____no_output_____
###Markdown
Summary This picture summarizes the procedure of generating a training example from a sentence.  Compile all steps into one function Skip-gram Sampling table A large dataset means a larger vocabulary, with a higher number of frequent words such as stopwords. Training examples obtained from sampling commonly occurring words (such as `the`, `is`, `on`) don't add much useful information for the model to learn from. [Mikolov et al.](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) suggest subsampling of frequent words as a helpful practice to improve embedding quality. The `tf.keras.preprocessing.sequence.skipgrams` function accepts a sampling table argument to encode the probability of sampling any token. You can use `tf.keras.preprocessing.sequence.make_sampling_table` to generate a word-frequency-rank-based probabilistic sampling table and pass it to the `skipgrams` function. Take a look at the sampling probabilities for a `vocab_size` of 10.
###Code
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
###Output
_____no_output_____
###Markdown
`sampling_table[i]` denotes the probability of sampling the i-th most common word in a dataset. The function assumes a [Zipf's distribution](https://en.wikipedia.org/wiki/Zipf%27s_law) of the word frequencies for sampling. Key point: The `tf.random.log_uniform_candidate_sampler` already assumes that the vocabulary frequency follows a log-uniform (Zipf's) distribution. Using this distribution-weighted sampling also helps approximate the Noise Contrastive Estimation (NCE) loss with simpler loss functions for training a negative sampling objective. Generate training data Compile all the steps described above into a function that can be called on a list of vectorized sentences obtained from any text dataset. Notice that the sampling table is built before sampling skip-gram word pairs. You will use this function in the later sections.
###Code
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
# Elements of each training example are appended to these lists.
targets, contexts, labels = [], [], []
# Build the sampling table for vocab_size tokens.
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
# Iterate over all sequences (sentences) in dataset.
for sequence in tqdm.tqdm(sequences):
# Generate positive skip-gram pairs for a sequence (sentence).
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
sequence,
vocabulary_size=vocab_size,
sampling_table=sampling_table,
window_size=window_size,
negative_samples=0)
# Iterate over each positive skip-gram pair to produce training examples
# with positive context word and negative samples.
for target_word, context_word in positive_skip_grams:
context_class = tf.expand_dims(
tf.constant([context_word], dtype="int64"), 1)
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class,
num_true=1,
num_sampled=num_ns,
unique=True,
range_max=vocab_size,
seed=seed,
name="negative_sampling")
# Build context and label vectors (for one target word)
negative_sampling_candidates = tf.expand_dims(
negative_sampling_candidates, 1)
context = tf.concat([context_class, negative_sampling_candidates], 0)
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Append each element from the training example to global lists.
targets.append(target_word)
contexts.append(context)
labels.append(label)
return targets, contexts, labels
###Output
_____no_output_____
###Markdown
Prepare training data for Word2Vec With an understanding of how to work with one sentence for a skip-gram negative sampling based Word2Vec model, you can proceed to generate training examples from a larger list of sentences! Download text corpus You will use a text file of Shakespeare's writing for this tutorial. Change the following line to run this code on your own data.
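If your corpus already lives in memory rather than in a file, a dataset can also be built directly from a list of strings; the sentences and the name `in_memory_ds` below are made up for illustration, and the tutorial itself continues with the downloaded file.
```python
my_sentences = [
    "the wide road shimmered in the hot sun",
    "a second made-up sentence for illustration",
]
in_memory_ds = tf.data.Dataset.from_tensor_slices(my_sentences)
```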
###Code
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
###Output
_____no_output_____
###Markdown
Read text from the file and take a look at the first few lines.
###Code
with open(path_to_file) as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
###Output
_____no_output_____
###Markdown
Use the non-empty lines to construct a `tf.data.TextLineDataset` object for the next steps.
###Code
text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
###Output
_____no_output_____
###Markdown
Vectorize sentences from the corpus You can use the `TextVectorization` layer to vectorize sentences from the corpus. Learn more about using this layer in this [Text Classification](https://www.tensorflow.org/tutorials/keras/text_classification) tutorial. Notice from the first few sentences above that the text needs to be in one case and punctuation needs to be removed. To do this, define a `custom_standardization function` that can be used in the TextVectorization layer.
###Code
# We create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
return tf.strings.regex_replace(lowercase,
'[%s]' % re.escape(string.punctuation), '')
# Define the vocabulary size and number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the text vectorization layer to normalize, split, and map strings to
# integers. Set output_sequence_length length to pad all samples to same length.
vectorize_layer = TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
###Output
_____no_output_____
###Markdown
Call `adapt` on the text dataset to create vocabulary.
###Code
vectorize_layer.adapt(text_ds.batch(1024))
###Output
_____no_output_____
###Markdown
Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with `get_vocabulary()`. This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
###Code
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
###Output
_____no_output_____
###Markdown
The vectorize_layer can now be used to generate vectors for each element in the `text_ds`.
###Code
def vectorize_text(text):
text = tf.expand_dims(text, -1)
return tf.squeeze(vectorize_layer(text))
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
###Output
_____no_output_____
###Markdown
Obtain sequences from the dataset You now have a `tf.data.Dataset` of integer encoded sentences. To prepare the dataset for training a Word2Vec model, flatten the dataset into a list of sentence vector sequences. This step is required as you would iterate over each sentence in the dataset to produce positive and negative examples. Note: Since the `generate_training_data()` defined earlier uses non-TF python/numpy functions, you could also use a `tf.py_function` or `tf.numpy_function` with `tf.data.Dataset.map()`.
###Code
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
###Output
_____no_output_____
###Markdown
Take a look at a few examples from `sequences`.
###Code
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
###Output
_____no_output_____
###Markdown
Generate training examples from sequences `sequences` is now a list of int-encoded sentences. Just call the `generate_training_data()` function defined earlier to generate training examples for the Word2Vec model. To recap, the function iterates over each word from each sequence to collect positive and negative context words. The lengths of `targets`, `contexts`, and `labels` should be the same, representing the total number of training examples.
###Code
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
print(len(targets), len(contexts), len(labels))
###Output
_____no_output_____
###Markdown
Configure the dataset for performance To perform efficient batching for the potentially large number of training examples, use the `tf.data.Dataset` API. After this step, you would have a `tf.data.Dataset` object of `(target_word, context_word), (label)` elements to train your Word2Vec model!
###Code
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
###Output
_____no_output_____
###Markdown
Add `cache()` and `prefetch()` to improve performance.
###Code
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
###Output
_____no_output_____
###Markdown
Model and Training The Word2Vec model can be implemented as a classifier to distinguish between true context words from skip-grams and false context words obtained through negative sampling. You can perform a dot product between the embeddings of target and context words to obtain predictions for labels and compute loss against true labels in the dataset. Subclassed Word2Vec Model Use the [Keras Subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models) to define your Word2Vec model with the following layers:
* `target_embedding`: A `tf.keras.layers.Embedding` layer, which looks up the embedding of a word when it appears as a target word. The number of parameters in this layer is `(vocab_size * embedding_dim)`.
* `context_embedding`: Another `tf.keras.layers.Embedding` layer, which looks up the embedding of a word when it appears as a context word. The number of parameters in this layer is the same as in `target_embedding`, i.e. `(vocab_size * embedding_dim)`.
* `dots`: A `tf.keras.layers.Dot` layer that computes the dot product of target and context embeddings from a training pair.
* `flatten`: A `tf.keras.layers.Flatten` layer to flatten the results of the `dots` layer into logits.

With the subclassed model, you can define the `call()` function that accepts `(target, context)` pairs which can then be passed into their corresponding embedding layer. Reshape the `context_embedding` to perform a dot product with `target_embedding` and return the flattened result. Key point: The `target_embedding` and `context_embedding` layers can be shared as well. You could also use a concatenation of both embeddings as the final Word2Vec embedding.
###Code
class Word2Vec(Model):
def __init__(self, vocab_size, embedding_dim):
super(Word2Vec, self).__init__()
self.target_embedding = Embedding(vocab_size,
embedding_dim,
input_length=1,
name="w2v_embedding", )
self.context_embedding = Embedding(vocab_size,
embedding_dim,
input_length=num_ns+1)
self.dots = Dot(axes=(3,2))
self.flatten = Flatten()
def call(self, pair):
target, context = pair
we = self.target_embedding(target)
ce = self.context_embedding(context)
dots = self.dots([ce, we])
return self.flatten(dots)
###Output
_____no_output_____
###Markdown
Define loss function and compile model For simplicity, you can use `tf.keras.losses.CategoricalCrossentropy` as an alternative to the negative sampling loss. If you would like to write your own custom loss function, you can also do so as follows:
```python
def custom_loss(x_logit, y_true):
      return tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=y_true)
```
It's time to build your model! Instantiate your Word2Vec class with an embedding dimension of 128 (you could experiment with different values). Compile the model with the `tf.keras.optimizers.Adam` optimizer.
###Code
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Also define a callback to log training statistics for tensorboard.
###Code
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
###Output
_____no_output_____
###Markdown
Train the model with `dataset` prepared above for some number of epochs.
###Code
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
###Output
_____no_output_____
###Markdown
TensorBoard now shows the Word2Vec model's accuracy and loss.
```python
%tensorboard --logdir logs
```
Embedding lookup and analysis Obtain the weights from the model using `get_layer()` and `get_weights()`. The `get_vocabulary()` function provides the vocabulary to build a metadata file with one token per line.
###Code
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
###Output
_____no_output_____
###Markdown
Create and save the vectors and metadata file.
###Code
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
if index == 0: continue # skip 0, it's padding.
vec = weights[index]
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_m.write(word + "\n")
out_v.close()
out_m.close()
###Output
_____no_output_____
###Markdown
Download the `vectors.tsv` and `metadata.tsv` to analyze the obtained embeddings in the [Embedding Projector](https://projector.tensorflow.org/).
###Code
try:
from google.colab import files
files.download('vectors.tsv')
files.download('metadata.tsv')
except Exception as e:
pass
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Word2Vec Word2Vec is not a singular algorithm, rather, it is a family of model architectures and optimizations that can be used to learn word embeddings from large datasets. Embeddings learned through Word2Vec have proven to be successful on a variety of downstream natural language processing tasks.

Note: This tutorial is based on [Efficient Estimation of Word Representations in Vector Space](https://arxiv.org/pdf/1301.3781.pdf) and [Distributed Representations of Words and Phrases and their Compositionality](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). It is not an exact implementation of the papers. Rather, it is intended to illustrate the key ideas.

These papers proposed two methods for learning representations of words:
* **Continuous Bag-of-Words Model**, which predicts the middle word based on surrounding context words. The context consists of a few words before and after the current (middle) word. This architecture is called a bag-of-words model as the order of words in the context is not important.
* **Continuous Skip-gram Model**, which predicts words within a certain range before and after the current word in the same sentence. A worked example of this is given below.

You'll use the skip-gram approach in this tutorial. First, you'll explore skip-grams and other concepts using a single sentence for illustration. Next, you'll train your own Word2Vec model on a small dataset. This tutorial also contains code to export the trained embeddings and visualize them in the [TensorFlow Embedding Projector](http://projector.tensorflow.org/). Skip-gram and Negative Sampling While a bag-of-words model predicts a word given the neighboring context, a skip-gram model predicts the context (or neighbors) of a word, given the word itself. The model is trained on skip-grams, which are n-grams that allow tokens to be skipped (see the diagram below for an example). The context of a word can be represented through a set of skip-gram pairs of `(target_word, context_word)` where `context_word` appears in the neighboring context of `target_word`. Consider the following sentence of 8 words.

> The wide road shimmered in the hot sun.

The context words for each of the 8 words of this sentence are defined by a window size. The window size determines the span of words on either side of a `target_word` that can be considered a `context word`. Take a look at this table of skip-grams for target words based on different window sizes. Note: For this tutorial, a window size of *n* implies n words on each side with a total window span of 2*n+1 words across a word. The training objective of the skip-gram model is to maximize the probability of predicting context words given the target word. For a sequence of words *w1, w2, ... wT*, the objective can be written as the average log probability

$$\frac{1}{T}\sum_{t=1}^{T} \sum_{-c \leq j \leq c,\ j \neq 0} \log p(w_{t+j} \mid w_t)$$

where `c` is the size of the training context. The basic skip-gram formulation defines this probability using the softmax function

$$p(w_O \mid w_I) = \frac{\exp\left({v'_{w_O}}^{\top} v_{w_I}\right)}{\sum_{w=1}^{W} \exp\left({v'_{w}}^{\top} v_{w_I}\right)}$$

where *v* and *v'* are target and context vector representations of words and *W* is vocabulary size. Computing the denominator of this formulation involves performing a full softmax over the entire vocabulary, which is often large (10<sup>5</sup>-10<sup>7</sup> terms). The [Noise Contrastive Estimation](https://www.tensorflow.org/api_docs/python/tf/nn/nce_loss) loss function is an efficient approximation for a full softmax.
With an objective to learn word embeddings instead of modelling the word distribution, NCE loss can be [simplified](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) to use negative sampling. The simplified negative sampling objective for a target word is to distinguish the context word from *num_ns* negative samples drawn from the noise distribution *P<sub>n</sub>(w)* of words. More precisely, an efficient approximation of full softmax over the vocabulary is, for a skip-gram pair, to pose the loss for a target word as a classification problem between the context word and *num_ns* negative samples. A negative sample is defined as a (target_word, context_word) pair such that the context_word does not appear in the `window_size` neighborhood of the target_word. For the example sentence, here are a few potential negative samples (when `window_size` is 2).
```
(hot, shimmered)
(wide, hot)
(wide, sun)
```
In the next section, you'll generate skip-grams and negative samples for a single sentence. You'll also learn about subsampling techniques and train a classification model for positive and negative training examples later in the tutorial. Setup
###Code
import io
import re
import string
import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
# Load the TensorBoard notebook extension
%load_ext tensorboard
SEED = 42
AUTOTUNE = tf.data.AUTOTUNE
###Output
_____no_output_____
###Markdown
Vectorize an example sentence Consider the following sentence: `The wide road shimmered in the hot sun.`Tokenize the sentence:
###Code
sentence = "The wide road shimmered in the hot sun"
tokens = list(sentence.lower().split())
print(len(tokens))
###Output
_____no_output_____
###Markdown
Create a vocabulary to save mappings from tokens to integer indices.
###Code
vocab, index = {}, 1 # start indexing from 1
vocab['<pad>'] = 0 # add a padding token
for token in tokens:
if token not in vocab:
vocab[token] = index
index += 1
vocab_size = len(vocab)
print(vocab)
###Output
_____no_output_____
###Markdown
Create an inverse vocabulary to save mappings from integer indices to tokens.
###Code
inverse_vocab = {index: token for token, index in vocab.items()}
print(inverse_vocab)
###Output
_____no_output_____
###Markdown
Vectorize your sentence.
###Code
example_sequence = [vocab[word] for word in tokens]
print(example_sequence)
###Output
_____no_output_____
###Markdown
Generate skip-grams from one sentence The `tf.keras.preprocessing.sequence` module provides useful functions that simplify data preparation for Word2Vec. You can use the `tf.keras.preprocessing.sequence.skipgrams` to generate skip-gram pairs from the `example_sequence` with a given `window_size` from tokens in the range `[0, vocab_size)`.Note: `negative_samples` is set to `0` here as batching negative samples generated by this function requires a bit of code. You will use another function to perform negative sampling in the next section.
###Code
window_size = 2
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
example_sequence,
vocabulary_size=vocab_size,
window_size=window_size,
negative_samples=0)
print(len(positive_skip_grams))
###Output
_____no_output_____
###Markdown
Take a look at a few positive skip-grams.
###Code
for target, context in positive_skip_grams[:5]:
print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
###Output
_____no_output_____
###Markdown
Negative sampling for one skip-gram The `skipgrams` function returns all positive skip-gram pairs by sliding over a given window span. To produce additional skip-gram pairs that would serve as negative samples for training, you need to sample random words from the vocabulary. Use the `tf.random.log_uniform_candidate_sampler` function to sample `num_ns` negative samples for a given target word in a window. You can call the function on one skip-gram's target word and pass the context word as the true class to exclude it from being sampled. Key point: *num_ns* (number of negative samples per positive context word) between [5, 20] is [shown to work](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) best for smaller datasets, while *num_ns* between [2, 5] suffices for larger datasets.
###Code
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class, # class that should be sampled as 'positive'
num_true=1, # each positive skip-gram has 1 positive context class
num_sampled=num_ns, # number of negative context words to sample
unique=True, # all the negative samples should be unique
range_max=vocab_size, # pick indices of the samples from [0, vocab_size)
seed=SEED, # seed for reproducibility
name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
###Output
_____no_output_____
###Markdown
Construct one training example For a given positive `(target_word, context_word)` skip-gram, you now also have `num_ns` negative sampled context words that do not appear in the window size neighborhood of `target_word`. Batch the `1` positive `context_word` and `num_ns` negative context words into one tensor. This produces a set of positive skip-grams (labelled as `1`) and negative samples (labelled as `0`) for each target word.
###Code
# Add a dimension so you can use concatenation (on the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concat positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label first context word as 1 (positive) followed by num_ns 0s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape target to shape (1,) and context and label to (num_ns+1,).
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
###Output
_____no_output_____
###Markdown
Take a look at the context and the corresponding labels for the target word from the skip-gram example above.
###Code
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
###Output
_____no_output_____
###Markdown
A tuple of `(target, context, label)` tensors constitutes one training example for training your skip-gram negative sampling Word2Vec model. Notice that the target is of shape `(1,)` while the context and label are of shape `(1+num_ns,)`
###Code
print("target :", target)
print("context :", context)
print("label :", label)
###Output
_____no_output_____
###Markdown
Summary This picture summarizes the procedure of generating a training example from a sentence.  Compile all steps into one function Skip-gram Sampling table A large dataset means a larger vocabulary, with a higher number of frequent words such as stopwords. Training examples obtained from sampling commonly occurring words (such as `the`, `is`, `on`) don't add much useful information for the model to learn from. [Mikolov et al.](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) suggest subsampling of frequent words as a helpful practice to improve embedding quality. The `tf.keras.preprocessing.sequence.skipgrams` function accepts a sampling table argument to encode the probability of sampling any token. You can use `tf.keras.preprocessing.sequence.make_sampling_table` to generate a word-frequency-rank-based probabilistic sampling table and pass it to the `skipgrams` function. Take a look at the sampling probabilities for a `vocab_size` of 10.
###Code
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
###Output
_____no_output_____
###Markdown
`sampling_table[i]` denotes the probability of sampling the i-th most common word in a dataset. The function assumes a [Zipf's distribution](https://en.wikipedia.org/wiki/Zipf%27s_law) of the word frequencies for sampling. Key point: The `tf.random.log_uniform_candidate_sampler` already assumes that the vocabulary frequency follows a log-uniform (Zipf's) distribution. Using this distribution-weighted sampling also helps approximate the Noise Contrastive Estimation (NCE) loss with simpler loss functions for training a negative sampling objective. Generate training data Compile all the steps described above into a function that can be called on a list of vectorized sentences obtained from any text dataset. Notice that the sampling table is built before sampling skip-gram word pairs. You will use this function in the later sections.
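As a brief aside before the full function below: a sampling table can be passed straight to `skipgrams`, here reusing the small example sentence from earlier. This is only an illustrative sketch, and because every token in such a tiny vocabulary counts as "frequent", most (possibly all) pairs may be dropped.
```python
example_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
subsampled_pairs, _ = tf.keras.preprocessing.sequence.skipgrams(
    example_sequence,
    vocabulary_size=vocab_size,
    sampling_table=example_table,
    window_size=window_size,
    negative_samples=0)
print(len(subsampled_pairs))
```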
###Code
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
# Elements of each training example are appended to these lists.
targets, contexts, labels = [], [], []
# Build the sampling table for vocab_size tokens.
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
# Iterate over all sequences (sentences) in dataset.
for sequence in tqdm.tqdm(sequences):
# Generate positive skip-gram pairs for a sequence (sentence).
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
sequence,
vocabulary_size=vocab_size,
sampling_table=sampling_table,
window_size=window_size,
negative_samples=0)
# Iterate over each positive skip-gram pair to produce training examples
# with positive context word and negative samples.
for target_word, context_word in positive_skip_grams:
context_class = tf.expand_dims(
tf.constant([context_word], dtype="int64"), 1)
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class,
num_true=1,
num_sampled=num_ns,
unique=True,
range_max=vocab_size,
seed=seed,
name="negative_sampling")
# Build context and label vectors (for one target word)
negative_sampling_candidates = tf.expand_dims(
negative_sampling_candidates, 1)
context = tf.concat([context_class, negative_sampling_candidates], 0)
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Append each element from the training example to global lists.
targets.append(target_word)
contexts.append(context)
labels.append(label)
return targets, contexts, labels
###Output
_____no_output_____
###Markdown
Prepare training data for Word2Vec With an understanding of how to work with one sentence for a skip-gram negative sampling based Word2Vec model, you can proceed to generate training examples from a larger list of sentences! Download text corpus You will use a text file of Shakespeare's writing for this tutorial. Change the following line to run this code on your own data.
###Code
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
###Output
_____no_output_____
###Markdown
Read text from the file and take a look at the first few lines.
###Code
with open(path_to_file) as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
###Output
_____no_output_____
###Markdown
Use the non-empty lines to construct a `tf.data.TextLineDataset` object for the next steps.
###Code
text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
###Output
_____no_output_____
###Markdown
Vectorize sentences from the corpus You can use the `TextVectorization` layer to vectorize sentences from the corpus. Learn more about using this layer in this [Text Classification](https://www.tensorflow.org/tutorials/keras/text_classification) tutorial. Notice from the first few sentences above that the text needs to be in one case and punctuation needs to be removed. To do this, define a `custom_standardization function` that can be used in the TextVectorization layer.
###Code
# Now, create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
return tf.strings.regex_replace(lowercase,
'[%s]' % re.escape(string.punctuation), '')
# Define the vocabulary size and number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the TextVectorization layer to normalize, split, and map strings to
# integers. Set output_sequence_length length to pad all samples to same length.
vectorize_layer = layers.TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
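# Quick sanity check (an illustrative addition, not from the original tutorial):
# the standardization should lowercase the text and strip punctuation.
print(custom_standardization(tf.constant("The wide road shimmered, in the HOT sun!")).numpy())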
###Output
_____no_output_____
###Markdown
Call `adapt` on the text dataset to create vocabulary.
###Code
vectorize_layer.adapt(text_ds.batch(1024))
###Output
_____no_output_____
###Markdown
Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with `get_vocabulary()`. This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
###Code
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
###Output
_____no_output_____
###Markdown
The vectorize_layer can now be used to generate vectors for each element in the `text_ds`.
###Code
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
###Output
_____no_output_____
###Markdown
Obtain sequences from the dataset You now have a `tf.data.Dataset` of integer encoded sentences. To prepare the dataset for training a Word2Vec model, flatten the dataset into a list of sentence vector sequences. This step is required as you would iterate over each sentence in the dataset to produce positive and negative examples. Note: Since the `generate_training_data()` defined earlier uses non-TF python/numpy functions, you could also use a `tf.py_function` or `tf.numpy_function` with `tf.data.Dataset.map()`.
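A minimal sketch of that alternative, here with `tf.numpy_function` (the helper `count_tokens_np` and the dataset name `token_counts_ds` are made-up illustrations): the wrapped function receives plain NumPy arrays and can run arbitrary Python.
```python
def count_tokens_np(seq):
    # `seq` arrives as a NumPy array; count the non-padding (non-zero) tokens.
    return np.int64(np.count_nonzero(seq))

token_counts_ds = text_vector_ds.map(
    lambda seq: tf.numpy_function(func=count_tokens_np, inp=[seq], Tout=tf.int64))
```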
###Code
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
###Output
_____no_output_____
###Markdown
Take a look at a few examples from `sequences`.
###Code
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
###Output
_____no_output_____
###Markdown
Generate training examples from sequences `sequences` is now a list of int-encoded sentences. Just call the `generate_training_data()` function defined earlier to generate training examples for the Word2Vec model. To recap, the function iterates over each word from each sequence to collect positive and negative context words. The lengths of `targets`, `contexts`, and `labels` should be the same, representing the total number of training examples.
###Code
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
targets = np.array(targets)
contexts = np.array(contexts)[:,:,0]  # drop the extra axis: (num_examples, num_ns+1, 1) -> (num_examples, num_ns+1)
labels = np.array(labels)
print('\n')
print(f"targets.shape: {targets.shape}")
print(f"contexts.shape: {contexts.shape}")
print(f"labels.shape: {labels.shape}")
###Output
_____no_output_____
###Markdown
Configure the dataset for performance To perform efficient batching for the potentially large number of training examples, use the `tf.data.Dataset` API. After this step, you would have a `tf.data.Dataset` object of `(target_word, context_word), (label)` elements to train your Word2Vec model!
###Code
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
###Output
_____no_output_____
###Markdown
Add `cache()` and `prefetch()` to improve performance.
###Code
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
###Output
_____no_output_____
###Markdown
Model and Training The Word2Vec model can be implemented as a classifier to distinguish between true context words from skip-grams and false context words obtained through negative sampling. You can perform a dot product between the embeddings of target and context words to obtain predictions for labels and compute loss against true labels in the dataset. Subclassed Word2Vec Model Use the [Keras Subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models) to define your Word2Vec model with the following layers:
* `target_embedding`: A `tf.keras.layers.Embedding` layer, which looks up the embedding of a word when it appears as a target word. The number of parameters in this layer is `(vocab_size * embedding_dim)`.
* `context_embedding`: Another `tf.keras.layers.Embedding` layer, which looks up the embedding of a word when it appears as a context word. The number of parameters in this layer is the same as in `target_embedding`, i.e. `(vocab_size * embedding_dim)`.
* `dots`: A `tf.keras.layers.Dot` layer that computes the dot product of target and context embeddings from a training pair.
* `flatten`: A `tf.keras.layers.Flatten` layer to flatten the results of the `dots` layer into logits.

With the subclassed model, you can define the `call()` function that accepts `(target, context)` pairs which can then be passed into their corresponding embedding layer. Reshape the `context_embedding` to perform a dot product with `target_embedding` and return the flattened result. Key point: The `target_embedding` and `context_embedding` layers can be shared as well. You could also use a concatenation of both embeddings as the final Word2Vec embedding.
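Before the class itself, here is a small shape sketch of the batched dot product the model computes; the sizes are illustrative, not the tutorial's values.
```python
batch, embed = 2, 8  # illustrative sizes
word_emb = tf.random.normal((batch, embed))                 # (batch, embed)
context_emb = tf.random.normal((batch, num_ns + 1, embed))  # (batch, context, embed)
logits = tf.einsum('be,bce->bc', word_emb, context_emb)
print(logits.shape)  # (batch, num_ns + 1): one logit per candidate context word
```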
###Code
class Word2Vec(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim):
super(Word2Vec, self).__init__()
self.target_embedding = layers.Embedding(vocab_size,
embedding_dim,
input_length=1,
name="w2v_embedding")
self.context_embedding = layers.Embedding(vocab_size,
embedding_dim,
input_length=num_ns+1)
def call(self, pair):
target, context = pair
# target: (batch, dummy?) # The dummy axis doesn't exist in TF2.7+
# context: (batch, context)
if len(target.shape) == 2:
target = tf.squeeze(target, axis=1)
# target: (batch,)
word_emb = self.target_embedding(target)
# word_emb: (batch, embed)
context_emb = self.context_embedding(context)
# context_emb: (batch, context, embed)
dots = tf.einsum('be,bce->bc', word_emb, context_emb)
# dots: (batch, context)
return dots
###Output
_____no_output_____
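###Markdown
Before compiling, you can run a quick sanity check (a minimal sketch, not part of the original tutorial): pass a dummy batch through an untrained `Word2Vec` instance and confirm that the logits have shape `(batch, num_ns + 1)`. The small embedding dimension used here is only for the check.
###Code
# Illustrative sanity check: the logits of a dummy batch should have
# shape (batch, num_ns + 1).
_check_model = Word2Vec(vocab_size, embedding_dim=8)
_dummy_targets = tf.zeros((2,), dtype=tf.int64)
_dummy_contexts = tf.zeros((2, num_ns + 1), dtype=tf.int64)
print(_check_model((_dummy_targets, _dummy_contexts)).shape)  # expected: (2, num_ns + 1)
###Output
_____no_output_____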
###Markdown
Define loss function and compile model For simplicity, you can use `tf.keras.losses.CategoricalCrossentropy` as an alternative to the negative sampling loss. If you would like to write your own custom loss function, you can also do so as follows:```python def custom_loss(x_logit, y_true): return tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=y_true)```It's time to build your model! Instantiate your Word2Vec class with an embedding dimension of 128 (you could experiment with different values). Compile the model with the `tf.keras.optimizers.Adam` optimizer.
###Code
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
###Output
_____no_output_____
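###Markdown
If you prefer the custom loss sketched above, a minimal variant could look like the following (an illustrative sketch only; it compiles a separate instance and is not used for training below). Note that the integer labels would need to be cast to float for the sigmoid cross-entropy loss.
###Code
# Illustrative alternative: compile with a custom sigmoid cross-entropy loss.
def custom_loss(x_logit, y_true):
  # Cast the integer labels to float, as required by this loss.
  return tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit,
                                                 labels=tf.cast(y_true, tf.float32))

word2vec_custom = Word2Vec(vocab_size, embedding_dim)
word2vec_custom.compile(optimizer='adam', loss=custom_loss, metrics=['accuracy'])
###Output
_____no_output_____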
###Markdown
Also define a callback to log training statistics for TensorBoard.
###Code
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
###Output
_____no_output_____
###Markdown
Train the model on the `dataset` prepared above for some number of epochs.
###Code
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
###Output
_____no_output_____
###Markdown
TensorBoard now shows the Word2Vec model's accuracy and loss.
###Code
#docs_infra: no_execute
%tensorboard --logdir logs
###Output
_____no_output_____
###Markdown
Embedding lookup and analysis Obtain the weights from the model using `get_layer()` and `get_weights()`. The `get_vocabulary()` function provides the vocabulary to build a metadata file with one token per line.
###Code
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
###Output
_____no_output_____
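###Markdown
Before exporting, you can also inspect the embeddings directly. The sketch below (illustrative only) looks up a few nearest neighbours of a query token by cosine similarity; the query word `'king'` is a hypothetical choice assumed to be in the Shakespeare vocabulary, so replace it with any entry of `vocab`.
###Code
# Illustrative only: nearest neighbours of a query token by cosine similarity.
query_word = 'king'  # assumed to be in the vocabulary; pick any token from `vocab`
if query_word in vocab:
  query_vec = weights[vocab.index(query_word)]
  sims = weights @ query_vec / (
      np.linalg.norm(weights, axis=1) * np.linalg.norm(query_vec) + 1e-8)
  nearest = np.argsort(-sims)[1:9]  # skip the query word itself
  print([vocab[i] for i in nearest])
###Output
_____no_output_____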
###Markdown
Create and save the vectors and metadata files.
###Code
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
if index == 0:
continue # skip 0, it's padding.
vec = weights[index]
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_m.write(word + "\n")
out_v.close()
out_m.close()
###Output
_____no_output_____
###Markdown
Download the `vectors.tsv` and `metadata.tsv` to analyze the obtained embeddings in the [Embedding Projector](https://projector.tensorflow.org/).
###Code
try:
from google.colab import files
files.download('vectors.tsv')
files.download('metadata.tsv')
except Exception:
pass
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook word2vec word2vec is not a singular algorithm, rather, it is a family of model architectures and optimizations that can be used to learn word embeddings from large datasets. Embeddings learned through word2vec have proven to be successful on a variety of downstream natural language processing tasks.Note: This tutorial is based on [Efficient estimation of word representations in vector space](https://arxiv.org/pdf/1301.3781.pdf) and [Distributed representations of words and phrases and their compositionality](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). It is not an exact implementation of the papers. Rather, it is intended to illustrate the key ideas.These papers proposed two methods for learning representations of words:* **Continuous bag-of-words model**: predicts the middle word based on surrounding context words. The context consists of a few words before and after the current (middle) word. This architecture is called a bag-of-words model as the order of words in the context is not important.* **Continuous skip-gram model**: predicts words within a certain range before and after the current word in the same sentence. A worked example of this is given below.You'll use the skip-gram approach in this tutorial. First, you'll explore skip-grams and other concepts using a single sentence for illustration. Next, you'll train your own word2vec model on a small dataset. This tutorial also contains code to export the trained embeddings and visualize them in the [TensorFlow Embedding Projector](http://projector.tensorflow.org/). Skip-gram and negative sampling While a bag-of-words model predicts a word given the neighboring context, a skip-gram model predicts the context (or neighbors) of a word, given the word itself. The model is trained on skip-grams, which are n-grams that allow tokens to be skipped (see the diagram below for an example). The context of a word can be represented through a set of skip-gram pairs of `(target_word, context_word)` where `context_word` appears in the neighboring context of `target_word`. Consider the following sentence of eight words:> The wide road shimmered in the hot sun.The context words for each of the 8 words of this sentence are defined by a window size. The window size determines the span of words on either side of a `target_word` that can be considered a `context word`. Below is a table of skip-grams for target words based on different window sizes. Note: For this tutorial, a window size of `n` implies n words on each side with a total window span of 2*n+1 words across a word.  The training objective of the skip-gram model is to maximize the probability of predicting context words given the target word. For a sequence of words *w1, w2, ... wT*, the objective can be written as the average log probability  where `c` is the size of the training context. The basic skip-gram formulation defines this probability using the softmax function.  where *v* and *v'* are target and context vector representations of words and *W* is vocabulary size. Computing the denominator of this formulation involves performing a full softmax over the entire vocabulary words, which are often large (105-107) terms. The [noise contrastive estimation](https://www.tensorflow.org/api_docs/python/tf/nn/nce_loss) (NCE) loss function is an efficient approximation for a full softmax. 
With an objective to learn word embeddings instead of modeling the word distribution, the NCE loss can be [simplified](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) to use negative sampling. The simplified negative sampling objective for a target word is to distinguish the context word from `num_ns` negative samples drawn from noise distribution *Pn(w)* of words. More precisely, an efficient approximation of full softmax over the vocabulary is, for a skip-gram pair, to pose the loss for a target word as a classification problem between the context word and `num_ns` negative samples. A negative sample is defined as a `(target_word, context_word)` pair such that the `context_word` does not appear in the `window_size` neighborhood of the `target_word`. For the example sentence, these are a few potential negative samples (when `window_size` is `2`).```(hot, shimmered)(wide, hot)(wide, sun)``` In the next section, you'll generate skip-grams and negative samples for a single sentence. You'll also learn about subsampling techniques and train a classification model for positive and negative training examples later in the tutorial. Setup
###Code
import io
import re
import string
import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
# Load the TensorBoard notebook extension
%load_ext tensorboard
SEED = 42
AUTOTUNE = tf.data.AUTOTUNE
###Output
_____no_output_____
###Markdown
Vectorize an example sentence Consider the following sentence: > The wide road shimmered in the hot sun. Tokenize the sentence:
###Code
sentence = "The wide road shimmered in the hot sun"
tokens = list(sentence.lower().split())
print(len(tokens))
###Output
_____no_output_____
###Markdown
Create a vocabulary to save mappings from tokens to integer indices:
###Code
vocab, index = {}, 1 # start indexing from 1
vocab['<pad>'] = 0 # add a padding token
for token in tokens:
if token not in vocab:
vocab[token] = index
index += 1
vocab_size = len(vocab)
print(vocab)
###Output
_____no_output_____
###Markdown
Create an inverse vocabulary to save mappings from integer indices to tokens:
###Code
inverse_vocab = {index: token for token, index in vocab.items()}
print(inverse_vocab)
###Output
_____no_output_____
###Markdown
Vectorize your sentence:
###Code
example_sequence = [vocab[word] for word in tokens]
print(example_sequence)
###Output
_____no_output_____
###Markdown
Generate skip-grams from one sentence The `tf.keras.preprocessing.sequence` module provides useful functions that simplify data preparation for word2vec. You can use the `tf.keras.preprocessing.sequence.skipgrams` to generate skip-gram pairs from the `example_sequence` with a given `window_size` from tokens in the range `[0, vocab_size)`.Note: `negative_samples` is set to `0` here, as batching negative samples generated by this function requires a bit of code. You will use another function to perform negative sampling in the next section.
###Code
window_size = 2
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
example_sequence,
vocabulary_size=vocab_size,
window_size=window_size,
negative_samples=0)
print(len(positive_skip_grams))
###Output
_____no_output_____
###Markdown
Print a few positive skip-grams:
###Code
for target, context in positive_skip_grams[:5]:
print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
###Output
_____no_output_____
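###Markdown
To make the windowing concrete, here is an optional sketch (illustrative only) that rebuilds the window-2 skip-gram pairs for a single target position by hand; position `3` ("shimmered") is chosen purely for illustration. The pairs should all appear among the output of `skipgrams` above.
###Code
# Illustrative only: build the in-window pairs for one target position by hand.
t = 3  # position of "shimmered" in the toy sentence (chosen for illustration)
manual_pairs = [(example_sequence[t], example_sequence[j])
                for j in range(max(0, t - window_size),
                               min(len(example_sequence), t + window_size + 1))
                if j != t]
print([(inverse_vocab[a], inverse_vocab[b]) for a, b in manual_pairs])
###Output
_____no_output_____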
###Markdown
Negative sampling for one skip-gram The `skipgrams` function returns all positive skip-gram pairs by sliding over a given window span. To produce additional skip-gram pairs that would serve as negative samples for training, you need to sample random words from the vocabulary. Use the `tf.random.log_uniform_candidate_sampler` function to sample `num_ns` number of negative samples for a given target word in a window. You can call the function on one skip-grams's target word and pass the context word as true class to exclude it from being sampled. Key point: `num_ns` (the number of negative samples per a positive context word) in the `[5, 20]` range is [shown to work](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) best for smaller datasets, while `num_ns` in the `[2, 5]` range suffices for larger datasets.
###Code
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class, # class that should be sampled as 'positive'
num_true=1, # each positive skip-gram has 1 positive context class
num_sampled=num_ns, # number of negative context words to sample
unique=True, # all the negative samples should be unique
range_max=vocab_size, # pick index of the samples from [0, vocab_size]
seed=SEED, # seed for reproducibility
name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
###Output
_____no_output_____
###Markdown
Construct one training example For a given positive `(target_word, context_word)` skip-gram, you now also have `num_ns` negative sampled context words that do not appear in the window size neighborhood of `target_word`. Batch the `1` positive `context_word` and `num_ns` negative context words into one tensor. This produces a set of positive skip-grams (labeled as `1`) and negative samples (labeled as `0`) for each target word.
###Code
# Add a dimension so you can use concatenation (in the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concatenate a positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label the first context word as `1` (positive) followed by `num_ns` `0`s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape the target to shape `(1,)` and context and label to `(num_ns+1,)`.
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
###Output
_____no_output_____
###Markdown
Check out the context and the corresponding labels for the target word from the skip-gram example above:
###Code
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
###Output
_____no_output_____
###Markdown
A tuple of `(target, context, label)` tensors constitutes one training example for training your skip-gram negative sampling word2vec model. Notice that the target is of shape `(1,)` while the context and label are of shape `(1+num_ns,)`
###Code
print("target :", target)
print("context :", context)
print("label :", label)
###Output
_____no_output_____
###Markdown
Summary This diagram summarizes the procedure of generating a training example from a sentence:  Notice that the words `temperature` and `code` are not part of the input sentence. They belong to the vocabulary like certain other indices used in the diagram above. Compile all steps into one function Skip-gram sampling table A large dataset means larger vocabulary with higher number of more frequent words such as stopwords. Training examples obtained from sampling commonly occurring words (such as `the`, `is`, `on`) don't add much useful information for the model to learn from. [Mikolov et al.](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) suggest subsampling of frequent words as a helpful practice to improve embedding quality. The `tf.keras.preprocessing.sequence.skipgrams` function accepts a sampling table argument to encode probabilities of sampling any token. You can use the `tf.keras.preprocessing.sequence.make_sampling_table` to generate a word-frequency rank based probabilistic sampling table and pass it to the `skipgrams` function. Inspect the sampling probabilities for a `vocab_size` of 10.
###Code
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
###Output
_____no_output_____
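###Markdown
As an optional aside (a toy-sized sketch only), such a table can be passed straight to `skipgrams`. With the tiny eight-word vocabulary the Zipf-based probabilities are very small, so most or all pairs get dropped; the effect only becomes useful on a real corpus.
###Code
# Illustrative only: subsample the toy sentence with a sampling table.
toy_sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=vocab_size)
subsampled_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
    example_sequence,
    vocabulary_size=vocab_size,
    sampling_table=toy_sampling_table,
    window_size=window_size,
    negative_samples=0)
print(len(subsampled_skip_grams))  # likely far fewer pairs than before, possibly zero
###Output
_____no_output_____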
###Markdown
`sampling_table[i]` denotes the probability of sampling the i-th most common word in a dataset. The function assumes a [Zipf's distribution](https://en.wikipedia.org/wiki/Zipf%27s_law) of the word frequencies for sampling. Key point: The `tf.random.log_uniform_candidate_sampler` already assumes that the vocabulary frequency follows a log-uniform (Zipf's) distribution. Using this distribution-weighted sampling also helps approximate the Noise Contrastive Estimation (NCE) loss with simpler loss functions for training a negative sampling objective. Generate training data Compile all the steps described above into a function that can be called on a list of vectorized sentences obtained from any text dataset. Notice that the sampling table is built before sampling skip-gram word pairs. You will use this function in the later sections.
###Code
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
# Elements of each training example are appended to these lists.
targets, contexts, labels = [], [], []
# Build the sampling table for `vocab_size` tokens.
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
# Iterate over all sequences (sentences) in the dataset.
for sequence in tqdm.tqdm(sequences):
# Generate positive skip-gram pairs for a sequence (sentence).
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
sequence,
vocabulary_size=vocab_size,
sampling_table=sampling_table,
window_size=window_size,
negative_samples=0)
# Iterate over each positive skip-gram pair to produce training examples
# with a positive context word and negative samples.
for target_word, context_word in positive_skip_grams:
context_class = tf.expand_dims(
tf.constant([context_word], dtype="int64"), 1)
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class,
num_true=1,
num_sampled=num_ns,
unique=True,
range_max=vocab_size,
seed=SEED,
name="negative_sampling")
# Build context and label vectors (for one target word)
negative_sampling_candidates = tf.expand_dims(
negative_sampling_candidates, 1)
context = tf.concat([context_class, negative_sampling_candidates], 0)
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Append each element from the training example to global lists.
targets.append(target_word)
contexts.append(context)
labels.append(label)
return targets, contexts, labels
###Output
_____no_output_____
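###Markdown
Before moving to the full corpus, you can exercise the function on the single toy sentence from earlier (an illustrative sketch only). Because the Zipf-based sampling table drops most tokens in such a tiny vocabulary, the returned lists may well come back nearly empty; the function is used properly on the Shakespeare corpus below.
###Code
# Illustrative only: run the function on the toy sentence.
toy_targets, toy_contexts, toy_labels = generate_training_data(
    sequences=[example_sequence],
    window_size=2,
    num_ns=4,
    vocab_size=vocab_size,
    seed=SEED)
print(len(toy_targets), len(toy_contexts), len(toy_labels))
###Output
_____no_output_____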
###Markdown
Prepare training data for word2vec With an understanding of how to work with one sentence for a skip-gram negative sampling based word2vec model, you can proceed to generate training examples from a larger list of sentences! Download text corpus You will use a text file of Shakespeare's writing for this tutorial. Change the following line to run this code on your own data.
###Code
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
###Output
_____no_output_____
###Markdown
Read the text from the file and print the first few lines:
###Code
with open(path_to_file) as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
###Output
_____no_output_____
###Markdown
Use the non-empty lines to construct a `tf.data.TextLineDataset` object for the next steps:
###Code
text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
###Output
_____no_output_____
###Markdown
Vectorize sentences from the corpus You can use the `TextVectorization` layer to vectorize sentences from the corpus. Learn more about using this layer in this [Text classification](https://www.tensorflow.org/tutorials/keras/text_classification) tutorial. Notice from the first few sentences above that the text needs to be in one case and punctuation needs to be removed. To do this, define a `custom_standardization` function that can be used in the `TextVectorization` layer.
###Code
# Now, create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
return tf.strings.regex_replace(lowercase,
'[%s]' % re.escape(string.punctuation), '')
# Define the vocabulary size and the number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the `TextVectorization` layer to normalize, split, and map strings to
# integers. Set the `output_sequence_length` length to pad all samples to the
# same length.
vectorize_layer = layers.TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
###Output
_____no_output_____
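###Markdown
As a quick, optional check (illustrative only), you can call `custom_standardization` on a raw line before adapting the layer; the text should come back lowercased with punctuation stripped.
###Code
# Illustrative only: verify the standardization on a sample string.
print(custom_standardization(tf.constant("The wide road shimmered, in the HOT sun!")))
###Output
_____no_output_____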
###Markdown
Call `TextVectorization.adapt` on the text dataset to create vocabulary.
###Code
vectorize_layer.adapt(text_ds.batch(1024))
###Output
_____no_output_____
###Markdown
Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with `TextVectorization.get_vocabulary`. This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
###Code
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
###Output
_____no_output_____
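###Markdown
Optionally (an illustrative check), call the adapted layer on a single raw sentence to see the padded, length-10 integer output it produces.
###Code
# Illustrative only: vectorize one raw sentence with the adapted layer.
print(vectorize_layer(tf.constant(["The wide road shimmered in the hot sun"])))
###Output
_____no_output_____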
###Markdown
The `vectorize_layer` can now be used to generate vectors for each element in the `text_ds` (a `tf.data.Dataset`). Apply `Dataset.batch`, `Dataset.prefetch`, `Dataset.map`, and `Dataset.unbatch`.
###Code
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
###Output
_____no_output_____
###Markdown
Obtain sequences from the dataset You now have a `tf.data.Dataset` of integer encoded sentences. To prepare the dataset for training a word2vec model, flatten the dataset into a list of sentence vector sequences. This step is required as you would iterate over each sentence in the dataset to produce positive and negative examples.Note: Since the `generate_training_data()` defined earlier uses non-TensorFlow Python/NumPy functions, you could also use a `tf.py_function` or `tf.numpy_function` with `tf.data.Dataset.map`.
###Code
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
###Output
_____no_output_____
###Markdown
Inspect a few examples from `sequences`:
###Code
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
###Output
_____no_output_____
###Markdown
Generate training examples from sequences `sequences` is now a list of int encoded sentences. Just call the `generate_training_data` function defined earlier to generate training examples for the word2vec model. To recap, the function iterates over each word from each sequence to collect positive and negative context words. Length of target, contexts and labels should be the same, representing the total number of training examples.
###Code
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
targets = np.array(targets)
contexts = np.array(contexts)[:,:,0]
labels = np.array(labels)
print('\n')
print(f"targets.shape: {targets.shape}")
print(f"contexts.shape: {contexts.shape}")
print(f"labels.shape: {labels.shape}")
###Output
_____no_output_____
###Markdown
Configure the dataset for performance To perform efficient batching for the potentially large number of training examples, use the `tf.data.Dataset` API. After this step, you would have a `tf.data.Dataset` object of `(target_word, context_word), (label)` elements to train your word2vec model!
###Code
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
###Output
_____no_output_____
###Markdown
Apply `Dataset.cache` and `Dataset.prefetch` to improve performance:
###Code
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
###Output
_____no_output_____
###Markdown
Model and training The word2vec model can be implemented as a classifier to distinguish between true context words from skip-grams and false context words obtained through negative sampling. You can perform a dot product multiplication between the embeddings of target and context words to obtain predictions for labels and compute the loss function against true labels in the dataset. Subclassed word2vec model Use the [Keras Subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models) to define your word2vec model with the following layers:* `target_embedding`: A `tf.keras.layers.Embedding` layer, which looks up the embedding of a word when it appears as a target word. The number of parameters in this layer are `(vocab_size * embedding_dim)`.* `context_embedding`: Another `tf.keras.layers.Embedding` layer, which looks up the embedding of a word when it appears as a context word. The number of parameters in this layer are the same as those in `target_embedding`, i.e. `(vocab_size * embedding_dim)`.* `dots`: A `tf.keras.layers.Dot` layer that computes the dot product of target and context embeddings from a training pair.* `flatten`: A `tf.keras.layers.Flatten` layer to flatten the results of `dots` layer into logits.With the subclassed model, you can define the `call()` function that accepts `(target, context)` pairs which can then be passed into their corresponding embedding layer. Reshape the `context_embedding` to perform a dot product with `target_embedding` and return the flattened result. Key point: The `target_embedding` and `context_embedding` layers can be shared as well. You could also use a concatenation of both embeddings as the final word2vec embedding.
###Code
class Word2Vec(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim):
super(Word2Vec, self).__init__()
self.target_embedding = layers.Embedding(vocab_size,
embedding_dim,
input_length=1,
name="w2v_embedding")
self.context_embedding = layers.Embedding(vocab_size,
embedding_dim,
input_length=num_ns+1)
def call(self, pair):
target, context = pair
# target: (batch, dummy?) # The dummy axis doesn't exist in TF2.7+
# context: (batch, context)
if len(target.shape) == 2:
target = tf.squeeze(target, axis=1)
# target: (batch,)
word_emb = self.target_embedding(target)
# word_emb: (batch, embed)
context_emb = self.context_embedding(context)
# context_emb: (batch, context, embed)
dots = tf.einsum('be,bce->bc', word_emb, context_emb)
# dots: (batch, context)
return dots
###Output
_____no_output_____
###Markdown
Define loss function and compile model For simplicity, you can use `tf.keras.losses.CategoricalCrossentropy` as an alternative to the negative sampling loss. If you would like to write your own custom loss function, you can also do so as follows:```python def custom_loss(x_logit, y_true): return tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=y_true)```It's time to build your model! Instantiate your word2vec class with an embedding dimension of 128 (you could experiment with different values). Compile the model with the `tf.keras.optimizers.Adam` optimizer.
###Code
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Also define a callback to log training statistics for TensorBoard:
###Code
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
###Output
_____no_output_____
###Markdown
Train the model on the `dataset` for some number of epochs:
###Code
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
###Output
_____no_output_____
###Markdown
TensorBoard now shows the word2vec model's accuracy and loss:
###Code
#docs_infra: no_execute
%tensorboard --logdir logs
###Output
_____no_output_____
###Markdown
Embedding lookup and analysis Obtain the weights from the model using `Model.get_layer` and `Layer.get_weights`. The `TextVectorization.get_vocabulary` function provides the vocabulary to build a metadata file with one token per line.
###Code
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
###Output
_____no_output_____
###Markdown
Create and save the vectors and metadata files:
###Code
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
if index == 0:
continue # skip 0, it's padding.
vec = weights[index]
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_m.write(word + "\n")
out_v.close()
out_m.close()
###Output
_____no_output_____
###Markdown
Download the `vectors.tsv` and `metadata.tsv` to analyze the obtained embeddings in the [Embedding Projector](https://projector.tensorflow.org/):
###Code
try:
from google.colab import files
files.download('vectors.tsv')
files.download('metadata.tsv')
except Exception:
pass
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Word2Vec Word2Vec is not a singular algorithm, rather, it is a family of model architectures and optimizations that can be used to learn word embeddings from large datasets. Embeddings learned through Word2Vec have proven to be successful on a variety of downstream natural language processing tasks.Note: This tutorial is based on [Efficient Estimation of Word Representations in Vector Space](https://arxiv.org/pdf/1301.3781.pdf) and[DistributedRepresentations of Words and Phrases and their Compositionality](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). It is not an exact implementation of the papers. Rather, it is intended to illustrate the key ideas.These papers proposed two methods for learning representations of words: * **Continuous Bag-of-Words Model** which predicts the middle word based on surrounding context words. The context consists of a few words before and after the current (middle) word. This architecture is called a bag-of-words model as the order of words in the context is not important.* **Continuous Skip-gram Model** which predict words within a certain range before and after the current word in the same sentence. A worked example of this is given below.You'll use the skip-gram approach in this tutorial. First, you'll explore skip-grams and other concepts using a single sentence for illustration. Next, you'll train your own Word2Vec model on a small dataset. This tutorial also contains code to export the trained embeddings and visualize them in the [TensorFlow Embedding Projector](http://projector.tensorflow.org/). Skip-gram and Negative Sampling While a bag-of-words model predicts a word given the neighboring context, a skip-gram model predicts the context (or neighbors) of a word, given the word itself. The model is trained on skip-grams, which are n-grams that allow tokens to be skipped (see the diagram below for an example). The context of a word can be represented through a set of skip-gram pairs of `(target_word, context_word)` where `context_word` appears in the neighboring context of `target_word`. Consider the following sentence of 8 words.> The wide road shimmered in the hot sun. The context words for each of the 8 words of this sentence are defined by a window size. The window size determines the span of words on either side of a `target_word` that can be considered `context word`. Take a look at this table of skip-grams for target words based on different window sizes. Note: For this tutorial, a window size of *n* implies n words on each side with a total window span of 2*n+1 words across a word.  The training objective of the skip-gram model is to maximize the probability of predicting context words given the target word. For a sequence of words *w1, w2, ... wT*, the objective can be written as the average log probability  where `c` is the size of the training context. The basic skip-gram formulation defines this probability using the softmax function.  where *v* and *v'* are target and context vector representations of words and *W* is vocabulary size. Computing the denominator of this formulation involves performing a full softmax over the entire vocabulary words which is often large (105-107) terms. The [Noise Contrastive Estimation](https://www.tensorflow.org/api_docs/python/tf/nn/nce_loss) loss function is an efficient approximation for a full softmax. 
With an objective to learn word embeddings instead of modelling the word distribution, NCE loss can be [simplified](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) to use negative sampling. The simplified negative sampling objective for a target word is to distinguish the context word from *num_ns* negative samples drawn from noise distribution *Pn(w)* of words. More precisely, an efficient approximation of full softmax over the vocabulary is, for a skip-gram pair, to pose the loss for a target word as a classification problem between the context word and *num_ns* negative samples. A negative sample is defined as a (target_word, context_word) pair such that the context_word does not appear in the `window_size` neighborhood of the target_word. For the example sentence, these are few potential negative samples (when `window_size` is 2).```(hot, shimmered)(wide, hot)(wide, sun)``` In the next section, you'll generate skip-grams and negative samples for a single sentence. You'll also learn about subsampling techniques and train a classification model for positive and negative training examples later in the tutorial. Setup
###Code
import io
import re
import string
import tensorflow as tf
import tqdm
from tensorflow.keras import Model
from tensorflow.keras.layers import Dot, Embedding, Flatten
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
# Load the TensorBoard notebook extension
%load_ext tensorboard
SEED = 42
AUTOTUNE = tf.data.AUTOTUNE
###Output
_____no_output_____
###Markdown
Vectorize an example sentence Consider the following sentence: `The wide road shimmered in the hot sun.`Tokenize the sentence:
###Code
sentence = "The wide road shimmered in the hot sun"
tokens = list(sentence.lower().split())
print(len(tokens))
###Output
_____no_output_____
###Markdown
Create a vocabulary to save mappings from tokens to integer indices.
###Code
vocab, index = {}, 1 # start indexing from 1
vocab['<pad>'] = 0 # add a padding token
for token in tokens:
if token not in vocab:
vocab[token] = index
index += 1
vocab_size = len(vocab)
print(vocab)
###Output
_____no_output_____
###Markdown
Create an inverse vocabulary to save mappings from integer indices to tokens.
###Code
inverse_vocab = {index: token for token, index in vocab.items()}
print(inverse_vocab)
###Output
_____no_output_____
###Markdown
Vectorize your sentence.
###Code
example_sequence = [vocab[word] for word in tokens]
print(example_sequence)
###Output
_____no_output_____
###Markdown
Generate skip-grams from one sentence The `tf.keras.preprocessing.sequence` module provides useful functions that simplify data preparation for Word2Vec. You can use the `tf.keras.preprocessing.sequence.skipgrams` to generate skip-gram pairs from the `example_sequence` with a given `window_size` from tokens in the range `[0, vocab_size)`.Note: `negative_samples` is set to `0` here as batching negative samples generated by this function requires a bit of code. You will use another function to perform negative sampling in the next section.
###Code
window_size = 2
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
example_sequence,
vocabulary_size=vocab_size,
window_size=window_size,
negative_samples=0)
print(len(positive_skip_grams))
###Output
_____no_output_____
###Markdown
Take a look at a few positive skip-grams.
###Code
for target, context in positive_skip_grams[:5]:
print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
###Output
_____no_output_____
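###Markdown
Without a sampling table, `skipgrams` returns every in-window `(target, context)` pair, so a direct count over window positions should match the number of pairs above. The cell below is an optional, illustrative verification.
###Code
# Illustrative only: count the in-window pairs directly and compare.
expected_pairs = sum(
    1
    for i in range(len(example_sequence))
    for j in range(max(0, i - window_size),
                   min(len(example_sequence), i + window_size + 1))
    if j != i)
print(expected_pairs, len(positive_skip_grams))  # both should be 26 for this sentence
###Output
_____no_output_____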
###Markdown
Negative sampling for one skip-gram The `skipgrams` function returns all positive skip-gram pairs by sliding over a given window span. To produce additional skip-gram pairs that would serve as negative samples for training, you need to sample random words from the vocabulary. Use the `tf.random.log_uniform_candidate_sampler` function to sample `num_ns` number of negative samples for a given target word in a window. You can call the function on one skip-grams's target word and pass the context word as true class to exclude it from being sampled. Key point: *num_ns* (number of negative samples per positive context word) between [5, 20] is [shown to work](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) best for smaller datasets, while *num_ns* between [2,5] suffices for larger datasets.
###Code
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class, # class that should be sampled as 'positive'
num_true=1, # each positive skip-gram has 1 positive context class
num_sampled=num_ns, # number of negative context words to sample
unique=True, # all the negative samples should be unique
range_max=vocab_size, # pick index of the samples from [0, vocab_size]
seed=SEED, # seed for reproducibility
name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
###Output
_____no_output_____
###Markdown
Construct one training example For a given positive `(target_word, context_word)` skip-gram, you now also have `num_ns` negative sampled context words that do not appear in the window size neighborhood of `target_word`. Batch the `1` positive `context_word` and `num_ns` negative context words into one tensor. This produces a set of positive skip-grams (labelled as `1`) and negative samples (labelled as `0`) for each target word.
###Code
# Add a dimension so you can use concatenation (on the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concat positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label first context word as 1 (positive) followed by num_ns 0s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape target to shape (1,) and context and label to (num_ns+1,).
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
###Output
_____no_output_____
###Markdown
Take a look at the context and the corresponding labels for the target word from the skip-gram example above.
###Code
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
###Output
_____no_output_____
###Markdown
A tuple of `(target, context, label)` tensors constitutes one training example for training your skip-gram negative sampling Word2Vec model. Notice that the target is of shape `(1,)` while the context and label are of shape `(1+num_ns,)`
###Code
print("target :", target)
print("context :", context)
print("label :", label)
###Output
_____no_output_____
###Markdown
Summary This picture summarizes the procedure of generating training example from a sentence.  Compile all steps into one function Skip-gram Sampling table A large dataset means larger vocabulary with higher number of more frequent words such as stopwords. Training examples obtained from sampling commonly occurring words (such as `the`, `is`, `on`) don't add much useful information for the model to learn from. [Mikolov et al.](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) suggest subsampling of frequent words as a helpful practice to improve embedding quality. The `tf.keras.preprocessing.sequence.skipgrams` function accepts a sampling table argument to encode probabilities of sampling any token. You can use the `tf.keras.preprocessing.sequence.make_sampling_table` to generate a word-frequency rank based probabilistic sampling table and pass it to `skipgrams` function. Take a look at the sampling probabilities for a `vocab_size` of 10.
###Code
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
###Output
_____no_output_____
###Markdown
`sampling_table[i]` denotes the probability of sampling the i-th most common word in a dataset. The function assumes a [Zipf's distribution](https://en.wikipedia.org/wiki/Zipf%27s_law) of the word frequencies for sampling. Key point: The `tf.random.log_uniform_candidate_sampler` already assumes that the vocabulary frequency follows a log-uniform (Zipf's) distribution. Using these distribution weighted sampling also helps approximate the Noise Contrastive Estimation (NCE) loss with simpler loss functions for training a negative sampling objective. Generate training data Compile all the steps described above into a function that can be called on a list of vectorized sentences obtained from any text dataset. Notice that the sampling table is built before sampling skip-gram word pairs. You will use this function in the later sections.
###Code
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
# Elements of each training example are appended to these lists.
targets, contexts, labels = [], [], []
# Build the sampling table for vocab_size tokens.
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
# Iterate over all sequences (sentences) in dataset.
for sequence in tqdm.tqdm(sequences):
# Generate positive skip-gram pairs for a sequence (sentence).
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
sequence,
vocabulary_size=vocab_size,
sampling_table=sampling_table,
window_size=window_size,
negative_samples=0)
# Iterate over each positive skip-gram pair to produce training examples
# with positive context word and negative samples.
for target_word, context_word in positive_skip_grams:
context_class = tf.expand_dims(
tf.constant([context_word], dtype="int64"), 1)
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class,
num_true=1,
num_sampled=num_ns,
unique=True,
range_max=vocab_size,
seed=SEED,
name="negative_sampling")
# Build context and label vectors (for one target word)
negative_sampling_candidates = tf.expand_dims(
negative_sampling_candidates, 1)
context = tf.concat([context_class, negative_sampling_candidates], 0)
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Append each element from the training example to global lists.
targets.append(target_word)
contexts.append(context)
labels.append(label)
return targets, contexts, labels
###Output
_____no_output_____
###Markdown
Prepare training data for Word2Vec With an understanding of how to work with one sentence for a skip-gram negative sampling based Word2Vec model, you can proceed to generate training examples from a larger list of sentences! Download text corpus You will use a text file of Shakespeare's writing for this tutorial. Change the following line to run this code on your own data.
###Code
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
###Output
_____no_output_____
###Markdown
Read text from the file and take a look at the first few lines.
###Code
with open(path_to_file) as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
###Output
_____no_output_____
###Markdown
Use the non-empty lines to construct a `tf.data.TextLineDataset` object for the next steps.
###Code
text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
###Output
_____no_output_____
###Markdown
Vectorize sentences from the corpus You can use the `TextVectorization` layer to vectorize sentences from the corpus. Learn more about using this layer in this [Text Classification](https://www.tensorflow.org/tutorials/keras/text_classification) tutorial. Notice from the first few sentences above that the text needs to be in one case and punctuation needs to be removed. To do this, define a `custom_standardization function` that can be used in the TextVectorization layer.
###Code
# Now, create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
return tf.strings.regex_replace(lowercase,
'[%s]' % re.escape(string.punctuation), '')
# Define the vocabulary size and number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the text vectorization layer to normalize, split, and map strings to
# integers. Set output_sequence_length length to pad all samples to same length.
vectorize_layer = TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
###Output
_____no_output_____
###Markdown
Call `adapt` on the text dataset to create vocabulary.
###Code
vectorize_layer.adapt(text_ds.batch(1024))
###Output
_____no_output_____
###Markdown
Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with `get_vocabulary()`. This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
###Code
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
###Output
_____no_output_____
###Markdown
The `vectorize_layer` can now be used to generate vectors for each element in `text_ds`.
###Code
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
###Output
_____no_output_____
###Markdown
Obtain sequences from the dataset You now have a `tf.data.Dataset` of integer encoded sentences. To prepare the dataset for training a Word2Vec model, flatten the dataset into a list of sentence vector sequences. This step is required as you would iterate over each sentence in the dataset to produce positive and negative examples. Note: Since the `generate_training_data()` defined earlier uses non-TF python/numpy functions, you could also use a `tf.py_function` or `tf.numpy_function` with `tf.data.Dataset.map()`.
###Code
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
###Output
_____no_output_____
###Markdown
Take a look at a few examples from `sequences`.
###Code
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
###Output
_____no_output_____
###Markdown
Generate training examples from sequences `sequences` is now a list of integer-encoded sentences. Call the `generate_training_data()` function defined earlier to generate training examples for the Word2Vec model. To recap, the function iterates over each word from each sequence to collect positive and negative context words. The lengths of targets, contexts, and labels should be the same, representing the total number of training examples.
###Code
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
print(len(targets), len(contexts), len(labels))
###Output
_____no_output_____
###Markdown
Configure the dataset for performance To perform efficient batching for the potentially large number of training examples, use the `tf.data.Dataset` API. After this step, you would have a `tf.data.Dataset` object of `(target_word, context_word), (label)` elements to train your Word2Vec model!
###Code
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
###Output
_____no_output_____
###Markdown
Add `cache()` and `prefetch()` to improve performance.
###Code
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
###Output
_____no_output_____
###Markdown
Model and Training The Word2Vec model can be implemented as a classifier to distinguish between true context words from skip-grams and false context words obtained through negative sampling. You can perform a dot product between the embeddings of target and context words to obtain predictions for labels and compute loss against true labels in the dataset. Subclassed Word2Vec Model Use the [Keras Subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models) to define your Word2Vec model with the following layers:* `target_embedding`: A `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a target word. The number of parameters in this layer are `(vocab_size * embedding_dim)`.* `context_embedding`: Another `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a context word. The number of parameters in this layer are the same as those in `target_embedding`, i.e. `(vocab_size * embedding_dim)`.* `dots`: A `tf.keras.layers.Dot` layer that computes the dot product of target and context embeddings from a training pair.* `flatten`: A `tf.keras.layers.Flatten` layer to flatten the results of `dots` layer into logits.With the subclassed model, you can define the `call()` function that accepts `(target, context)` pairs which can then be passed into their corresponding embedding layer. Reshape the `context_embedding` to perform a dot product with `target_embedding` and return the flattened result. Key point: The `target_embedding` and `context_embedding` layers can be shared as well. You could also use a concatenation of both embeddings as the final Word2Vec embedding.
###Code
class Word2Vec(Model):
def __init__(self, vocab_size, embedding_dim):
super(Word2Vec, self).__init__()
self.target_embedding = Embedding(vocab_size,
embedding_dim,
input_length=1,
name="w2v_embedding")
self.context_embedding = Embedding(vocab_size,
embedding_dim,
input_length=num_ns+1)
self.dots = Dot(axes=(3, 2))
self.flatten = Flatten()
def call(self, pair):
target, context = pair
word_emb = self.target_embedding(target)
context_emb = self.context_embedding(context)
dots = self.dots([context_emb, word_emb])
return self.flatten(dots)
###Output
_____no_output_____
###Markdown
Define loss function and compile model For simplicity, you can use `tf.keras.losses.CategoricalCrossentropy` as an alternative to the negative sampling loss. If you would like to write your own custom loss function, you can also do so as follows:```python def custom_loss(x_logit, y_true): return tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=y_true)```It's time to build your model! Instantiate your Word2Vec class with an embedding dimension of 128 (you could experiment with different values). Compile the model with the `tf.keras.optimizers.Adam` optimizer.
###Code
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Also define a callback to log training statistics for tensorboard.
###Code
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
###Output
_____no_output_____
###Markdown
Train the model with `dataset` prepared above for some number of epochs.
###Code
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
###Output
_____no_output_____
###Markdown
Tensorboard now shows the Word2Vec model's accuracy and loss.
###Code
#docs_infra: no_execute
%tensorboard --logdir logs
###Output
_____no_output_____
###Markdown
Embedding lookup and analysis Obtain the weights from the model using `get_layer()` and `get_weights()`. The `get_vocabulary()` function provides the vocabulary to build a metadata file with one token per line.
###Code
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
###Output
_____no_output_____
###Markdown
Create and save the vectors and metadata file.
###Code
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
if index == 0:
continue # skip 0, it's padding.
vec = weights[index]
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_m.write(word + "\n")
out_v.close()
out_m.close()
###Output
_____no_output_____
###Markdown
Download the `vectors.tsv` and `metadata.tsv` to analyze the obtained embeddings in the [Embedding Projector](https://projector.tensorflow.org/).
###Code
try:
from google.colab import files
files.download('vectors.tsv')
files.download('metadata.tsv')
except Exception:
pass
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Word2Vec Word2Vec is not a singular algorithm, rather, it is a family of model architectures and optimizations that can be used to learn word embeddings from large datasets. Embeddings learned through Word2Vec have proven to be successful on a variety of downstream natural language processing tasks.Note: This tutorial is based on [Efficient Estimation of Word Representations in Vector Space](https://arxiv.org/pdf/1301.3781.pdf) and[DistributedRepresentations of Words and Phrases and their Compositionality](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). It is not an exact implementation of the papers. Rather, it is intended to illustrate the key ideas.These papers proposed two methods for learning representations of words: * **Continuous Bag-of-Words Model** which predicts the middle word based on surrounding context words. The context consists of a few words before and after the current (middle) word. This architecture is called a bag-of-words model as the order of words in the context is not important.* **Continuous Skip-gram Model** which predict words within a certain range before and after the current word in the same sentence. A worked example of this is given below.You'll use the skip-gram approach in this tutorial. First, you'll explore skip-grams and other concepts using a single sentence for illustration. Next, you'll train your own Word2Vec model on a small dataset. This tutorial also contains code to export the trained embeddings and visualize them in the [TensorFlow Embedding Projector](http://projector.tensorflow.org/). Skip-gram and Negative Sampling While a bag-of-words model predicts a word given the neighboring context, a skip-gram model predicts the context (or neighbors) of a word, given the word itself. The model is trained on skip-grams, which are n-grams that allow tokens to be skipped (see the diagram below for an example). The context of a word can be represented through a set of skip-gram pairs of `(target_word, context_word)` where `context_word` appears in the neighboring context of `target_word`. Consider the following sentence of 8 words.> The wide road shimmered in the hot sun. The context words for each of the 8 words of this sentence are defined by a window size. The window size determines the span of words on either side of a `target_word` that can be considered `context word`. Take a look at this table of skip-grams for target words based on different window sizes. Note: For this tutorial, a window size of *n* implies n words on each side with a total window span of 2*n+1 words across a word.  The training objective of the skip-gram model is to maximize the probability of predicting context words given the target word. For a sequence of words *w1, w2, ... wT*, the objective can be written as the average log probability  where `c` is the size of the training context. The basic skip-gram formulation defines this probability using the softmax function.  where *v* and *v'* are target and context vector representations of words and *W* is vocabulary size. Computing the denominator of this formulation involves performing a full softmax over the entire vocabulary words which is often large (105-107) terms. The [Noise Contrastive Estimation](https://www.tensorflow.org/api_docs/python/tf/nn/nce_loss) loss function is an efficient approximation for a full softmax. 
With an objective to learn word embeddings instead of modelling the word distribution, NCE loss can be [simplified](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) to use negative sampling. The simplified negative sampling objective for a target word is to distinguish the context word from *num_ns* negative samples drawn from noise distribution *Pn(w)* of words. More precisely, an efficient approximation of full softmax over the vocabulary is, for a skip-gram pair, to pose the loss for a target word as a classification problem between the context word and *num_ns* negative samples. A negative sample is defined as a (target_word, context_word) pair such that the context_word does not appear in the `window_size` neighborhood of the target_word. For the example sentence, these are a few potential negative samples (when `window_size` is 2).

```
(hot, shimmered)
(wide, hot)
(wide, sun)
```

In the next section, you'll generate skip-grams and negative samples for a single sentence. You'll also learn about subsampling techniques and train a classification model for positive and negative training examples later in the tutorial. Setup
###Code
!pip install tqdm
import io
import itertools
import numpy as np
import os
import re
import string
import tensorflow as tf
import tqdm
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Activation, Dense, Dot, Embedding, Flatten, GlobalAveragePooling1D, Reshape
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
SEED = 42
AUTOTUNE = tf.data.experimental.AUTOTUNE
###Output
_____no_output_____
###Markdown
Vectorize an example sentence Consider the following sentence: `The wide road shimmered in the hot sun.` Tokenize the sentence:
###Code
sentence = "The wide road shimmered in the hot sun"
tokens = list(sentence.lower().split())
print(len(tokens))
###Output
_____no_output_____
###Markdown
Create a vocabulary to save mappings from tokens to integer indices.
###Code
vocab, index = {}, 1 # start indexing from 1
vocab['<pad>'] = 0 # add a padding token
for token in tokens:
if token not in vocab:
vocab[token] = index
index += 1
vocab_size = len(vocab)
print(vocab)
###Output
_____no_output_____
###Markdown
Create an inverse vocabulary to save mappings from integer indices to tokens.
###Code
inverse_vocab = {index: token for token, index in vocab.items()}
print(inverse_vocab)
###Output
_____no_output_____
###Markdown
Vectorize your sentence.
###Code
example_sequence = [vocab[word] for word in tokens]
print(example_sequence)
###Output
_____no_output_____
###Markdown
Generate skip-grams from one sentence The `tf.keras.preprocessing.sequence` module provides useful functions that simplify data preparation for Word2Vec. You can use the `tf.keras.preprocessing.sequence.skipgrams` to generate skip-gram pairs from the `example_sequence` with a given `window_size` from tokens in the range `[0, vocab_size)`.Note: `negative_samples` is set to `0` here as batching negative samples generated by this function requires a bit of code. You will use another function to perform negative sampling in the next section.
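To build intuition for what `skipgrams` returns, here is a rough pure-Python sketch of positive skip-gram generation with a sliding window (illustrative only; the Keras function additionally supports shuffling, sampling tables, and negative sampling):

```python
# Rough sketch of positive skip-gram generation (not the Keras implementation).
def naive_skipgrams(sequence, window_size):
    pairs = []
    for i, target in enumerate(sequence):
        # The context window spans up to window_size tokens on each side of the target.
        start = max(0, i - window_size)
        end = min(len(sequence), i + window_size + 1)
        for j in range(start, end):
            if j != i:
                pairs.append((target, sequence[j]))
    return pairs

print(naive_skipgrams(example_sequence, window_size=2)[:5])
```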
###Code
window_size = 2
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
example_sequence,
vocabulary_size=vocab_size,
window_size=window_size,
negative_samples=0)
print(len(positive_skip_grams))
###Output
_____no_output_____
###Markdown
Take a look at a few positive skip-grams.
###Code
for target, context in positive_skip_grams[:5]:
print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
###Output
_____no_output_____
###Markdown
Negative sampling for one skip-gram The `skipgrams` function returns all positive skip-gram pairs by sliding over a given window span. To produce additional skip-gram pairs that would serve as negative samples for training, you need to sample random words from the vocabulary. Use the `tf.random.log_uniform_candidate_sampler` function to sample `num_ns` number of negative samples for a given target word in a window. You can call the function on one skip-gram's target word and pass the context word as true class to exclude it from being sampled. Key point: *num_ns* (number of negative samples per positive context word) between [5, 20] is [shown to work](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) best for smaller datasets, while *num_ns* between [2,5] suffices for larger datasets.
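A side note on the sampler: it draws candidate indices from an approximately Zipfian (log-uniform) distribution, so lower indices (more frequent words) are more likely to be picked. The sketch below computes the per-index probability using the formula given in the TensorFlow documentation for `log_uniform_candidate_sampler`; it is only an illustration, not part of the pipeline:

```python
import numpy as np

# Probability that a log-uniform (Zipfian) sampler picks vocabulary index k,
# following the formula documented for tf.random.log_uniform_candidate_sampler.
def log_uniform_prob(k, range_max):
    return (np.log(k + 2) - np.log(k + 1)) / np.log(range_max + 1)

probs = [log_uniform_prob(k, range_max=vocab_size) for k in range(vocab_size)]
print(np.round(probs, 3))  # lower indices (more frequent words) get higher probability
```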
###Code
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class, # class that should be sampled as 'positive'
num_true=1, # each positive skip-gram has 1 positive context class
num_sampled=num_ns, # number of negative context words to sample
unique=True, # all the negative samples should be unique
range_max=vocab_size, # pick index of the samples from [0, vocab_size]
seed=SEED, # seed for reproducibility
name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
###Output
_____no_output_____
###Markdown
Construct one training example For a given positive `(target_word, context_word)` skip-gram, you now also have `num_ns` negative sampled context words that do not appear in the window size neighborhood of `target_word`. Batch the `1` positive `context_word` and `num_ns` negative context words into one tensor. This produces a set of positive skip-grams (labelled as `1`) and negative samples (labelled as `0`) for each target word.
###Code
# Add a dimension so you can use concatenation (on the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concat positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label first context word as 1 (positive) followed by num_ns 0s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape target to shape (1,) and context and label to (num_ns+1,).
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
###Output
_____no_output_____
###Markdown
Take a look at the context and the corresponding labels for the target word from the skip-gram example above.
###Code
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
###Output
_____no_output_____
###Markdown
A tuple of `(target, context, label)` tensors constitutes one training example for training your skip-gram negative sampling Word2Vec model. Notice that the target is of shape `(1,)` while the context and label are of shape `(1+num_ns,)`
###Code
print("target :", target)
print("context :", context)
print("label :", label)
###Output
_____no_output_____
###Markdown
Summary This picture summarizes the procedure of generating a training example from a sentence.  Compile all steps into one function Skip-gram Sampling table A large dataset means larger vocabulary with higher number of more frequent words such as stopwords. Training examples obtained from sampling commonly occurring words (such as `the`, `is`, `on`) don't add much useful information for the model to learn from. [Mikolov et al.](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) suggest subsampling of frequent words as a helpful practice to improve embedding quality. The `tf.keras.preprocessing.sequence.skipgrams` function accepts a sampling table argument to encode probabilities of sampling any token. You can use the `tf.keras.preprocessing.sequence.make_sampling_table` to generate a word-frequency rank based probabilistic sampling table and pass it to the `skipgrams` function. Take a look at the sampling probabilities for a `vocab_size` of 10.
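As background on the subsampling rule: Mikolov et al. discard each occurrence of a word *w_i* with probability `1 - sqrt(t / f(w_i))`, where `f(w_i)` is the word's relative corpus frequency and `t` is a small threshold (for example `1e-5`). The sketch below illustrates that rule (an assumption based on the paper, not the exact `make_sampling_table` implementation); the Keras sampling table shown next encodes a related rank-based probability.

```python
import numpy as np

# Keep probability under the word2vec subsampling rule from Mikolov et al.:
# a token with relative corpus frequency f is kept with probability sqrt(t / f),
# capped at 1 (rare words are always kept, very frequent words are often dropped).
def keep_probability(word_frequency, t=1e-5):
    return min(1.0, np.sqrt(t / word_frequency))

for f in [0.05, 0.01, 0.001, 0.0001]:
    print(f"frequency={f:.4f} -> keep with probability {keep_probability(f):.3f}")
```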
###Code
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
###Output
_____no_output_____
###Markdown
`sampling_table[i]` denotes the probability of sampling the i-th most common word in a dataset. The function assumes a [Zipf's distribution](https://en.wikipedia.org/wiki/Zipf%27s_law) of the word frequencies for sampling. Key point: The `tf.random.log_uniform_candidate_sampler` already assumes that the vocabulary frequency follows a log-uniform (Zipf's) distribution. Using this distribution-weighted sampling also helps approximate the Noise Contrastive Estimation (NCE) loss with simpler loss functions for training a negative sampling objective. Generate training data Compile all the steps described above into a function that can be called on a list of vectorized sentences obtained from any text dataset. Notice that the sampling table is built before sampling skip-gram word pairs. You will use this function in the later sections.
###Code
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
# Elements of each training example are appended to these lists.
targets, contexts, labels = [], [], []
# Build the sampling table for vocab_size tokens.
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
# Iterate over all sequences (sentences) in dataset.
for sequence in tqdm.tqdm(sequences):
# Generate positive skip-gram pairs for a sequence (sentence).
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
sequence,
vocabulary_size=vocab_size,
sampling_table=sampling_table,
window_size=window_size,
negative_samples=0)
# Iterate over each positive skip-gram pair to produce training examples
# with positive context word and negative samples.
for target_word, context_word in positive_skip_grams:
context_class = tf.expand_dims(
tf.constant([context_word], dtype="int64"), 1)
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class,
num_true=1,
num_sampled=num_ns,
unique=True,
range_max=vocab_size,
seed=SEED,
name="negative_sampling")
# Build context and label vectors (for one target word)
negative_sampling_candidates = tf.expand_dims(
negative_sampling_candidates, 1)
context = tf.concat([context_class, negative_sampling_candidates], 0)
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Append each element from the training example to global lists.
targets.append(target_word)
contexts.append(context)
labels.append(label)
return targets, contexts, labels
###Output
_____no_output_____
###Markdown
Prepare training data for Word2Vec With an understanding of how to work with one sentence for a skip-gram negative sampling based Word2Vec model, you can proceed to generate training examples from a larger list of sentences! Download text corpus You will use a text file of Shakespeare's writing for this tutorial. Change the following line to run this code on your own data.
###Code
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
###Output
_____no_output_____
###Markdown
Read text from the file and take a look at the first few lines.
###Code
with open(path_to_file) as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
###Output
_____no_output_____
###Markdown
Use the non-empty lines to construct a `tf.data.TextLineDataset` object for the next steps.
###Code
text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
###Output
_____no_output_____
###Markdown
Vectorize sentences from the corpus You can use the `TextVectorization` layer to vectorize sentences from the corpus. Learn more about using this layer in this [Text Classification](https://www.tensorflow.org/tutorials/keras/text_classification) tutorial. Notice from the first few sentences above that the text needs to be in one case and punctuation needs to be removed. To do this, define a `custom_standardization function` that can be used in the TextVectorization layer.
###Code
# We create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
return tf.strings.regex_replace(lowercase,
'[%s]' % re.escape(string.punctuation), '')
# Define the vocabulary size and number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the text vectorization layer to normalize, split, and map strings to
# integers. Set output_sequence_length length to pad all samples to same length.
vectorize_layer = TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
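# Quick sanity check (illustrative only): the standardization lowercases text and strips punctuation.
print(custom_standardization(tf.constant(["The wide road shimmered in the hot sun!"])))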
###Output
_____no_output_____
###Markdown
Call `adapt` on the text dataset to create vocabulary.
###Code
vectorize_layer.adapt(text_ds.batch(1024))
###Output
_____no_output_____
###Markdown
Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with `get_vocabulary()`. This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
###Code
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
###Output
_____no_output_____
###Markdown
The vectorize_layer can now be used to generate vectors for each element in the `text_ds`.
###Code
def vectorize_text(text):
text = tf.expand_dims(text, -1)
return tf.squeeze(vectorize_layer(text))
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
###Output
_____no_output_____
###Markdown
Obtain sequences from the dataset You now have a `tf.data.Dataset` of integer encoded sentences. To prepare the dataset for training a Word2Vec model, flatten the dataset into a list of sentence vector sequences. This step is required as you would iterate over each sentence in the dataset to produce positive and negative examples. Note: Since the `generate_training_data()` defined earlier uses non-TF python/numpy functions, you could also use a `tf.py_function` or `tf.numpy_function` with `tf.data.Dataset.map()`.
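For completeness, here is a minimal sketch of the `tf.py_function` route mentioned in the note above. It uses a hypothetical toy mapping that just counts the non-padding tokens of each sequence; it is not part of the original pipeline.

```python
import numpy as np
import tensorflow as tf

# Toy example of wrapping a plain NumPy function with tf.py_function inside Dataset.map.
def count_tokens_np(sequence):
    # Inside tf.py_function the argument is an eager tensor, so NumPy ops work directly.
    return np.int64(np.count_nonzero(sequence))

token_counts_ds = text_vector_ds.map(
    lambda seq: tf.py_function(count_tokens_np, inp=[seq], Tout=tf.int64))
print(next(iter(token_counts_ds)).numpy())
```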
###Code
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
###Output
_____no_output_____
###Markdown
Take a look at a few examples from `sequences`.
###Code
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
###Output
_____no_output_____
###Markdown
Generate training examples from sequences `sequences` is now a list of int encoded sentences. Just call the `generate_training_data()` function defined earlier to generate training examples for the Word2Vec model. To recap, the function iterates over each word from each sequence to collect positive and negative context words. The lengths of the targets, contexts, and labels should be the same, representing the total number of training examples.
###Code
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
print(len(targets), len(contexts), len(labels))
###Output
_____no_output_____
###Markdown
Configure the dataset for performance To perform efficient batching for the potentially large number of training examples, use the `tf.data.Dataset` API. After this step, you would have a `tf.data.Dataset` object of `(target_word, context_word), (label)` elements to train your Word2Vec model!
###Code
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
###Output
_____no_output_____
###Markdown
Add `cache()` and `prefetch()` to improve performance.
###Code
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
###Output
_____no_output_____
###Markdown
Model and Training The Word2Vec model can be implemented as a classifier to distinguish between true context words from skip-grams and false context words obtained through negative sampling. You can perform a dot product between the embeddings of target and context words to obtain predictions for labels and compute loss against true labels in the dataset. Subclassed Word2Vec Model Use the [Keras Subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models) to define your Word2Vec model with the following layers:

* `target_embedding`: A `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a target word. The number of parameters in this layer is `(vocab_size * embedding_dim)`.
* `context_embedding`: Another `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a context word. The number of parameters in this layer is the same as in `target_embedding`, i.e. `(vocab_size * embedding_dim)`.
* `dots`: A `tf.keras.layers.Dot` layer that computes the dot product of target and context embeddings from a training pair.
* `flatten`: A `tf.keras.layers.Flatten` layer to flatten the results of the `dots` layer into logits.

With the subclassed model, you can define the `call()` function that accepts `(target, context)` pairs which can then be passed into their corresponding embedding layer. Reshape the `context_embedding` to perform a dot product with `target_embedding` and return the flattened result. Key point: The `target_embedding` and `context_embedding` layers can be shared as well. You could also use a concatenation of both embeddings as the final Word2Vec embedding.
###Code
class Word2Vec(Model):
  def __init__(self, vocab_size, embedding_dim):
    super(Word2Vec, self).__init__()
    # Embedding looked up when a word appears as the target word.
    self.target_embedding = Embedding(vocab_size,
                                      embedding_dim,
                                      input_length=1,
                                      name="w2v_embedding")
    # Embedding looked up when a word appears as a (positive or negative) context word.
    self.context_embedding = Embedding(vocab_size,
                                       embedding_dim,
                                       input_length=num_ns+1)
    # Dot product between target and context embeddings.
    self.dots = Dot(axes=(3, 2))
    # Flatten the dot products into logits, one per context word.
    self.flatten = Flatten()

  def call(self, pair):
    target, context = pair
    we = self.target_embedding(target)    # target embeddings
    ce = self.context_embedding(context)  # context embeddings (1 positive + num_ns negative)
    dots = self.dots([ce, we])            # dot product of each context embedding with the target
    return self.flatten(dots)             # flatten into logits, one per context word
###Output
_____no_output_____
###Markdown
Define loss function and compile model For simplicity, you can use `tf.keras.losses.CategoricalCrossentropy` as an alternative to the negative sampling loss. If you would like to write your own custom loss function, you can also do so as follows:

```python
def custom_loss(x_logit, y_true):
    return tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=y_true)
```

It's time to build your model! Instantiate your Word2Vec class with an embedding dimension of 128 (you could experiment with different values). Compile the model with the `tf.keras.optimizers.Adam` optimizer.
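As a toy illustration of what this loss computes for a single training example (made-up logits, positive context word in the first position):

```python
import tensorflow as tf

# One example: logits for 1 positive + num_ns negative context words (made-up numbers).
example_logits = tf.constant([[2.0, -1.0, 0.5, -0.5, 0.0]])  # shape (1, num_ns + 1)
example_labels = tf.constant([[1.0, 0.0, 0.0, 0.0, 0.0]])    # positive context word first
loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
print(loss_fn(example_labels, example_logits).numpy())       # cross-entropy for this example
```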
###Code
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Also define a callback to log training statistics for tensorboard.
###Code
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
###Output
_____no_output_____
###Markdown
Train the model with `dataset` prepared above for some number of epochs.
###Code
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
###Output
_____no_output_____
###Markdown
TensorBoard now shows the Word2Vec model's accuracy and loss.

```python
%tensorboard --logdir logs
```

 Embedding lookup and analysis Obtain the weights from the model using `get_layer()` and `get_weights()`. The `get_vocabulary()` function provides the vocabulary to build a metadata file with one token per line.
###Code
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
###Output
_____no_output_____
###Markdown
Create and save the vectors and metadata file.
###Code
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
if index == 0: continue # skip 0, it's padding.
vec = weights[index]
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_m.write(word + "\n")
out_v.close()
out_m.close()
###Output
_____no_output_____
###Markdown
Download the `vectors.tsv` and `metadata.tsv` to analyze the obtained embeddings in the [Embedding Projector](https://projector.tensorflow.org/).
###Code
try:
from google.colab import files
files.download('vectors.tsv')
files.download('metadata.tsv')
except Exception:
pass
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Word2Vec Word2Vec is not a singular algorithm, rather, it is a family of model architectures and optimizations that can be used to learn word embeddings from large datasets. Embeddings learned through Word2Vec have proven to be successful on a variety of downstream natural language processing tasks.Note: This tutorial is based on [Efficient Estimation of Word Representations in Vector Space](https://arxiv.org/pdf/1301.3781.pdf) and[DistributedRepresentations of Words and Phrases and their Compositionality](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). It is not an exact implementation of the papers. Rather, it is intended to illustrate the key ideas.These papers proposed two methods for learning representations of words: * **Continuous Bag-of-Words Model** which predicts the middle word based on surrounding context words. The context consists of a few words before and after the current (middle) word. This architecture is called a bag-of-words model as the order of words in the context is not important.* **Continuous Skip-gram Model** which predict words within a certain range before and after the current word in the same sentence. A worked example of this is given below.You'll use the skip-gram approach in this tutorial. First, you'll explore skip-grams and other concepts using a single sentence for illustration. Next, you'll train your own Word2Vec model on a small dataset. This tutorial also contains code to export the trained embeddings and visualize them in the [TensorFlow Embedding Projector](http://projector.tensorflow.org/). Skip-gram and Negative Sampling While a bag-of-words model predicts a word given the neighboring context, a skip-gram model predicts the context (or neighbors) of a word, given the word itself. The model is trained on skip-grams, which are n-grams that allow tokens to be skipped (see the diagram below for an example). The context of a word can be represented through a set of skip-gram pairs of `(target_word, context_word)` where `context_word` appears in the neighboring context of `target_word`. Consider the following sentence of 8 words.> The wide road shimmered in the hot sun. The context words for each of the 8 words of this sentence are defined by a window size. The window size determines the span of words on either side of a `target_word` that can be considered `context word`. Take a look at this table of skip-grams for target words based on different window sizes. Note: For this tutorial, a window size of *n* implies n words on each side with a total window span of 2*n+1 words across a word.  The training objective of the skip-gram model is to maximize the probability of predicting context words given the target word. For a sequence of words *w1, w2, ... wT*, the objective can be written as the average log probability  where `c` is the size of the training context. The basic skip-gram formulation defines this probability using the softmax function.  where *v* and *v'* are target and context vector representations of words and *W* is vocabulary size. Computing the denominator of this formulation involves performing a full softmax over the entire vocabulary words which is often large (105-107) terms. The [Noise Contrastive Estimation](https://www.tensorflow.org/api_docs/python/tf/nn/nce_loss) loss function is an efficient approximation for a full softmax. 
With an objective to learn word embeddings instead of modelling the word distribution, NCE loss can be [simplified](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) to use negative sampling. The simplified negative sampling objective for a target word is to distinguish the context word from *num_ns* negative samples drawn from noise distribution *Pn(w)* of words. More precisely, an efficient approximation of full softmax over the vocabulary is, for a skip-gram pair, to pose the loss for a target word as a classification problem between the context word and *num_ns* negative samples. A negative sample is defined as a (target_word, context_word) pair such that the context_word does not appear in the `window_size` neighborhood of the target_word. For the example sentence, these are few potential negative samples (when `window_size` is 2).```(hot, shimmered)(wide, hot)(wide, sun)``` In the next section, you'll generate skip-grams and negative samples for a single sentence. You'll also learn about subsampling techniques and train a classification model for positive and negative training examples later in the tutorial. Setup
###Code
import io
import re
import string
import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
# Load the TensorBoard notebook extension
%load_ext tensorboard
SEED = 42
AUTOTUNE = tf.data.AUTOTUNE
###Output
_____no_output_____
###Markdown
Vectorize an example sentence Consider the following sentence: `The wide road shimmered in the hot sun.` Tokenize the sentence:
###Code
sentence = "The wide road shimmered in the hot sun"
tokens = list(sentence.lower().split())
print(len(tokens))
###Output
_____no_output_____
###Markdown
Create a vocabulary to save mappings from tokens to integer indices.
###Code
vocab, index = {}, 1 # start indexing from 1
vocab['<pad>'] = 0 # add a padding token
for token in tokens:
if token not in vocab:
vocab[token] = index
index += 1
vocab_size = len(vocab)
print(vocab)
###Output
_____no_output_____
###Markdown
Create an inverse vocabulary to save mappings from integer indices to tokens.
###Code
inverse_vocab = {index: token for token, index in vocab.items()}
print(inverse_vocab)
###Output
_____no_output_____
###Markdown
Vectorize your sentence.
###Code
example_sequence = [vocab[word] for word in tokens]
print(example_sequence)
###Output
_____no_output_____
###Markdown
Generate skip-grams from one sentence The `tf.keras.preprocessing.sequence` module provides useful functions that simplify data preparation for Word2Vec. You can use the `tf.keras.preprocessing.sequence.skipgrams` to generate skip-gram pairs from the `example_sequence` with a given `window_size` from tokens in the range `[0, vocab_size)`.Note: `negative_samples` is set to `0` here as batching negative samples generated by this function requires a bit of code. You will use another function to perform negative sampling in the next section.
###Code
window_size = 2
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
example_sequence,
vocabulary_size=vocab_size,
window_size=window_size,
negative_samples=0)
print(len(positive_skip_grams))
###Output
_____no_output_____
###Markdown
Take a look at a few positive skip-grams.
###Code
for target, context in positive_skip_grams[:5]:
print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
###Output
_____no_output_____
###Markdown
Negative sampling for one skip-gram The `skipgrams` function returns all positive skip-gram pairs by sliding over a given window span. To produce additional skip-gram pairs that would serve as negative samples for training, you need to sample random words from the vocabulary. Use the `tf.random.log_uniform_candidate_sampler` function to sample `num_ns` number of negative samples for a given target word in a window. You can call the function on one skip-grams's target word and pass the context word as true class to exclude it from being sampled. Key point: *num_ns* (number of negative samples per positive context word) between [5, 20] is [shown to work](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) best for smaller datasets, while *num_ns* between [2,5] suffices for larger datasets.
###Code
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class, # class that should be sampled as 'positive'
num_true=1, # each positive skip-gram has 1 positive context class
num_sampled=num_ns, # number of negative context words to sample
unique=True, # all the negative samples should be unique
range_max=vocab_size, # pick index of the samples from [0, vocab_size]
seed=SEED, # seed for reproducibility
name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
###Output
_____no_output_____
###Markdown
Construct one training example For a given positive `(target_word, context_word)` skip-gram, you now also have `num_ns` negative sampled context words that do not appear in the window size neighborhood of `target_word`. Batch the `1` positive `context_word` and `num_ns` negative context words into one tensor. This produces a set of positive skip-grams (labelled as `1`) and negative samples (labelled as `0`) for each target word.
###Code
# Add a dimension so you can use concatenation (on the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concat positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label first context word as 1 (positive) followed by num_ns 0s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape target to shape (1,) and context and label to (num_ns+1,).
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
###Output
_____no_output_____
###Markdown
Take a look at the context and the corresponding labels for the target word from the skip-gram example above.
###Code
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
###Output
_____no_output_____
###Markdown
A tuple of `(target, context, label)` tensors constitutes one training example for training your skip-gram negative sampling Word2Vec model. Notice that the target is of shape `(1,)` while the context and label are of shape `(1+num_ns,)`
###Code
print("target :", target)
print("context :", context)
print("label :", label)
###Output
_____no_output_____
###Markdown
Summary This picture summarizes the procedure of generating training example from a sentence.  Compile all steps into one function Skip-gram Sampling table A large dataset means larger vocabulary with higher number of more frequent words such as stopwords. Training examples obtained from sampling commonly occurring words (such as `the`, `is`, `on`) don't add much useful information for the model to learn from. [Mikolov et al.](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) suggest subsampling of frequent words as a helpful practice to improve embedding quality. The `tf.keras.preprocessing.sequence.skipgrams` function accepts a sampling table argument to encode probabilities of sampling any token. You can use the `tf.keras.preprocessing.sequence.make_sampling_table` to generate a word-frequency rank based probabilistic sampling table and pass it to `skipgrams` function. Take a look at the sampling probabilities for a `vocab_size` of 10.
###Code
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
###Output
_____no_output_____
###Markdown
`sampling_table[i]` denotes the probability of sampling the i-th most common word in a dataset. The function assumes a [Zipf's distribution](https://en.wikipedia.org/wiki/Zipf%27s_law) of the word frequencies for sampling. Key point: The `tf.random.log_uniform_candidate_sampler` already assumes that the vocabulary frequency follows a log-uniform (Zipf's) distribution. Using these distribution weighted sampling also helps approximate the Noise Contrastive Estimation (NCE) loss with simpler loss functions for training a negative sampling objective. Generate training data Compile all the steps described above into a function that can be called on a list of vectorized sentences obtained from any text dataset. Notice that the sampling table is built before sampling skip-gram word pairs. You will use this function in the later sections.
###Code
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
# Elements of each training example are appended to these lists.
targets, contexts, labels = [], [], []
# Build the sampling table for vocab_size tokens.
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
# Iterate over all sequences (sentences) in dataset.
for sequence in tqdm.tqdm(sequences):
# Generate positive skip-gram pairs for a sequence (sentence).
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
sequence,
vocabulary_size=vocab_size,
sampling_table=sampling_table,
window_size=window_size,
negative_samples=0)
# Iterate over each positive skip-gram pair to produce training examples
# with positive context word and negative samples.
for target_word, context_word in positive_skip_grams:
context_class = tf.expand_dims(
tf.constant([context_word], dtype="int64"), 1)
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class,
num_true=1,
num_sampled=num_ns,
unique=True,
range_max=vocab_size,
seed=SEED,
name="negative_sampling")
# Build context and label vectors (for one target word)
negative_sampling_candidates = tf.expand_dims(
negative_sampling_candidates, 1)
context = tf.concat([context_class, negative_sampling_candidates], 0)
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Append each element from the training example to global lists.
targets.append(target_word)
contexts.append(context)
labels.append(label)
return targets, contexts, labels
###Output
_____no_output_____
###Markdown
Prepare training data for Word2Vec With an understanding of how to work with one sentence for a skip-gram negative sampling based Word2Vec model, you can proceed to generate training examples from a larger list of sentences! Download text corpus You will use a text file of Shakespeare's writing for this tutorial. Change the following line to run this code on your own data.
###Code
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
###Output
_____no_output_____
###Markdown
Read text from the file and take a look at the first few lines.
###Code
with open(path_to_file) as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
###Output
_____no_output_____
###Markdown
Use the non-empty lines to construct a `tf.data.TextLineDataset` object for the next steps.
###Code
text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
###Output
_____no_output_____
###Markdown
Vectorize sentences from the corpus You can use the `TextVectorization` layer to vectorize sentences from the corpus. Learn more about using this layer in this [Text Classification](https://www.tensorflow.org/tutorials/keras/text_classification) tutorial. Notice from the first few sentences above that the text needs to be in one case and punctuation needs to be removed. To do this, define a `custom_standardization function` that can be used in the TextVectorization layer.
###Code
# Now, create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
return tf.strings.regex_replace(lowercase,
'[%s]' % re.escape(string.punctuation), '')
# Define the vocabulary size and number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the text vectorization layer to normalize, split, and map strings to
# integers. Set output_sequence_length length to pad all samples to same length.
vectorize_layer = layers.experimental.preprocessing.TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
###Output
_____no_output_____
###Markdown
Call `adapt` on the text dataset to create vocabulary.
###Code
vectorize_layer.adapt(text_ds.batch(1024))
###Output
_____no_output_____
###Markdown
Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with `get_vocabulary()`. This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
###Code
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
###Output
_____no_output_____
###Markdown
The vectorize_layer can now be used to generate vectors for each element in the `text_ds`.
###Code
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
###Output
_____no_output_____
###Markdown
Obtain sequences from the dataset You now have a `tf.data.Dataset` of integer encoded sentences. To prepare the dataset for training a Word2Vec model, flatten the dataset into a list of sentence vector sequences. This step is required as you would iterate over each sentence in the dataset to produce positive and negative examples. Note: Since the `generate_training_data()` defined earlier uses non-TF python/numpy functions, you could also use a `tf.py_function` or `tf.numpy_function` with `tf.data.Dataset.map()`.
###Code
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
###Output
_____no_output_____
###Markdown
Take a look at a few examples from `sequences`.
###Code
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
###Output
_____no_output_____
###Markdown
Generate training examples from sequences `sequences` is now a list of int encoded sentences. Just call the `generate_training_data()` function defined earlier to generate training examples for the Word2Vec model. To recap, the function iterates over each word from each sequence to collect positive and negative context words. The lengths of the targets, contexts, and labels should be the same, representing the total number of training examples.
###Code
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
targets = np.array(targets)
contexts = np.array(contexts)[:,:,0]
labels = np.array(labels)
print('\n')
print(f"targets.shape: {targets.shape}")
print(f"contexts.shape: {contexts.shape}")
print(f"labels.shape: {labels.shape}")
###Output
_____no_output_____
###Markdown
Configure the dataset for performance To perform efficient batching for the potentially large number of training examples, use the `tf.data.Dataset` API. After this step, you would have a `tf.data.Dataset` object of `(target_word, context_word), (label)` elements to train your Word2Vec model!
###Code
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
###Output
_____no_output_____
###Markdown
Add `cache()` and `prefetch()` to improve performance.
###Code
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
###Output
_____no_output_____
###Markdown
Model and Training The Word2Vec model can be implemented as a classifier to distinguish between true context words from skip-grams and false context words obtained through negative sampling. You can perform a dot product between the embeddings of target and context words to obtain predictions for labels and compute loss against true labels in the dataset. Subclassed Word2Vec Model Use the [Keras Subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models) to define your Word2Vec model with the following layers:* `target_embedding`: A `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a target word. The number of parameters in this layer are `(vocab_size * embedding_dim)`.* `context_embedding`: Another `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a context word. The number of parameters in this layer are the same as those in `target_embedding`, i.e. `(vocab_size * embedding_dim)`.* `dots`: A `tf.keras.layers.Dot` layer that computes the dot product of target and context embeddings from a training pair.* `flatten`: A `tf.keras.layers.Flatten` layer to flatten the results of `dots` layer into logits.With the subclassed model, you can define the `call()` function that accepts `(target, context)` pairs which can then be passed into their corresponding embedding layer. Reshape the `context_embedding` to perform a dot product with `target_embedding` and return the flattened result. Key point: The `target_embedding` and `context_embedding` layers can be shared as well. You could also use a concatenation of both embeddings as the final Word2Vec embedding.
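The implementation below computes the logits with `tf.einsum('be,bce->bc', ...)`. As a small illustration of that contraction (toy shapes chosen here only for clarity), it is just a batch of dot products between the target embedding and each context embedding:

```python
import tensorflow as tf

# Toy shapes: batch of 2 examples, 5 context words, embedding size 3.
word_emb = tf.random.normal((2, 3))        # (batch, embed)
context_emb = tf.random.normal((2, 5, 3))  # (batch, context, embed)

# 'be,bce->bc': for every batch element, dot the word embedding with each context embedding.
dots = tf.einsum('be,bce->bc', word_emb, context_emb)
print(dots.shape)  # (2, 5)

# Same result via matmul, for comparison.
same = tf.squeeze(tf.matmul(context_emb, tf.expand_dims(word_emb, -1)), -1)
print(tf.reduce_max(tf.abs(dots - same)).numpy())  # ~0.0
```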
###Code
class Word2Vec(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim):
super(Word2Vec, self).__init__()
self.target_embedding = layers.Embedding(vocab_size,
embedding_dim,
input_length=1,
name="w2v_embedding")
self.context_embedding = layers.Embedding(vocab_size,
embedding_dim,
input_length=num_ns+1)
def call(self, pair):
target, context = pair
# target: (batch, dummy?) # The dummy axis doesn't exist in TF2.7+
# context: (batch, context)
if len(target.shape) == 2:
target = tf.squeeze(target, axis=1)
# target: (batch,)
word_emb = self.target_embedding(target)
# word_emb: (batch, embed)
context_emb = self.context_embedding(context)
# context_emb: (batch, context, embed)
dots = tf.einsum('be,bce->bc', word_emb, context_emb)
# dots: (batch, context)
return dots
###Output
_____no_output_____
###Markdown
Define loss function and compile model For simplicity, you can use `tf.keras.losses.CategoricalCrossEntropy` as an alternative to the negative sampling loss. If you would like to write your own custom loss function, you can also do so as follows:``` pythondef custom_loss(x_logit, y_true): return tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=y_true)```It's time to build your model! Instantiate your Word2Vec class with an embedding dimension of 128 (you could experiment with different values). Compile the model with the `tf.keras.optimizers.Adam` optimizer.
###Code
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Also define a callback to log training statistics for tensorboard.
###Code
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
###Output
_____no_output_____
###Markdown
Train the model with `dataset` prepared above for some number of epochs.
###Code
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
###Output
_____no_output_____
###Markdown
Tensorboard now shows the Word2Vec model's accuracy and loss.
###Code
#docs_infra: no_execute
%tensorboard --logdir logs
###Output
_____no_output_____
###Markdown
Embedding lookup and analysis Obtain the weights from the model using `get_layer()` and `get_weights()`. The `get_vocabulary()` function provides the vocabulary to build a metadata file with one token per line.
###Code
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
###Output
_____no_output_____
###Markdown
Create and save the vectors and metadata file.
###Code
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
if index == 0:
continue # skip 0, it's padding.
vec = weights[index]
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_m.write(word + "\n")
out_v.close()
out_m.close()
###Output
_____no_output_____
###Markdown
Download the `vectors.tsv` and `metadata.tsv` to analyze the obtained embeddings in the [Embedding Projector](https://projector.tensorflow.org/).
###Code
try:
from google.colab import files
files.download('vectors.tsv')
files.download('metadata.tsv')
except Exception:
pass
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Word2Vec Word2Vec is not a singular algorithm, rather, it is a family of model architectures and optimizations that can be used to learn word embeddings from large datasets. Embeddings learned through Word2Vec have proven to be successful on a variety of downstream natural language processing tasks.Note: This tutorial is based on [Efficient Estimation of Word Representations in Vector Space](https://arxiv.org/pdf/1301.3781.pdf) and[DistributedRepresentations of Words and Phrases and their Compositionality](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). It is not an exact implementation of the papers. Rather, it is intended to illustrate the key ideas.These papers proposed two methods for learning representations of words: * **Continuous Bag-of-Words Model** which predicts the middle word based on surrounding context words. The context consists of a few words before and after the current (middle) word. This architecture is called a bag-of-words model as the order of words in the context is not important.* **Continuous Skip-gram Model** which predict words within a certain range before and after the current word in the same sentence. A worked example of this is given below.You'll use the skip-gram approach in this tutorial. First, you'll explore skip-grams and other concepts using a single sentence for illustration. Next, you'll train your own Word2Vec model on a small dataset. This tutorial also contains code to export the trained embeddings and visualize them in the [TensorFlow Embedding Projector](http://projector.tensorflow.org/). Skip-gram and Negative Sampling While a bag-of-words model predicts a word given the neighboring context, a skip-gram model predicts the context (or neighbors) of a word, given the word itself. The model is trained on skip-grams, which are n-grams that allow tokens to be skipped (see the diagram below for an example). The context of a word can be represented through a set of skip-gram pairs of `(target_word, context_word)` where `context_word` appears in the neighboring context of `target_word`. Consider the following sentence of 8 words.> The wide road shimmered in the hot sun. The context words for each of the 8 words of this sentence are defined by a window size. The window size determines the span of words on either side of a `target_word` that can be considered `context word`. Take a look at this table of skip-grams for target words based on different window sizes. Note: For this tutorial, a window size of *n* implies n words on each side with a total window span of 2*n+1 words across a word.  The training objective of the skip-gram model is to maximize the probability of predicting context words given the target word. For a sequence of words *w1, w2, ... wT*, the objective can be written as the average log probability  where `c` is the size of the training context. The basic skip-gram formulation defines this probability using the softmax function.  where *v* and *v'* are target and context vector representations of words and *W* is vocabulary size. Computing the denominator of this formulation involves performing a full softmax over the entire vocabulary words which is often large (105-107) terms. The [Noise Contrastive Estimation](https://www.tensorflow.org/api_docs/python/tf/nn/nce_loss) loss function is an efficient approximation for a full softmax. 
With an objective to learn word embeddings instead of modelling the word distribution, NCE loss can be [simplified](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) to use negative sampling. The simplified negative sampling objective for a target word is to distinguish the context word from *num_ns* negative samples drawn from noise distribution *Pn(w)* of words. More precisely, an efficient approximation of full softmax over the vocabulary is, for a skip-gram pair, to pose the loss for a target word as a classification problem between the context word and *num_ns* negative samples. A negative sample is defined as a (target_word, context_word) pair such that the context_word does not appear in the `window_size` neighborhood of the target_word. For the example sentence, these are few potential negative samples (when `window_size` is 2).```(hot, shimmered)(wide, hot)(wide, sun)``` In the next section, you'll generate skip-grams and negative samples for a single sentence. You'll also learn about subsampling techniques and train a classification model for positive and negative training examples later in the tutorial. Setup
###Code
import io
import re
import string
import tensorflow as tf
import tqdm
from tensorflow.keras import Model
from tensorflow.keras.layers import Dot, Embedding, Flatten
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
SEED = 42
AUTOTUNE = tf.data.AUTOTUNE
###Output
_____no_output_____
###Markdown
Vectorize an example sentence Consider the following sentence: `The wide road shimmered in the hot sun.` Tokenize the sentence:
###Code
sentence = "The wide road shimmered in the hot sun"
tokens = list(sentence.lower().split())
print(len(tokens))
###Output
_____no_output_____
###Markdown
Create a vocabulary to save mappings from tokens to integer indices.
###Code
vocab, index = {}, 1 # start indexing from 1
vocab['<pad>'] = 0 # add a padding token
for token in tokens:
if token not in vocab:
vocab[token] = index
index += 1
vocab_size = len(vocab)
print(vocab)
###Output
_____no_output_____
###Markdown
Create an inverse vocabulary to save mappings from integer indices to tokens.
###Code
inverse_vocab = {index: token for token, index in vocab.items()}
print(inverse_vocab)
###Output
_____no_output_____
###Markdown
Vectorize your sentence.
###Code
example_sequence = [vocab[word] for word in tokens]
print(example_sequence)
###Output
_____no_output_____
###Markdown
Generate skip-grams from one sentence The `tf.keras.preprocessing.sequence` module provides useful functions that simplify data preparation for Word2Vec. You can use the `tf.keras.preprocessing.sequence.skipgrams` to generate skip-gram pairs from the `example_sequence` with a given `window_size` from tokens in the range `[0, vocab_size)`.Note: `negative_samples` is set to `0` here as batching negative samples generated by this function requires a bit of code. You will use another function to perform negative sampling in the next section.
###Code
window_size = 2
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
example_sequence,
vocabulary_size=vocab_size,
window_size=window_size,
negative_samples=0)
print(len(positive_skip_grams))
###Output
_____no_output_____
###Markdown
Take a look at a few positive skip-grams.
###Code
for target, context in positive_skip_grams[:5]:
print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
###Output
_____no_output_____
###Markdown
Negative sampling for one skip-gram The `skipgrams` function returns all positive skip-gram pairs by sliding over a given window span. To produce additional skip-gram pairs that would serve as negative samples for training, you need to sample random words from the vocabulary. Use the `tf.random.log_uniform_candidate_sampler` function to sample `num_ns` number of negative samples for a given target word in a window. You can call the function on one skip-grams's target word and pass the context word as true class to exclude it from being sampled. Key point: *num_ns* (number of negative samples per positive context word) between [5, 20] is [shown to work](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) best for smaller datasets, while *num_ns* between [2,5] suffices for larger datasets.
###Code
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class, # class that should be sampled as 'positive'
num_true=1, # each positive skip-gram has 1 positive context class
num_sampled=num_ns, # number of negative context words to sample
unique=True, # all the negative samples should be unique
range_max=vocab_size, # pick index of the samples from [0, vocab_size]
seed=SEED, # seed for reproducibility
name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
###Output
_____no_output_____
###Markdown
Construct one training example For a given positive `(target_word, context_word)` skip-gram, you now also have `num_ns` negative sampled context words that do not appear in the window size neighborhood of `target_word`. Batch the `1` positive `context_word` and `num_ns` negative context words into one tensor. This produces a set of positive skip-grams (labelled as `1`) and negative samples (labelled as `0`) for each target word.
###Code
# Add a dimension so you can use concatenation (on the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concat positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label first context word as 1 (positive) followed by num_ns 0s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape target to shape (1,) and context and label to (num_ns+1,).
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
###Output
_____no_output_____
###Markdown
Take a look at the context and the corresponding labels for the target word from the skip-gram example above.
###Code
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
###Output
_____no_output_____
###Markdown
A tuple of `(target, context, label)` tensors constitutes one training example for training your skip-gram negative sampling Word2Vec model. Notice that the target is of shape `(1,)` while the context and label are of shape `(1+num_ns,)`
###Code
print("target :", target)
print("context :", context)
print("label :", label)
###Output
_____no_output_____
###Markdown
Summary This picture summarizes the procedure of generating training examples from a sentence.  Compile all steps into one function Skip-gram Sampling table A large dataset means a larger vocabulary with a higher number of frequent words such as stopwords. Training examples obtained from sampling commonly occurring words (such as `the`, `is`, `on`) don't add much useful information for the model to learn from. [Mikolov et al.](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) suggest subsampling of frequent words as a helpful practice to improve embedding quality. The `tf.keras.preprocessing.sequence.skipgrams` function accepts a sampling table argument to encode probabilities of sampling any token. You can use the `tf.keras.preprocessing.sequence.make_sampling_table` to generate a word-frequency rank-based probabilistic sampling table and pass it to the `skipgrams` function. Take a look at the sampling probabilities for a `vocab_size` of 10.
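For reference, the subsampling rule proposed by Mikolov et al. keeps each occurrence of a word `w` with probability roughly `sqrt(t / f(w))` (capped at 1), where `f(w)` is the word's relative frequency and `t` is a small threshold (the paper uses values around `1e-5`). The sketch below illustrates that rule on made-up frequencies; it is only an illustration, since `make_sampling_table` (used in the next cell) approximates frequencies with a Zipf distribution rather than counting them.

```python
import math

# Toy relative frequencies, assumed values purely for illustration.
word_freq = {'the': 0.05, 'is': 0.02, 'road': 0.0005, 'shimmered': 0.0001}
t = 1e-5  # subsampling threshold, as suggested in the paper

keep_prob = {w: min(1.0, math.sqrt(t / f)) for w, f in word_freq.items()}
print(keep_prob)  # very frequent words are kept only a small fraction of the time
```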
###Code
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
###Output
_____no_output_____
###Markdown
`sampling_table[i]` denotes the probability of sampling the i-th most common word in a dataset. The function assumes a [Zipf's distribution](https://en.wikipedia.org/wiki/Zipf%27s_law) of the word frequencies for sampling. Key point: The `tf.random.log_uniform_candidate_sampler` already assumes that the vocabulary frequency follows a log-uniform (Zipf's) distribution. Using this distribution-weighted sampling also helps approximate the Noise Contrastive Estimation (NCE) loss with simpler loss functions for training a negative sampling objective. Generate training data Compile all the steps described above into a function that can be called on a list of vectorized sentences obtained from any text dataset. Notice that the sampling table is built before sampling skip-gram word pairs. You will use this function in the later sections.
###Code
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
# Elements of each training example are appended to these lists.
targets, contexts, labels = [], [], []
# Build the sampling table for vocab_size tokens.
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
# Iterate over all sequences (sentences) in dataset.
for sequence in tqdm.tqdm(sequences):
# Generate positive skip-gram pairs for a sequence (sentence).
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
sequence,
vocabulary_size=vocab_size,
sampling_table=sampling_table,
window_size=window_size,
negative_samples=0)
# Iterate over each positive skip-gram pair to produce training examples
# with positive context word and negative samples.
for target_word, context_word in positive_skip_grams:
context_class = tf.expand_dims(
tf.constant([context_word], dtype="int64"), 1)
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class,
num_true=1,
num_sampled=num_ns,
unique=True,
range_max=vocab_size,
seed=SEED,
name="negative_sampling")
# Build context and label vectors (for one target word)
negative_sampling_candidates = tf.expand_dims(
negative_sampling_candidates, 1)
context = tf.concat([context_class, negative_sampling_candidates], 0)
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Append each element from the training example to global lists.
targets.append(target_word)
contexts.append(context)
labels.append(label)
return targets, contexts, labels
###Output
_____no_output_____
###Markdown
Prepare training data for Word2Vec With an understanding of how to work with one sentence for a skip-gram negative sampling based Word2Vec model, you can proceed to generate training examples from a larger list of sentences! Download text corpus You will use a text file of Shakespeare's writing for this tutorial. Change the following line to run this code on your own data.
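If you would rather experiment with your own corpus, this is the only line that needs to change; any plain-text file works. For example (the path below is hypothetical and left commented out so the rest of the notebook still runs as-is):

```python
# Hypothetical alternative: point path_to_file at a local plain-text file instead.
# path_to_file = '/path/to/my_corpus.txt'
```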
###Code
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
###Output
_____no_output_____
###Markdown
Read text from the file and take a look at the first few lines.
###Code
with open(path_to_file) as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
###Output
_____no_output_____
###Markdown
Use the non-empty lines to construct a `tf.data.TextLineDataset` object for the next steps.
###Code
text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
###Output
_____no_output_____
###Markdown
Vectorize sentences from the corpus You can use the `TextVectorization` layer to vectorize sentences from the corpus. Learn more about using this layer in this [Text Classification](https://www.tensorflow.org/tutorials/keras/text_classification) tutorial. Notice from the first few sentences above that the text needs to be in one case and punctuation needs to be removed. To do this, define a `custom_standardization` function that can be used in the TextVectorization layer.
###Code
# We create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
return tf.strings.regex_replace(lowercase,
'[%s]' % re.escape(string.punctuation), '')
# Define the vocabulary size and number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the text vectorization layer to normalize, split, and map strings to
# integers. Set output_sequence_length to pad all samples to the same length.
vectorize_layer = TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
###Output
_____no_output_____
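###Markdown
As a quick sanity check (not part of the original tutorial), you can call the standardization function on a sample string and confirm that lowercasing and punctuation removal behave as expected:

```python
sample = tf.constant(["The wide road shimmered, in the hot sun!"])
print(custom_standardization(sample).numpy())
# Expected: the lowercased sentence with the comma and exclamation mark removed.
```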
###Markdown
Call `adapt` on the text dataset to create vocabulary.
###Code
vectorize_layer.adapt(text_ds.batch(1024))
###Output
_____no_output_____
###Markdown
Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with `get_vocabulary()`. This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
###Code
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
###Output
_____no_output_____
###Markdown
The vectorize_layer can now be used to generate vectors for each element in the `text_ds`.
###Code
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
###Output
_____no_output_____
###Markdown
Obtain sequences from the dataset You now have a `tf.data.Dataset` of integer encoded sentences. To prepare the dataset for training a Word2Vec model, flatten the dataset into a list of sentence vector sequences. This step is required as you would iterate over each sentence in the dataset to produce positive and negative examples. Note: Since the `generate_training_data()` defined earlier uses non-TF python/numpy functions, you could also use a `tf.py_function` or `tf.numpy_function` with `tf.data.Dataset.map()`.
###Code
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
###Output
_____no_output_____
###Markdown
Take a look at a few examples from `sequences`.
###Code
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
###Output
_____no_output_____
###Markdown
Generate training examples from sequences `sequences` is now a list of int-encoded sentences. Just call the `generate_training_data()` function defined earlier to generate training examples for the Word2Vec model. To recap, the function iterates over each word from each sequence to collect positive and negative context words. The lengths of `targets`, `contexts`, and `labels` should be the same, representing the total number of training examples.
###Code
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
print(len(targets), len(contexts), len(labels))
###Output
_____no_output_____
###Markdown
Configure the dataset for performance To perform efficient batching for the potentially large number of training examples, use the `tf.data.Dataset` API. After this step, you would have a `tf.data.Dataset` object of `(target_word, context_word), (label)` elements to train your Word2Vec model!
###Code
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
###Output
_____no_output_____
###Markdown
Add `cache()` and `prefetch()` to improve performance.
###Code
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
###Output
_____no_output_____
###Markdown
Model and Training The Word2Vec model can be implemented as a classifier to distinguish between true context words from skip-grams and false context words obtained through negative sampling. You can perform a dot product between the embeddings of target and context words to obtain predictions for labels and compute loss against true labels in the dataset. Subclassed Word2Vec Model Use the [Keras Subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models) to define your Word2Vec model with the following layers:* `target_embedding`: A `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a target word. The number of parameters in this layer is `(vocab_size * embedding_dim)`.* `context_embedding`: Another `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a context word. The number of parameters in this layer is the same as in `target_embedding`, i.e. `(vocab_size * embedding_dim)`.* `dots`: A `tf.keras.layers.Dot` layer that computes the dot product of target and context embeddings from a training pair.* `flatten`: A `tf.keras.layers.Flatten` layer to flatten the results of the `dots` layer into logits. With the subclassed model, you can define the `call()` function that accepts `(target, context)` pairs which can then be passed into their corresponding embedding layer. Reshape the `context_embedding` to perform a dot product with `target_embedding` and return the flattened result. Key point: The `target_embedding` and `context_embedding` layers can be shared as well. You could also use a concatenation of both embeddings as the final Word2Vec embedding.
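Before the class definition, it may help to see the scoring step in isolation: the model ultimately just dots one target embedding against each of the `num_ns + 1` candidate context embeddings to produce one logit per candidate. A minimal NumPy sketch of that per-example computation (illustrative shapes only; the Keras layers below add the batch dimension):

```python
import numpy as np

embedding_dim = 4  # small, for illustration
num_ns = 4
target_emb = np.random.rand(embedding_dim)               # one target word
context_emb = np.random.rand(num_ns + 1, embedding_dim)  # 1 true + num_ns negative candidates

logits = context_emb @ target_emb  # shape (num_ns + 1,): one score per candidate
print(logits)
```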
###Code
class Word2Vec(Model):
def __init__(self, vocab_size, embedding_dim):
super(Word2Vec, self).__init__()
self.target_embedding = Embedding(vocab_size,
embedding_dim,
input_length=1,
name="w2v_embedding")
self.context_embedding = Embedding(vocab_size,
embedding_dim,
input_length=num_ns+1)
self.dots = Dot(axes=(3, 2))
self.flatten = Flatten()
def call(self, pair):
target, context = pair
we = self.target_embedding(target)
ce = self.context_embedding(context)
dots = self.dots([ce, we])
return self.flatten(dots)
###Output
_____no_output_____
###Markdown
Define loss function and compile model For simplicity, you can use `tf.keras.losses.CategoricalCrossentropy` as an alternative to the negative sampling loss. If you would like to write your own custom loss function, you can also do so as follows:

```python
def custom_loss(x_logit, y_true):
  return tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=y_true)
```

It's time to build your model! Instantiate your Word2Vec class with an embedding dimension of 128 (you could experiment with different values). Compile the model with the `tf.keras.optimizers.Adam` optimizer.
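As a quick check on what `from_logits=True` means here (an illustration, not part of the tutorial): with a one-hot label, categorical cross-entropy over logits reduces to `logsumexp(logits) - logits[true_index]`, which the sketch below verifies numerically.

```python
import numpy as np
import tensorflow as tf

logits = np.array([[2.0, 0.5, -1.0, 0.1, 0.3]])  # 1 positive + num_ns negative scores
label = np.array([[1.0, 0.0, 0.0, 0.0, 0.0]])    # the positive class comes first

keras_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)(label, logits).numpy()
manual_loss = np.log(np.sum(np.exp(logits[0]))) - logits[0, 0]
print(keras_loss, manual_loss)  # the two values should agree up to float precision
```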
###Code
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Also define a callback to log training statistics for TensorBoard.
###Code
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
###Output
_____no_output_____
###Markdown
Train the model with `dataset` prepared above for some number of epochs.
###Code
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
###Output
_____no_output_____
###Markdown
TensorBoard now shows the Word2Vec model's accuracy and loss:

```python
%tensorboard --logdir logs
```

Embedding lookup and analysis Obtain the weights from the model using `get_layer()` and `get_weights()`. The `get_vocabulary()` function provides the vocabulary to build a metadata file with one token per line.
###Code
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
###Output
_____no_output_____
###Markdown
Create and save the vectors and metadata file.
###Code
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
if index == 0:
continue # skip 0, it's padding.
vec = weights[index]
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_m.write(word + "\n")
out_v.close()
out_m.close()
###Output
_____no_output_____
###Markdown
Download the `vectors.tsv` and `metadata.tsv` to analyze the obtained embeddings in the [Embedding Projector](https://projector.tensorflow.org/).
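If you would like to inspect neighbours locally as well (an optional extra, not part of the original tutorial), a small cosine-similarity lookup over the `weights` matrix and `vocab` list obtained above is enough; the query word here is just an example and may or may not appear in your vocabulary.

```python
import numpy as np

def nearest_neighbors(query, weights, vocab, top_k=5):
    # Normalize the embeddings and rank all words by cosine similarity to the query.
    word_to_index = {w: i for i, w in enumerate(vocab)}
    unit = weights / (np.linalg.norm(weights, axis=1, keepdims=True) + 1e-9)
    sims = unit @ unit[word_to_index[query]]
    best = np.argsort(-sims)[1:top_k + 1]  # skip the query word itself
    return [(vocab[i], float(sims[i])) for i in best]

print(nearest_neighbors('king', weights, vocab))  # 'king' is only an example query
```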
###Code
try:
from google.colab import files
files.download('vectors.tsv')
files.download('metadata.tsv')
except Exception:
pass
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Word2Vec Word2Vec is not a singular algorithm, rather, it is a family of model architectures and optimizations that can be used to learn word embeddings from large datasets. Embeddings learned through Word2Vec have proven to be successful on a variety of downstream natural language processing tasks.Note: This tutorial is based on [Efficient Estimation of Word Representations in Vector Space](https://arxiv.org/pdf/1301.3781.pdf) and[DistributedRepresentations of Words and Phrases and their Compositionality](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). It is not an exact implementation of the papers. Rather, it is intended to illustrate the key ideas.These papers proposed two methods for learning representations of words: * **Continuous Bag-of-Words Model** which predicts the middle word based on surrounding context words. The context consists of a few words before and after the current (middle) word. This architecture is called a bag-of-words model as the order of words in the context is not important.* **Continuous Skip-gram Model** which predict words within a certain range before and after the current word in the same sentence. A worked example of this is given below.You'll use the skip-gram approach in this tutorial. First, you'll explore skip-grams and other concepts using a single sentence for illustration. Next, you'll train your own Word2Vec model on a small dataset. This tutorial also contains code to export the trained embeddings and visualize them in the [TensorFlow Embedding Projector](http://projector.tensorflow.org/). Skip-gram and Negative Sampling While a bag-of-words model predicts a word given the neighboring context, a skip-gram model predicts the context (or neighbors) of a word, given the word itself. The model is trained on skip-grams, which are n-grams that allow tokens to be skipped (see the diagram below for an example). The context of a word can be represented through a set of skip-gram pairs of `(target_word, context_word)` where `context_word` appears in the neighboring context of `target_word`. Consider the following sentence of 8 words.> The wide road shimmered in the hot sun. The context words for each of the 8 words of this sentence are defined by a window size. The window size determines the span of words on either side of a `target_word` that can be considered `context word`. Take a look at this table of skip-grams for target words based on different window sizes. Note: For this tutorial, a window size of *n* implies n words on each side with a total window span of 2*n+1 words across a word.  The training objective of the skip-gram model is to maximize the probability of predicting context words given the target word. For a sequence of words *w1, w2, ... wT*, the objective can be written as the average log probability  where `c` is the size of the training context. The basic skip-gram formulation defines this probability using the softmax function.  where *v* and *v'* are target and context vector representations of words and *W* is vocabulary size. Computing the denominator of this formulation involves performing a full softmax over the entire vocabulary words which is often large (105-107) terms. The [Noise Contrastive Estimation](https://www.tensorflow.org/api_docs/python/tf/nn/nce_loss) loss function is an efficient approximation for a full softmax. 
With an objective to learn word embeddings instead of modelling the word distribution, NCE loss can be [simplified](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) to use negative sampling. The simplified negative sampling objective for a target word is to distinguish the context word from *num_ns* negative samples drawn from noise distribution *Pn(w)* of words. More precisely, an efficient approximation of full softmax over the vocabulary is, for a skip-gram pair, to pose the loss for a target word as a classification problem between the context word and *num_ns* negative samples. A negative sample is defined as a (target_word, context_word) pair such that the context_word does not appear in the `window_size` neighborhood of the target_word. For the example sentence, these are few potential negative samples (when `window_size` is 2).```(hot, shimmered)(wide, hot)(wide, sun)``` In the next section, you'll generate skip-grams and negative samples for a single sentence. You'll also learn about subsampling techniques and train a classification model for positive and negative training examples later in the tutorial. Setup
###Code
import io
import re
import string
import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
# Load the TensorBoard notebook extension
%load_ext tensorboard
SEED = 42
AUTOTUNE = tf.data.AUTOTUNE
###Output
_____no_output_____
###Markdown
Vectorize an example sentence Consider the following sentence: `The wide road shimmered in the hot sun.`Tokenize the sentence:
###Code
sentence = "The wide road shimmered in the hot sun"
tokens = list(sentence.lower().split())
print(len(tokens))
###Output
_____no_output_____
###Markdown
Create a vocabulary to save mappings from tokens to integer indices.
###Code
vocab, index = {}, 1 # start indexing from 1
vocab['<pad>'] = 0 # add a padding token
for token in tokens:
if token not in vocab:
vocab[token] = index
index += 1
vocab_size = len(vocab)
print(vocab)
###Output
_____no_output_____
###Markdown
Create an inverse vocabulary to save mappings from integer indices to tokens.
###Code
inverse_vocab = {index: token for token, index in vocab.items()}
print(inverse_vocab)
###Output
_____no_output_____
###Markdown
Vectorize your sentence.
###Code
example_sequence = [vocab[word] for word in tokens]
print(example_sequence)
###Output
_____no_output_____
###Markdown
Generate skip-grams from one sentence The `tf.keras.preprocessing.sequence` module provides useful functions that simplify data preparation for Word2Vec. You can use the `tf.keras.preprocessing.sequence.skipgrams` to generate skip-gram pairs from the `example_sequence` with a given `window_size` from tokens in the range `[0, vocab_size)`.Note: `negative_samples` is set to `0` here as batching negative samples generated by this function requires a bit of code. You will use another function to perform negative sampling in the next section.
###Code
window_size = 2
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
example_sequence,
vocabulary_size=vocab_size,
window_size=window_size,
negative_samples=0)
print(len(positive_skip_grams))
###Output
_____no_output_____
###Markdown
Take a look at a few positive skip-grams.
###Code
for target, context in positive_skip_grams[:5]:
print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
###Output
_____no_output_____
###Markdown
Negative sampling for one skip-gram The `skipgrams` function returns all positive skip-gram pairs by sliding over a given window span. To produce additional skip-gram pairs that would serve as negative samples for training, you need to sample random words from the vocabulary. Use the `tf.random.log_uniform_candidate_sampler` function to sample `num_ns` number of negative samples for a given target word in a window. You can call the function on one skip-grams's target word and pass the context word as true class to exclude it from being sampled. Key point: *num_ns* (number of negative samples per positive context word) between [5, 20] is [shown to work](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) best for smaller datasets, while *num_ns* between [2,5] suffices for larger datasets.
###Code
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class, # class that should be sampled as 'positive'
num_true=1, # each positive skip-gram has 1 positive context class
num_sampled=num_ns, # number of negative context words to sample
unique=True, # all the negative samples should be unique
range_max=vocab_size, # pick index of the samples from [0, vocab_size]
seed=SEED, # seed for reproducibility
name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
###Output
_____no_output_____
###Markdown
Construct one training example For a given positive `(target_word, context_word)` skip-gram, you now also have `num_ns` negative sampled context words that do not appear in the window size neighborhood of `target_word`. Batch the `1` positive `context_word` and `num_ns` negative context words into one tensor. This produces a set of positive skip-grams (labelled as `1`) and negative samples (labelled as `0`) for each target word.
###Code
# Add a dimension so you can use concatenation (on the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concat positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label first context word as 1 (positive) followed by num_ns 0s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape target to shape (1,) and context and label to (num_ns+1,).
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
###Output
_____no_output_____
###Markdown
Take a look at the context and the corresponding labels for the target word from the skip-gram example above.
###Code
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
###Output
_____no_output_____
###Markdown
A tuple of `(target, context, label)` tensors constitutes one training example for training your skip-gram negative sampling Word2Vec model. Notice that the target is of shape `(1,)` while the context and label are of shape `(1+num_ns,)`
###Code
print("target :", target)
print("context :", context)
print("label :", label)
###Output
_____no_output_____
###Markdown
Summary This picture summarizes the procedure of generating training example from a sentence.  Compile all steps into one function Skip-gram Sampling table A large dataset means larger vocabulary with higher number of more frequent words such as stopwords. Training examples obtained from sampling commonly occurring words (such as `the`, `is`, `on`) don't add much useful information for the model to learn from. [Mikolov et al.](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) suggest subsampling of frequent words as a helpful practice to improve embedding quality. The `tf.keras.preprocessing.sequence.skipgrams` function accepts a sampling table argument to encode probabilities of sampling any token. You can use the `tf.keras.preprocessing.sequence.make_sampling_table` to generate a word-frequency rank based probabilistic sampling table and pass it to `skipgrams` function. Take a look at the sampling probabilities for a `vocab_size` of 10.
###Code
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
###Output
_____no_output_____
###Markdown
`sampling_table[i]` denotes the probability of sampling the i-th most common word in a dataset. The function assumes a [Zipf's distribution](https://en.wikipedia.org/wiki/Zipf%27s_law) of the word frequencies for sampling. Key point: The `tf.random.log_uniform_candidate_sampler` already assumes that the vocabulary frequency follows a log-uniform (Zipf's) distribution. Using these distribution weighted sampling also helps approximate the Noise Contrastive Estimation (NCE) loss with simpler loss functions for training a negative sampling objective. Generate training data Compile all the steps described above into a function that can be called on a list of vectorized sentences obtained from any text dataset. Notice that the sampling table is built before sampling skip-gram word pairs. You will use this function in the later sections.
###Code
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
# Elements of each training example are appended to these lists.
targets, contexts, labels = [], [], []
# Build the sampling table for vocab_size tokens.
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
# Iterate over all sequences (sentences) in dataset.
for sequence in tqdm.tqdm(sequences):
# Generate positive skip-gram pairs for a sequence (sentence).
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
sequence,
vocabulary_size=vocab_size,
sampling_table=sampling_table,
window_size=window_size,
negative_samples=0)
# Iterate over each positive skip-gram pair to produce training examples
# with positive context word and negative samples.
for target_word, context_word in positive_skip_grams:
context_class = tf.expand_dims(
tf.constant([context_word], dtype="int64"), 1)
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class,
num_true=1,
num_sampled=num_ns,
unique=True,
range_max=vocab_size,
seed=SEED,
name="negative_sampling")
# Build context and label vectors (for one target word)
negative_sampling_candidates = tf.expand_dims(
negative_sampling_candidates, 1)
context = tf.concat([context_class, negative_sampling_candidates], 0)
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Append each element from the training example to global lists.
targets.append(target_word)
contexts.append(context)
labels.append(label)
return targets, contexts, labels
###Output
_____no_output_____
###Markdown
Prepare training data for Word2Vec With an understanding of how to work with one sentence for a skip-gram negative sampling based Word2Vec model, you can proceed to generate training examples from a larger list of sentences! Download text corpus You will use a text file of Shakespeare's writing for this tutorial. Change the following line to run this code on your own data.
###Code
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
###Output
_____no_output_____
###Markdown
Read text from the file and take a look at the first few lines.
###Code
with open(path_to_file) as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
###Output
_____no_output_____
###Markdown
Use the non-empty lines to construct a `tf.data.TextLineDataset` object for the next steps.
###Code
text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
###Output
_____no_output_____
###Markdown
Vectorize sentences from the corpus You can use the `TextVectorization` layer to vectorize sentences from the corpus. Learn more about using this layer in this [Text Classification](https://www.tensorflow.org/tutorials/keras/text_classification) tutorial. Notice from the first few sentences above that the text needs to be in one case and punctuation needs to be removed. To do this, define a `custom_standardization function` that can be used in the TextVectorization layer.
###Code
# Now, create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
return tf.strings.regex_replace(lowercase,
'[%s]' % re.escape(string.punctuation), '')
# Define the vocabulary size and number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the text vectorization layer to normalize, split, and map strings to
# integers. Set output_sequence_length to pad all samples to the same length.
vectorize_layer = layers.TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
###Output
_____no_output_____
###Markdown
Call `adapt` on the text dataset to create vocabulary.
###Code
vectorize_layer.adapt(text_ds.batch(1024))
###Output
_____no_output_____
###Markdown
Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with `get_vocabulary()`. This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
###Code
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
###Output
_____no_output_____
###Markdown
The vectorize_layer can now be used to generate vectors for each element in the `text_ds`.
###Code
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
###Output
_____no_output_____
###Markdown
Obtain sequences from the dataset You now have a `tf.data.Dataset` of integer encoded sentences. To prepare the dataset for training a Word2Vec model, flatten the dataset into a list of sentence vector sequences. This step is required as you would iterate over each sentence in the dataset to produce positive and negative examples. Note: Since the `generate_training_data()` defined earlier uses non-TF python/numpy functions, you could also use a `tf.py_function` or `tf.numpy_function` with `tf.data.Dataset.map()`.
###Code
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
###Output
_____no_output_____
###Markdown
Take a look at a few examples from `sequences`.
###Code
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
###Output
_____no_output_____
###Markdown
Generate training examples from sequences `sequences` is now a list of int encoded sentences. Just call the `generate_training_data()` function defined earlier to generate training examples for the Word2Vec model. To recap, the function iterates over each word from each sequence to collect positive and negative context words. Length of target, contexts and labels should be same, representing the total number of training examples.
###Code
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
targets = np.array(targets)
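# Each context built above has shape (num_ns + 1, 1); dropping the trailing axis
# below gives contexts the shape (num_examples, num_ns + 1) expected by the model.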
contexts = np.array(contexts)[:,:,0]
labels = np.array(labels)
print('\n')
print(f"targets.shape: {targets.shape}")
print(f"contexts.shape: {contexts.shape}")
print(f"labels.shape: {labels.shape}")
###Output
_____no_output_____
###Markdown
Configure the dataset for performance To perform efficient batching for the potentially large number of training examples, use the `tf.data.Dataset` API. After this step, you would have a `tf.data.Dataset` object of `(target_word, context_word), (label)` elements to train your Word2Vec model!
###Code
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
###Output
_____no_output_____
###Markdown
Add `cache()` and `prefetch()` to improve performance.
###Code
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
###Output
_____no_output_____
###Markdown
Model and Training The Word2Vec model can be implemented as a classifier to distinguish between true context words from skip-grams and false context words obtained through negative sampling. You can perform a dot product between the embeddings of target and context words to obtain predictions for labels and compute loss against true labels in the dataset. Subclassed Word2Vec Model Use the [Keras Subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models) to define your Word2Vec model with the following layers:* `target_embedding`: A `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a target word. The number of parameters in this layer are `(vocab_size * embedding_dim)`.* `context_embedding`: Another `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a context word. The number of parameters in this layer are the same as those in `target_embedding`, i.e. `(vocab_size * embedding_dim)`.* `dots`: A `tf.keras.layers.Dot` layer that computes the dot product of target and context embeddings from a training pair.* `flatten`: A `tf.keras.layers.Flatten` layer to flatten the results of `dots` layer into logits.With the subclassed model, you can define the `call()` function that accepts `(target, context)` pairs which can then be passed into their corresponding embedding layer. Reshape the `context_embedding` to perform a dot product with `target_embedding` and return the flattened result. Key point: The `target_embedding` and `context_embedding` layers can be shared as well. You could also use a concatenation of both embeddings as the final Word2Vec embedding.
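The `tf.einsum('be,bce->bc', ...)` contraction used in the `call()` method below is the batched form of that dot product: for every example `b`, it dots the target embedding (axis `e`) against each of the `c = num_ns + 1` context embeddings. A small NumPy sketch of the same contraction (illustrative shapes only, not part of the tutorial):

```python
import numpy as np

batch, num_ns, embedding_dim = 2, 4, 3
word_emb = np.random.rand(batch, embedding_dim)                 # (batch, embed)
context_emb = np.random.rand(batch, num_ns + 1, embedding_dim)  # (batch, context, embed)

dots = np.einsum('be,bce->bc', word_emb, context_emb)
print(dots.shape)  # (2, 5): one logit per candidate context word, per example
```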
###Code
class Word2Vec(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim):
super(Word2Vec, self).__init__()
self.target_embedding = layers.Embedding(vocab_size,
embedding_dim,
input_length=1,
name="w2v_embedding")
self.context_embedding = layers.Embedding(vocab_size,
embedding_dim,
input_length=num_ns+1)
def call(self, pair):
target, context = pair
# target: (batch, dummy?) # The dummy axis doesn't exist in TF2.7+
# context: (batch, context)
if len(target.shape) == 2:
target = tf.squeeze(target, axis=1)
# target: (batch,)
word_emb = self.target_embedding(target)
# word_emb: (batch, embed)
context_emb = self.context_embedding(context)
# context_emb: (batch, context, embed)
dots = tf.einsum('be,bce->bc', word_emb, context_emb)
# dots: (batch, context)
return dots
###Output
_____no_output_____
###Markdown
Define loss function and compile model For simplicity, you can use `tf.keras.losses.CategoricalCrossEntropy` as an alternative to the negative sampling loss. If you would like to write your own custom loss function, you can also do so as follows:``` pythondef custom_loss(x_logit, y_true): return tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=y_true)```It's time to build your model! Instantiate your Word2Vec class with an embedding dimension of 128 (you could experiment with different values). Compile the model with the `tf.keras.optimizers.Adam` optimizer.
###Code
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Also define a callback to log training statistics for TensorBoard.
###Code
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
###Output
_____no_output_____
###Markdown
Train the model with `dataset` prepared above for some number of epochs.
###Code
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
###Output
_____no_output_____
###Markdown
Tensorboard now shows the Word2Vec model's accuracy and loss.
###Code
#docs_infra: no_execute
%tensorboard --logdir logs
###Output
_____no_output_____
###Markdown
Embedding lookup and analysis Obtain the weights from the model using `get_layer()` and `get_weights()`. The `get_vocabulary()` function provides the vocabulary to build a metadata file with one token per line.
###Code
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
###Output
_____no_output_____
###Markdown
Create and save the vectors and metadata file.
###Code
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
if index == 0:
continue # skip 0, it's padding.
vec = weights[index]
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_m.write(word + "\n")
out_v.close()
out_m.close()
###Output
_____no_output_____
###Markdown
Download the `vectors.tsv` and `metadata.tsv` to analyze the obtained embeddings in the [Embedding Projector](https://projector.tensorflow.org/).
###Code
try:
from google.colab import files
files.download('vectors.tsv')
files.download('metadata.tsv')
except Exception:
pass
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Word2Vec Word2Vec is not a singular algorithm, rather, it is a family of model architectures and optimizations that can be used to learn word embeddings from large datasets. Embeddings learned through Word2Vec have proven to be successful on a variety of downstream natural language processing tasks.Note: This tutorial is based on [Efficient Estimation of Word Representations in Vector Space](https://arxiv.org/pdf/1301.3781.pdf) and[DistributedRepresentations of Words and Phrases and their Compositionality](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). It is not an exact implementation of the papers. Rather, it is intended to illustrate the key ideas.These papers proposed two methods for learning representations of words: * **Continuous Bag-of-Words Model** which predicts the middle word based on surrounding context words. The context consists of a few words before and after the current (middle) word. This architecture is called a bag-of-words model as the order of words in the context is not important.* **Continuous Skip-gram Model** which predict words within a certain range before and after the current word in the same sentence. A worked example of this is given below.You'll use the skip-gram approach in this tutorial. First, you'll explore skip-grams and other concepts using a single sentence for illustration. Next, you'll train your own Word2Vec model on a small dataset. This tutorial also contains code to export the trained embeddings and visualize them in the [TensorFlow Embedding Projector](http://projector.tensorflow.org/). Skip-gram and Negative Sampling While a bag-of-words model predicts a word given the neighboring context, a skip-gram model predicts the context (or neighbors) of a word, given the word itself. The model is trained on skip-grams, which are n-grams that allow tokens to be skipped (see the diagram below for an example). The context of a word can be represented through a set of skip-gram pairs of `(target_word, context_word)` where `context_word` appears in the neighboring context of `target_word`. Consider the following sentence of 8 words.> The wide road shimmered in the hot sun. The context words for each of the 8 words of this sentence are defined by a window size. The window size determines the span of words on either side of a `target_word` that can be considered `context word`. Take a look at this table of skip-grams for target words based on different window sizes. Note: For this tutorial, a window size of *n* implies n words on each side with a total window span of 2*n+1 words across a word.  The training objective of the skip-gram model is to maximize the probability of predicting context words given the target word. For a sequence of words *w1, w2, ... wT*, the objective can be written as the average log probability  where `c` is the size of the training context. The basic skip-gram formulation defines this probability using the softmax function.  where *v* and *v'* are target and context vector representations of words and *W* is vocabulary size. Computing the denominator of this formulation involves performing a full softmax over the entire vocabulary words which is often large (105-107) terms. The [Noise Contrastive Estimation](https://www.tensorflow.org/api_docs/python/tf/nn/nce_loss) loss function is an efficient approximation for a full softmax. 
With an objective to learn word embeddings instead of modelling the word distribution, NCE loss can be [simplified](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) to use negative sampling. The simplified negative sampling objective for a target word is to distinguish the context word from *num_ns* negative samples drawn from noise distribution *Pn(w)* of words. More precisely, an efficient approximation of full softmax over the vocabulary is, for a skip-gram pair, to pose the loss for a target word as a classification problem between the context word and *num_ns* negative samples. A negative sample is defined as a (target_word, context_word) pair such that the context_word does not appear in the `window_size` neighborhood of the target_word. For the example sentence, these are a few potential negative samples (when `window_size` is 2).```(hot, shimmered)(wide, hot)(wide, sun)``` In the next section, you'll generate skip-grams and negative samples for a single sentence. You'll also learn about subsampling techniques and train a classification model for positive and negative training examples later in the tutorial. Setup
###Code
import io
import re
import string
import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
# Load the TensorBoard notebook extension
%load_ext tensorboard
SEED = 42
AUTOTUNE = tf.data.AUTOTUNE
###Output
_____no_output_____
###Markdown
Vectorize an example sentence Consider the following sentence: `The wide road shimmered in the hot sun.`Tokenize the sentence:
###Code
sentence = "The wide road shimmered in the hot sun"
tokens = list(sentence.lower().split())
print(len(tokens))
###Output
_____no_output_____
###Markdown
Create a vocabulary to save mappings from tokens to integer indices.
###Code
vocab, index = {}, 1 # start indexing from 1
vocab['<pad>'] = 0 # add a padding token
for token in tokens:
if token not in vocab:
vocab[token] = index
index += 1
vocab_size = len(vocab)
print(vocab)
###Output
_____no_output_____
###Markdown
Create an inverse vocabulary to save mappings from integer indices to tokens.
###Code
inverse_vocab = {index: token for token, index in vocab.items()}
print(inverse_vocab)
###Output
_____no_output_____
###Markdown
Vectorize your sentence.
###Code
example_sequence = [vocab[word] for word in tokens]
print(example_sequence)
###Output
_____no_output_____
###Markdown
Generate skip-grams from one sentence The `tf.keras.preprocessing.sequence` module provides useful functions that simplify data preparation for Word2Vec. You can use the `tf.keras.preprocessing.sequence.skipgrams` to generate skip-gram pairs from the `example_sequence` with a given `window_size` from tokens in the range `[0, vocab_size)`.Note: `negative_samples` is set to `0` here as batching negative samples generated by this function requires a bit of code. You will use another function to perform negative sampling in the next section.
###Code
window_size = 2
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
example_sequence,
vocabulary_size=vocab_size,
window_size=window_size,
negative_samples=0)
print(len(positive_skip_grams))
###Output
_____no_output_____
###Markdown
Take a look at a few positive skip-grams.
###Code
for target, context in positive_skip_grams[:5]:
print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
###Output
_____no_output_____
###Markdown
Negative sampling for one skip-gram The `skipgrams` function returns all positive skip-gram pairs by sliding over a given window span. To produce additional skip-gram pairs that would serve as negative samples for training, you need to sample random words from the vocabulary. Use the `tf.random.log_uniform_candidate_sampler` function to sample `num_ns` number of negative samples for a given target word in a window. You can call the function on one skip-grams's target word and pass the context word as true class to exclude it from being sampled. Key point: *num_ns* (number of negative samples per positive context word) between [5, 20] is [shown to work](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) best for smaller datasets, while *num_ns* between [2,5] suffices for larger datasets.
###Code
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class, # class that should be sampled as 'positive'
num_true=1, # each positive skip-gram has 1 positive context class
num_sampled=num_ns, # number of negative context words to sample
unique=True, # all the negative samples should be unique
range_max=vocab_size, # pick index of the samples from [0, vocab_size]
seed=SEED, # seed for reproducibility
name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
###Output
_____no_output_____
###Markdown
Construct one training example For a given positive `(target_word, context_word)` skip-gram, you now also have `num_ns` negative sampled context words that do not appear in the window size neighborhood of `target_word`. Batch the `1` positive `context_word` and `num_ns` negative context words into one tensor. This produces a set of positive skip-grams (labelled as `1`) and negative samples (labelled as `0`) for each target word.
###Code
# Add a dimension so you can use concatenation (on the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concat positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label first context word as 1 (positive) followed by num_ns 0s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape target to shape (1,) and context and label to (num_ns+1,).
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
###Output
_____no_output_____
###Markdown
Take a look at the context and the corresponding labels for the target word from the skip-gram example above.
###Code
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
###Output
_____no_output_____
###Markdown
A tuple of `(target, context, label)` tensors constitutes one training example for training your skip-gram negative sampling Word2Vec model. Notice that the target is of shape `(1,)` while the context and label are of shape `(1+num_ns,)`
###Code
print("target :", target)
print("context :", context)
print("label :", label)
###Output
_____no_output_____
###Markdown
Summary This picture summarizes the procedure of generating training example from a sentence.  Compile all steps into one function Skip-gram Sampling table A large dataset means larger vocabulary with higher number of more frequent words such as stopwords. Training examples obtained from sampling commonly occurring words (such as `the`, `is`, `on`) don't add much useful information for the model to learn from. [Mikolov et al.](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) suggest subsampling of frequent words as a helpful practice to improve embedding quality. The `tf.keras.preprocessing.sequence.skipgrams` function accepts a sampling table argument to encode probabilities of sampling any token. You can use the `tf.keras.preprocessing.sequence.make_sampling_table` to generate a word-frequency rank based probabilistic sampling table and pass it to `skipgrams` function. Take a look at the sampling probabilities for a `vocab_size` of 10.
###Code
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
###Output
_____no_output_____
###Markdown
`sampling_table[i]` denotes the probability of sampling the i-th most common word in a dataset. The function assumes a [Zipf's distribution](https://en.wikipedia.org/wiki/Zipf%27s_law) of the word frequencies for sampling. Key point: The `tf.random.log_uniform_candidate_sampler` already assumes that the vocabulary frequency follows a log-uniform (Zipf's) distribution. Using these distribution weighted sampling also helps approximate the Noise Contrastive Estimation (NCE) loss with simpler loss functions for training a negative sampling objective. Generate training data Compile all the steps described above into a function that can be called on a list of vectorized sentences obtained from any text dataset. Notice that the sampling table is built before sampling skip-gram word pairs. You will use this function in the later sections.
###Code
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
# Elements of each training example are appended to these lists.
targets, contexts, labels = [], [], []
# Build the sampling table for vocab_size tokens.
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
# Iterate over all sequences (sentences) in dataset.
for sequence in tqdm.tqdm(sequences):
# Generate positive skip-gram pairs for a sequence (sentence).
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
sequence,
vocabulary_size=vocab_size,
sampling_table=sampling_table,
window_size=window_size,
negative_samples=0)
# Iterate over each positive skip-gram pair to produce training examples
# with positive context word and negative samples.
for target_word, context_word in positive_skip_grams:
context_class = tf.expand_dims(
tf.constant([context_word], dtype="int64"), 1)
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class,
num_true=1,
num_sampled=num_ns,
unique=True,
range_max=vocab_size,
seed=SEED,
name="negative_sampling")
# Build context and label vectors (for one target word)
negative_sampling_candidates = tf.expand_dims(
negative_sampling_candidates, 1)
context = tf.concat([context_class, negative_sampling_candidates], 0)
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Append each element from the training example to global lists.
targets.append(target_word)
contexts.append(context)
labels.append(label)
return targets, contexts, labels
###Output
_____no_output_____
###Markdown
Prepare training data for Word2Vec With an understanding of how to work with one sentence for a skip-gram negative sampling based Word2Vec model, you can proceed to generate training examples from a larger list of sentences! Download text corpus You will use a text file of Shakespeare's writing for this tutorial. Change the following line to run this code on your own data.
###Code
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
###Output
_____no_output_____
###Markdown
Read text from the file and take a look at the first few lines.
###Code
with open(path_to_file) as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
###Output
_____no_output_____
###Markdown
Use the non-empty lines to construct a `tf.data.TextLineDataset` object for the next steps.
###Code
text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
###Output
_____no_output_____
###Markdown
Vectorize sentences from the corpus You can use the `TextVectorization` layer to vectorize sentences from the corpus. Learn more about using this layer in this [Text Classification](https://www.tensorflow.org/tutorials/keras/text_classification) tutorial. Notice from the first few sentences above that the text needs to be in one case and punctuation needs to be removed. To do this, define a `custom_standardization function` that can be used in the TextVectorization layer.
###Code
# Now, create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
return tf.strings.regex_replace(lowercase,
'[%s]' % re.escape(string.punctuation), '')
# Define the vocabulary size and number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the TextVectorization layer to normalize, split, and map strings to
# integers. Set output_sequence_length to pad all samples to the same length.
vectorize_layer = layers.TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
###Output
_____no_output_____
###Markdown
Call `adapt` on the text dataset to create vocabulary.
###Code
vectorize_layer.adapt(text_ds.batch(1024))
###Output
_____no_output_____
###Markdown
Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with `get_vocabulary()`. This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
###Code
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
###Output
_____no_output_____
###Markdown
The vectorize_layer can now be used to generate vectors for each element in the `text_ds`.
###Code
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
###Output
_____no_output_____
###Markdown
Obtain sequences from the dataset You now have a `tf.data.Dataset` of integer encoded sentences. To prepare the dataset for training a Word2Vec model, flatten the dataset into a list of sentence vector sequences. This step is required as you would iterate over each sentence in the dataset to produce positive and negative examples. Note: Since the `generate_training_data()` defined earlier uses non-TF python/numpy functions, you could also use a `tf.py_function` or `tf.numpy_function` with `tf.data.Dataset.map()`.
###Code
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
###Output
_____no_output_____
###Markdown
Take a look at a few examples from `sequences`.
###Code
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
###Output
_____no_output_____
###Markdown
Generate training examples from sequences `sequences` is now a list of int-encoded sentences. Just call the `generate_training_data()` function defined earlier to generate training examples for the Word2Vec model. To recap, the function iterates over each word from each sequence to collect positive and negative context words. The lengths of targets, contexts, and labels should be the same, representing the total number of training examples.
###Code
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
targets = np.array(targets)
contexts = np.array(contexts)[:,:,0]
labels = np.array(labels)
print('\n')
print(f"targets.shape: {targets.shape}")
print(f"contexts.shape: {contexts.shape}")
print(f"labels.shape: {labels.shape}")
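# (Added note, not in the original notebook.) With num_ns = 4 (as passed above),
# the expected shapes are: targets -> (N,), contexts -> (N, 5), labels -> (N, 5),
# where N is the total number of generated training examples.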
###Output
_____no_output_____
###Markdown
Configure the dataset for performance To perform efficient batching for the potentially large number of training examples, use the `tf.data.Dataset` API. After this step, you would have a `tf.data.Dataset` object of `(target_word, context_word), (label)` elements to train your Word2Vec model!
###Code
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
###Output
_____no_output_____
###Markdown
Add `cache()` and `prefetch()` to improve performance.
###Code
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
###Output
_____no_output_____
###Markdown
Model and Training The Word2Vec model can be implemented as a classifier to distinguish between true context words from skip-grams and false context words obtained through negative sampling. You can perform a dot product between the embeddings of target and context words to obtain predictions for labels and compute loss against true labels in the dataset. Subclassed Word2Vec Model Use the [Keras Subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models) to define your Word2Vec model with the following layers:* `target_embedding`: A `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a target word. The number of parameters in this layer are `(vocab_size * embedding_dim)`.* `context_embedding`: Another `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a context word. The number of parameters in this layer are the same as those in `target_embedding`, i.e. `(vocab_size * embedding_dim)`.* `dots`: A `tf.keras.layers.Dot` layer that computes the dot product of target and context embeddings from a training pair.* `flatten`: A `tf.keras.layers.Flatten` layer to flatten the results of `dots` layer into logits.With the subclassed model, you can define the `call()` function that accepts `(target, context)` pairs which can then be passed into their corresponding embedding layer. Reshape the `context_embedding` to perform a dot product with `target_embedding` and return the flattened result. Key point: The `target_embedding` and `context_embedding` layers can be shared as well. You could also use a concatenation of both embeddings as the final Word2Vec embedding.
###Code
class Word2Vec(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim):
super(Word2Vec, self).__init__()
self.target_embedding = layers.Embedding(vocab_size,
embedding_dim,
input_length=1,
name="w2v_embedding")
self.context_embedding = layers.Embedding(vocab_size,
embedding_dim,
input_length=num_ns+1)
def call(self, pair):
target, context = pair
# target: (batch, dummy?) # The dummy axis doesn't exist in TF2.7+
# context: (batch, context)
if len(target.shape) == 2:
target = tf.squeeze(target, axis=1)
# target: (batch,)
word_emb = self.target_embedding(target)
# word_emb: (batch, embed)
context_emb = self.context_embedding(context)
# context_emb: (batch, context, embed)
dots = tf.einsum('be,bce->bc', word_emb, context_emb)
# dots: (batch, context)
return dots
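# (Added illustration, not part of the original notebook.) A quick sanity check of
# the einsum above: for a batch of 3 (target, context) pairs, the returned logits
# should have shape (batch, num_ns + 1). The embedding size of 4 is arbitrary.
_check_model = Word2Vec(vocab_size, embedding_dim=4)
_check_logits = _check_model((tf.zeros((3,), dtype=tf.int64),
                              tf.zeros((3, num_ns + 1), dtype=tf.int64)))
print(_check_logits.shape)  # expected: (3, num_ns + 1)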
###Output
_____no_output_____
###Markdown
Define loss function and compile model For simplicity, you can use `tf.keras.losses.CategoricalCrossentropy` as an alternative to the negative sampling loss. If you would like to write your own custom loss function, you can also do so as follows: `def custom_loss(x_logit, y_true): return tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=y_true)`. It's time to build your model! Instantiate your Word2Vec class with an embedding dimension of 128 (you could experiment with different values). Compile the model with the `tf.keras.optimizers.Adam` optimizer.
###Code
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
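# (Added sketch, not part of the original notebook.) If you wanted the custom loss
# mentioned in the text instead, it could look roughly like this. The argument order
# is adapted to the (y_true, y_pred) convention Keras losses use, and the cast is
# needed because tf.nn.sigmoid_cross_entropy_with_logits expects float labels.
def custom_loss(y_true, y_pred):
  return tf.nn.sigmoid_cross_entropy_with_logits(
      logits=y_pred, labels=tf.cast(y_true, tf.float32))
# word2vec_custom = Word2Vec(vocab_size, embedding_dim)
# word2vec_custom.compile(optimizer='adam', loss=custom_loss, metrics=['accuracy'])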
###Output
_____no_output_____
###Markdown
Also define a callback to log training statistics for tensorboard.
###Code
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
###Output
_____no_output_____
###Markdown
Train the model with `dataset` prepared above for some number of epochs.
###Code
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
###Output
_____no_output_____
###Markdown
TensorBoard now shows the Word2Vec model's accuracy and loss.
###Code
#docs_infra: no_execute
%tensorboard --logdir logs
###Output
_____no_output_____
###Markdown
Embedding lookup and analysis Obtain the weights from the model using `get_layer()` and `get_weights()`. The `get_vocabulary()` function provides the vocabulary to build a metadata file with one token per line.
###Code
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
###Output
_____no_output_____
###Markdown
Create and save the vectors and metadata file.
###Code
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
if index == 0:
continue # skip 0, it's padding.
vec = weights[index]
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_m.write(word + "\n")
out_v.close()
out_m.close()
###Output
_____no_output_____
###Markdown
Download the `vectors.tsv` and `metadata.tsv` to analyze the obtained embeddings in the [Embedding Projector](https://projector.tensorflow.org/).
###Code
try:
from google.colab import files
files.download('vectors.tsv')
files.download('metadata.tsv')
except Exception:
pass
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Word2Vec Word2Vec is not a singular algorithm, rather, it is a family of model architectures and optimizations that can be used to learn word embeddings from large datasets. Embeddings learned through Word2Vec have proven to be successful on a variety of downstream natural language processing tasks.Note: This tutorial is based on [Efficient Estimation of Word Representations in Vector Space](https://arxiv.org/pdf/1301.3781.pdf) and[DistributedRepresentations of Words and Phrases and their Compositionality](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). It is not an exact implementation of the papers. Rather, it is intended to illustrate the key ideas.These papers proposed two methods for learning representations of words: * **Continuous Bag-of-Words Model** which predicts the middle word based on surrounding context words. The context consists of a few words before and after the current (middle) word. This architecture is called a bag-of-words model as the order of words in the context is not important.* **Continuous Skip-gram Model** which predict words within a certain range before and after the current word in the same sentence. A worked example of this is given below.You'll use the skip-gram approach in this tutorial. First, you'll explore skip-grams and other concepts using a single sentence for illustration. Next, you'll train your own Word2Vec model on a small dataset. This tutorial also contains code to export the trained embeddings and visualize them in the [TensorFlow Embedding Projector](http://projector.tensorflow.org/). Skip-gram and Negative Sampling While a bag-of-words model predicts a word given the neighboring context, a skip-gram model predicts the context (or neighbors) of a word, given the word itself. The model is trained on skip-grams, which are n-grams that allow tokens to be skipped (see the diagram below for an example). The context of a word can be represented through a set of skip-gram pairs of `(target_word, context_word)` where `context_word` appears in the neighboring context of `target_word`. Consider the following sentence of 8 words.> The wide road shimmered in the hot sun. The context words for each of the 8 words of this sentence are defined by a window size. The window size determines the span of words on either side of a `target_word` that can be considered `context word`. Take a look at this table of skip-grams for target words based on different window sizes. Note: For this tutorial, a window size of *n* implies n words on each side with a total window span of 2*n+1 words across a word.  The training objective of the skip-gram model is to maximize the probability of predicting context words given the target word. For a sequence of words *w1, w2, ... wT*, the objective can be written as the average log probability  where `c` is the size of the training context. The basic skip-gram formulation defines this probability using the softmax function.  where *v* and *v'* are target and context vector representations of words and *W* is vocabulary size. Computing the denominator of this formulation involves performing a full softmax over the entire vocabulary words which is often large (105-107) terms. The [Noise Contrastive Estimation](https://www.tensorflow.org/api_docs/python/tf/nn/nce_loss) loss function is an efficient approximation for a full softmax. 
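For reference, the two formulas referred to above appear as images in the original notebook and are not reproduced in this text dump; written out (as an assumed reconstruction following the notation of Mikolov et al.), the average log probability objective is $$\frac{1}{T}\sum_{t=1}^{T}\sum_{-c \le j \le c,\, j \ne 0} \log p(w_{t+j} \mid w_t)$$ and the basic skip-gram softmax formulation is $$p(w_O \mid w_I) = \frac{\exp\left({v'_{w_O}}^{\top} v_{w_I}\right)}{\sum_{w=1}^{W} \exp\left({v'_{w}}^{\top} v_{w_I}\right)}$$ where $v_w$ and $v'_w$ are the target and context vector representations of word $w$, and $W$ is the vocabulary size.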
With an objective to learn word embeddings instead of modelling the word distribution, NCE loss can be [simplified](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) to use negative sampling. The simplified negative sampling objective for a target word is to distinguish the context word from *num_ns* negative samples drawn from noise distribution *Pn(w)* of words. More precisely, an efficient approximation of full softmax over the vocabulary is, for a skip-gram pair, to pose the loss for a target word as a classification problem between the context word and *num_ns* negative samples. A negative sample is defined as a (target_word, context_word) pair such that the context_word does not appear in the `window_size` neighborhood of the target_word. For the example sentence, these are few potential negative samples (when `window_size` is 2).```(hot, shimmered)(wide, hot)(wide, sun)``` In the next section, you'll generate skip-grams and negative samples for a single sentence. You'll also learn about subsampling techniques and train a classification model for positive and negative training examples later in the tutorial. Setup
###Code
!pip install tqdm
import io
import itertools
import numpy as np
import os
import re
import string
import tensorflow as tf
import tqdm
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Activation, Dense, Dot, Embedding, Flatten, GlobalAveragePooling1D, Reshape
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
SEED = 42
AUTOTUNE = tf.data.AUTOTUNE
###Output
_____no_output_____
###Markdown
Vectorize an example sentence Consider the following sentence: `The wide road shimmered in the hot sun.`Tokenize the sentence:
###Code
sentence = "The wide road shimmered in the hot sun"
tokens = list(sentence.lower().split())
print(len(tokens))
###Output
_____no_output_____
###Markdown
Create a vocabulary to save mappings from tokens to integer indices.
###Code
vocab, index = {}, 1 # start indexing from 1
vocab['<pad>'] = 0 # add a padding token
for token in tokens:
if token not in vocab:
vocab[token] = index
index += 1
vocab_size = len(vocab)
print(vocab)
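# (Added note, not in the original notebook.) For the example sentence this should
# print: {'<pad>': 0, 'the': 1, 'wide': 2, 'road': 3, 'shimmered': 4, 'in': 5,
# 'hot': 6, 'sun': 7}, i.e. vocab_size == 8.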
###Output
_____no_output_____
###Markdown
Create an inverse vocabulary to save mappings from integer indices to tokens.
###Code
inverse_vocab = {index: token for token, index in vocab.items()}
print(inverse_vocab)
###Output
_____no_output_____
###Markdown
Vectorize your sentence.
###Code
example_sequence = [vocab[word] for word in tokens]
print(example_sequence)
###Output
_____no_output_____
###Markdown
Generate skip-grams from one sentence The `tf.keras.preprocessing.sequence` module provides useful functions that simplify data preparation for Word2Vec. You can use the `tf.keras.preprocessing.sequence.skipgrams` to generate skip-gram pairs from the `example_sequence` with a given `window_size` from tokens in the range `[0, vocab_size)`.Note: `negative_samples` is set to `0` here as batching negative samples generated by this function requires a bit of code. You will use another function to perform negative sampling in the next section.
###Code
window_size = 2
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
example_sequence,
vocabulary_size=vocab_size,
window_size=window_size,
negative_samples=0)
print(len(positive_skip_grams))
###Output
_____no_output_____
###Markdown
Take a look at a few positive skip-grams.
###Code
for target, context in positive_skip_grams[:5]:
print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
###Output
_____no_output_____
###Markdown
Negative sampling for one skip-gram The `skipgrams` function returns all positive skip-gram pairs by sliding over a given window span. To produce additional skip-gram pairs that would serve as negative samples for training, you need to sample random words from the vocabulary. Use the `tf.random.log_uniform_candidate_sampler` function to sample `num_ns` number of negative samples for a given target word in a window. You can call the function on one skip-gram's target word and pass the context word as the true class to exclude it from being sampled. Key point: *num_ns* (number of negative samples per positive context word) between [5, 20] is [shown to work](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) best for smaller datasets, while *num_ns* between [2,5] suffices for larger datasets.
###Code
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class, # class that should be sampled as 'positive'
num_true=1, # each positive skip-gram has 1 positive context class
num_sampled=num_ns, # number of negative context words to sample
unique=True, # all the negative samples should be unique
range_max=vocab_size, # pick index of the samples from [0, vocab_size]
seed=SEED, # seed for reproducibility
name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
###Output
_____no_output_____
###Markdown
Construct one training example For a given positive `(target_word, context_word)` skip-gram, you now also have `num_ns` negative sampled context words that do not appear in the window size neighborhood of `target_word`. Batch the `1` positive `context_word` and `num_ns` negative context words into one tensor. This produces a set of positive skip-grams (labelled as `1`) and negative samples (labelled as `0`) for each target word.
###Code
# Add a dimension so you can use concatenation (on the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concat positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label first context word as 1 (positive) followed by num_ns 0s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape target to shape (1,) and context and label to (num_ns+1,).
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
###Output
_____no_output_____
###Markdown
Take a look at the context and the corresponding labels for the target word from the skip-gram example above.
###Code
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
###Output
_____no_output_____
###Markdown
A tuple of `(target, context, label)` tensors constitutes one training example for training your skip-gram negative sampling Word2Vec model. Notice that the target is of shape `(1,)` while the context and label are of shape `(1+num_ns,)`
###Code
print(f"target :", target)
print(f"context :", context )
print(f"label :", label )
###Output
_____no_output_____
###Markdown
Summary This picture summarizes the procedure of generating a training example from a sentence. Compile all steps into one function Skip-gram Sampling table A large dataset means a larger vocabulary with a higher number of more frequent words such as stopwords. Training examples obtained from sampling commonly occurring words (such as `the`, `is`, `on`) don't add much useful information for the model to learn from. [Mikolov et al.](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) suggest subsampling of frequent words as a helpful practice to improve embedding quality. The `tf.keras.preprocessing.sequence.skipgrams` function accepts a sampling table argument to encode probabilities of sampling any token. You can use the `tf.keras.preprocessing.sequence.make_sampling_table` to generate a word-frequency rank based probabilistic sampling table and pass it to the `skipgrams` function. Take a look at the sampling probabilities for a `vocab_size` of 10.
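For reference (the original notebook does not spell this out, so treat it as a reconstruction), the subsampling heuristic proposed by Mikolov et al. discards each occurrence of a word $w_i$ with probability $$P(w_i) = 1 - \sqrt{\frac{t}{f(w_i)}}$$ where $f(w_i)$ is the word's frequency and $t$ is a small threshold (around $10^{-5}$); `tf.keras.preprocessing.sequence.make_sampling_table` implements a related rank-based approximation that assumes Zipf-distributed frequencies rather than this exact formula.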
###Code
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
###Output
_____no_output_____
###Markdown
`sampling_table[i]` denotes the probability of sampling the i-th most common word in a dataset. The function assumes a [Zipf's distribution](https://en.wikipedia.org/wiki/Zipf%27s_law) of the word frequencies for sampling. Key point: The `tf.random.log_uniform_candidate_sampler` already assumes that the vocabulary frequency follows a log-uniform (Zipf's) distribution. Using this distribution-weighted sampling also helps approximate the Noise Contrastive Estimation (NCE) loss with simpler loss functions for training a negative sampling objective. Generate training data Compile all the steps described above into a function that can be called on a list of vectorized sentences obtained from any text dataset. Notice that the sampling table is built before sampling skip-gram word pairs. You will use this function in the later sections.
###Code
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
# Elements of each training example are appended to these lists.
targets, contexts, labels = [], [], []
# Build the sampling table for vocab_size tokens.
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
# Iterate over all sequences (sentences) in dataset.
for sequence in tqdm.tqdm(sequences):
# Generate positive skip-gram pairs for a sequence (sentence).
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
sequence,
vocabulary_size=vocab_size,
sampling_table=sampling_table,
window_size=window_size,
negative_samples=0)
# Iterate over each positive skip-gram pair to produce training examples
# with positive context word and negative samples.
for target_word, context_word in positive_skip_grams:
context_class = tf.expand_dims(
tf.constant([context_word], dtype="int64"), 1)
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class,
num_true=1,
num_sampled=num_ns,
unique=True,
range_max=vocab_size,
seed=SEED,
name="negative_sampling")
# Build context and label vectors (for one target word)
negative_sampling_candidates = tf.expand_dims(
negative_sampling_candidates, 1)
context = tf.concat([context_class, negative_sampling_candidates], 0)
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Append each element from the training example to global lists.
targets.append(target_word)
contexts.append(context)
labels.append(label)
return targets, contexts, labels
###Output
_____no_output_____
###Markdown
Prepare training data for Word2Vec With an understanding of how to work with one sentence for a skip-gram negative sampling based Word2Vec model, you can proceed to generate training examples from a larger list of sentences! Download text corpus You will use a text file of Shakespeare's writing for this tutorial. Change the following line to run this code on your own data.
###Code
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
###Output
_____no_output_____
###Markdown
Read text from the file and take a look at the first few lines.
###Code
with open(path_to_file) as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
###Output
_____no_output_____
###Markdown
Use the non-empty lines to construct a `tf.data.TextLineDataset` object for the next steps.
###Code
text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
###Output
_____no_output_____
###Markdown
Vectorize sentences from the corpus You can use the `TextVectorization` layer to vectorize sentences from the corpus. Learn more about using this layer in this [Text Classification](https://www.tensorflow.org/tutorials/keras/text_classification) tutorial. Notice from the first few sentences above that the text needs to be in one case and punctuation needs to be removed. To do this, define a `custom_standardization function` that can be used in the TextVectorization layer.
###Code
# We create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
return tf.strings.regex_replace(lowercase,
'[%s]' % re.escape(string.punctuation), '')
# Define the vocabulary size and number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the text vectorization layer to normalize, split, and map strings to
# integers. Set output_sequence_length length to pad all samples to same length.
vectorize_layer = TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
###Output
_____no_output_____
###Markdown
Call `adapt` on the text dataset to create vocabulary.
###Code
vectorize_layer.adapt(text_ds.batch(1024))
###Output
_____no_output_____
###Markdown
Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with `get_vocabulary()`. This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
###Code
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
###Output
_____no_output_____
###Markdown
The vectorize_layer can now be used to generate vectors for each element in the `text_ds`.
###Code
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
###Output
_____no_output_____
###Markdown
Obtain sequences from the dataset You now have a `tf.data.Dataset` of integer encoded sentences. To prepare the dataset for training a Word2Vec model, flatten the dataset into a list of sentence vector sequences. This step is required as you would iterate over each sentence in the dataset to produce positive and negative examples. Note: Since the `generate_training_data()` defined earlier uses non-TF python/numpy functions, you could also use a `tf.py_function` or `tf.numpy_function` with `tf.data.Dataset.map()`.
###Code
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
###Output
_____no_output_____
###Markdown
Take a look at a few examples from `sequences`.
###Code
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
###Output
_____no_output_____
###Markdown
Generate training examples from sequences `sequences` is now a list of int encoded sentences. Just call the `generate_training_data()` function defined earlier to generate training examples for the Word2Vec model. To recap, the function iterates over each word from each sequence to collect positive and negative context words. Length of target, contexts and labels should be same, representing the total number of training examples.
###Code
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
print(len(targets), len(contexts), len(labels))
###Output
_____no_output_____
###Markdown
Configure the dataset for performance To perform efficient batching for the potentially large number of training examples, use the `tf.data.Dataset` API. After this step, you would have a `tf.data.Dataset` object of `(target_word, context_word), (label)` elements to train your Word2Vec model!
###Code
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
###Output
_____no_output_____
###Markdown
Add `cache()` and `prefetch()` to improve performance.
###Code
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
###Output
_____no_output_____
###Markdown
Model and Training The Word2Vec model can be implemented as a classifier to distinguish between true context words from skip-grams and false context words obtained through negative sampling. You can perform a dot product between the embeddings of target and context words to obtain predictions for labels and compute loss against true labels in the dataset. Subclassed Word2Vec Model Use the [Keras Subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models) to define your Word2Vec model with the following layers:* `target_embedding`: A `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a target word. The number of parameters in this layer are `(vocab_size * embedding_dim)`.* `context_embedding`: Another `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a context word. The number of parameters in this layer are the same as those in `target_embedding`, i.e. `(vocab_size * embedding_dim)`.* `dots`: A `tf.keras.layers.Dot` layer that computes the dot product of target and context embeddings from a training pair.* `flatten`: A `tf.keras.layers.Flatten` layer to flatten the results of `dots` layer into logits.With the sublassed model, you can define the `call()` function that accepts `(target, context)` pairs which can then be passed into their corresponding embedding layer. Reshape the `context_embedding` to perform a dot product with `target_embedding` and return the flattened result. Key point: The `target_embedding` and `context_embedding` layers can be shared as well. You could also use a concatenation of both embeddings as the final Word2Vec embedding.
###Code
class Word2Vec(Model):
def __init__(self, vocab_size, embedding_dim):
super(Word2Vec, self).__init__()
self.target_embedding = Embedding(vocab_size,
embedding_dim,
input_length=1,
name="w2v_embedding", )
self.context_embedding = Embedding(vocab_size,
embedding_dim,
input_length=num_ns+1)
self.dots = Dot(axes=(3,2))
self.flatten = Flatten()
def call(self, pair):
target, context = pair
we = self.target_embedding(target)
ce = self.context_embedding(context)
dots = self.dots([ce, we])
return self.flatten(dots)
###Output
_____no_output_____
###Markdown
Define loss function and compile model For simplicity, you can use `tf.keras.losses.CategoricalCrossentropy` as an alternative to the negative sampling loss. If you would like to write your own custom loss function, you can also do so as follows: `def custom_loss(x_logit, y_true): return tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=y_true)`. It's time to build your model! Instantiate your Word2Vec class with an embedding dimension of 128 (you could experiment with different values). Compile the model with the `tf.keras.optimizers.Adam` optimizer.
###Code
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Also define a callback to log training statistics for tensorboard.
###Code
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
###Output
_____no_output_____
###Markdown
Train the model with `dataset` prepared above for some number of epochs.
###Code
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
###Output
_____no_output_____
###Markdown
TensorBoard now shows the Word2Vec model's accuracy and loss; you can launch it with `%tensorboard --logdir logs`. Embedding lookup and analysis Obtain the weights from the model using `get_layer()` and `get_weights()`. The `get_vocabulary()` function provides the vocabulary to build a metadata file with one token per line.
###Code
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
###Output
_____no_output_____
###Markdown
Create and save the vectors and metadata file.
###Code
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
if index == 0: continue # skip 0, it's padding.
vec = weights[index]
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_m.write(word + "\n")
out_v.close()
out_m.close()
###Output
_____no_output_____
###Markdown
Download the `vectors.tsv` and `metadata.tsv` to analyze the obtained embeddings in the [Embedding Projector](https://projector.tensorflow.org/).
###Code
try:
from google.colab import files
files.download('vectors.tsv')
files.download('metadata.tsv')
except Exception as e:
pass
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook word2vec word2vec is not a singular algorithm, rather, it is a family of model architectures and optimizations that can be used to learn word embeddings from large datasets. Embeddings learned through word2vec have proven to be successful on a variety of downstream natural language processing tasks.Note: This tutorial is based on [Efficient estimation of word representations in vector space](https://arxiv.org/pdf/1301.3781.pdf) and [Distributed representations of words and phrases and their compositionality](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). It is not an exact implementation of the papers. Rather, it is intended to illustrate the key ideas.These papers proposed two methods for learning representations of words:* **Continuous bag-of-words model**: predicts the middle word based on surrounding context words. The context consists of a few words before and after the current (middle) word. This architecture is called a bag-of-words model as the order of words in the context is not important.* **Continuous skip-gram model**: predicts words within a certain range before and after the current word in the same sentence. A worked example of this is given below.You'll use the skip-gram approach in this tutorial. First, you'll explore skip-grams and other concepts using a single sentence for illustration. Next, you'll train your own word2vec model on a small dataset. This tutorial also contains code to export the trained embeddings and visualize them in the [TensorFlow Embedding Projector](http://projector.tensorflow.org/). Skip-gram and negative sampling While a bag-of-words model predicts a word given the neighboring context, a skip-gram model predicts the context (or neighbors) of a word, given the word itself. The model is trained on skip-grams, which are n-grams that allow tokens to be skipped (see the diagram below for an example). The context of a word can be represented through a set of skip-gram pairs of `(target_word, context_word)` where `context_word` appears in the neighboring context of `target_word`. Consider the following sentence of eight words:> The wide road shimmered in the hot sun.The context words for each of the 8 words of this sentence are defined by a window size. The window size determines the span of words on either side of a `target_word` that can be considered a `context word`. Below is a table of skip-grams for target words based on different window sizes. Note: For this tutorial, a window size of `n` implies n words on each side with a total window span of 2*n+1 words across a word.  The training objective of the skip-gram model is to maximize the probability of predicting context words given the target word. For a sequence of words *w1, w2, ... wT*, the objective can be written as the average log probability  where `c` is the size of the training context. The basic skip-gram formulation defines this probability using the softmax function.  where *v* and *v'* are target and context vector representations of words and *W* is vocabulary size. Computing the denominator of this formulation involves performing a full softmax over the entire vocabulary words, which are often large (105-107) terms. The [noise contrastive estimation](https://www.tensorflow.org/api_docs/python/tf/nn/nce_loss) (NCE) loss function is an efficient approximation for a full softmax. 
With an objective to learn word embeddings instead of modeling the word distribution, the NCE loss can be [simplified](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) to use negative sampling. The simplified negative sampling objective for a target word is to distinguish the context word from `num_ns` negative samples drawn from noise distribution *Pn(w)* of words. More precisely, an efficient approximation of full softmax over the vocabulary is, for a skip-gram pair, to pose the loss for a target word as a classification problem between the context word and `num_ns` negative samples. A negative sample is defined as a `(target_word, context_word)` pair such that the `context_word` does not appear in the `window_size` neighborhood of the `target_word`. For the example sentence, these are a few potential negative samples (when `window_size` is `2`).```(hot, shimmered)(wide, hot)(wide, sun)``` In the next section, you'll generate skip-grams and negative samples for a single sentence. You'll also learn about subsampling techniques and train a classification model for positive and negative training examples later in the tutorial. Setup
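For reference, the simplified negative sampling objective described above can be written out for one skip-gram pair $(w_I, w_O)$ with $k$ = `num_ns` negatives as $$\log \sigma\!\left({v'_{w_O}}^{\top} v_{w_I}\right) + \sum_{i=1}^{k} \mathbb{E}_{w_i \sim P_n(w)}\left[\log \sigma\!\left(-{v'_{w_i}}^{\top} v_{w_I}\right)\right]$$ where $\sigma$ is the sigmoid function; this is a reconstruction following Mikolov et al. rather than part of the original notebook, and maximizing it amounts to distinguishing the true context word from the $k$ sampled negatives, which is the classification problem the rest of the tutorial sets up.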
###Code
import io
import re
import string
import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
# Load the TensorBoard notebook extension
%load_ext tensorboard
SEED = 42
AUTOTUNE = tf.data.AUTOTUNE
###Output
_____no_output_____
###Markdown
Vectorize an example sentence Consider the following sentence:> The wide road shimmered in the hot sun.Tokenize the sentence:
###Code
sentence = "The wide road shimmered in the hot sun"
tokens = list(sentence.lower().split())
print(len(tokens))
###Output
_____no_output_____
###Markdown
Create a vocabulary to save mappings from tokens to integer indices:
###Code
vocab, index = {}, 1 # start indexing from 1
vocab['<pad>'] = 0 # add a padding token
for token in tokens:
if token not in vocab:
vocab[token] = index
index += 1
vocab_size = len(vocab)
print(vocab)
###Output
_____no_output_____
###Markdown
Create an inverse vocabulary to save mappings from integer indices to tokens:
###Code
inverse_vocab = {index: token for token, index in vocab.items()}
print(inverse_vocab)
###Output
_____no_output_____
###Markdown
Vectorize your sentence:
###Code
example_sequence = [vocab[word] for word in tokens]
print(example_sequence)
###Output
_____no_output_____
###Markdown
Generate skip-grams from one sentence The `tf.keras.preprocessing.sequence` module provides useful functions that simplify data preparation for word2vec. You can use the `tf.keras.preprocessing.sequence.skipgrams` to generate skip-gram pairs from the `example_sequence` with a given `window_size` from tokens in the range `[0, vocab_size)`.Note: `negative_samples` is set to `0` here, as batching negative samples generated by this function requires a bit of code. You will use another function to perform negative sampling in the next section.
###Code
window_size = 2
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
example_sequence,
vocabulary_size=vocab_size,
window_size=window_size,
negative_samples=0)
print(len(positive_skip_grams))
###Output
_____no_output_____
###Markdown
Print a few positive skip-grams:
###Code
for target, context in positive_skip_grams[:5]:
print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
###Output
_____no_output_____
###Markdown
Negative sampling for one skip-gram The `skipgrams` function returns all positive skip-gram pairs by sliding over a given window span. To produce additional skip-gram pairs that would serve as negative samples for training, you need to sample random words from the vocabulary. Use the `tf.random.log_uniform_candidate_sampler` function to sample `num_ns` number of negative samples for a given target word in a window. You can call the function on one skip-grams's target word and pass the context word as true class to exclude it from being sampled. Key point: `num_ns` (the number of negative samples per a positive context word) in the `[5, 20]` range is [shown to work](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) best for smaller datasets, while `num_ns` in the `[2, 5]` range suffices for larger datasets.
###Code
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class, # class that should be sampled as 'positive'
num_true=1, # each positive skip-gram has 1 positive context class
num_sampled=num_ns, # number of negative context words to sample
unique=True, # all the negative samples should be unique
range_max=vocab_size, # pick index of the samples from [0, vocab_size]
seed=SEED, # seed for reproducibility
name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
###Output
_____no_output_____
###Markdown
Construct one training example For a given positive `(target_word, context_word)` skip-gram, you now also have `num_ns` negative sampled context words that do not appear in the window size neighborhood of `target_word`. Batch the `1` positive `context_word` and `num_ns` negative context words into one tensor. This produces a set of positive skip-grams (labeled as `1`) and negative samples (labeled as `0`) for each target word.
###Code
# Add a dimension so you can use concatenation (in the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concatenate a positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label the first context word as `1` (positive) followed by `num_ns` `0`s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape the target to shape `(1,)` and context and label to `(num_ns+1,)`.
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
###Output
_____no_output_____
###Markdown
Check out the context and the corresponding labels for the target word from the skip-gram example above:
###Code
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
###Output
_____no_output_____
###Markdown
A tuple of `(target, context, label)` tensors constitutes one training example for training your skip-gram negative sampling word2vec model. Notice that the target is of shape `(1,)` while the context and label are of shape `(1+num_ns,)`
###Code
print("target :", target)
print("context :", context)
print("label :", label)
###Output
_____no_output_____
###Markdown
Summary This diagram summarizes the procedure of generating a training example from a sentence:  Notice that the words `temperature` and `code` are not part of the input sentence. They belong to the vocabulary like certain other indices used in the diagram above. Compile all steps into one function Skip-gram sampling table A large dataset means larger vocabulary with higher number of more frequent words such as stopwords. Training examples obtained from sampling commonly occurring words (such as `the`, `is`, `on`) don't add much useful information for the model to learn from. [Mikolov et al.](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) suggest subsampling of frequent words as a helpful practice to improve embedding quality. The `tf.keras.preprocessing.sequence.skipgrams` function accepts a sampling table argument to encode probabilities of sampling any token. You can use the `tf.keras.preprocessing.sequence.make_sampling_table` to generate a word-frequency rank based probabilistic sampling table and pass it to the `skipgrams` function. Inspect the sampling probabilities for a `vocab_size` of 10.
###Code
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
###Output
_____no_output_____
###Markdown
`sampling_table[i]` denotes the probability of sampling the i-th most common word in a dataset. The function assumes a [Zipf's distribution](https://en.wikipedia.org/wiki/Zipf%27s_law) of the word frequencies for sampling. Key point: The `tf.random.log_uniform_candidate_sampler` already assumes that the vocabulary frequency follows a log-uniform (Zipf's) distribution. Using these distribution weighted sampling also helps approximate the Noise Contrastive Estimation (NCE) loss with simpler loss functions for training a negative sampling objective. Generate training data Compile all the steps described above into a function that can be called on a list of vectorized sentences obtained from any text dataset. Notice that the sampling table is built before sampling skip-gram word pairs. You will use this function in the later sections.
###Code
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
# Elements of each training example are appended to these lists.
targets, contexts, labels = [], [], []
# Build the sampling table for `vocab_size` tokens.
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
# Iterate over all sequences (sentences) in the dataset.
for sequence in tqdm.tqdm(sequences):
# Generate positive skip-gram pairs for a sequence (sentence).
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
sequence,
vocabulary_size=vocab_size,
sampling_table=sampling_table,
window_size=window_size,
negative_samples=0)
# Iterate over each positive skip-gram pair to produce training examples
# with a positive context word and negative samples.
for target_word, context_word in positive_skip_grams:
context_class = tf.expand_dims(
tf.constant([context_word], dtype="int64"), 1)
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class,
num_true=1,
num_sampled=num_ns,
unique=True,
range_max=vocab_size,
seed=SEED,
name="negative_sampling")
# Build context and label vectors (for one target word)
negative_sampling_candidates = tf.expand_dims(
negative_sampling_candidates, 1)
context = tf.concat([context_class, negative_sampling_candidates], 0)
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Append each element from the training example to global lists.
targets.append(target_word)
contexts.append(context)
labels.append(label)
return targets, contexts, labels
###Output
_____no_output_____
###Markdown
Prepare training data for word2vec With an understanding of how to work with one sentence for a skip-gram negative sampling based word2vec model, you can proceed to generate training examples from a larger list of sentences! Download text corpus You will use a text file of Shakespeare's writing for this tutorial. Change the following line to run this code on your own data.
###Code
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
###Output
_____no_output_____
###Markdown
Read the text from the file and print the first few lines:
###Code
with open(path_to_file) as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
###Output
_____no_output_____
###Markdown
Use the non-empty lines to construct a `tf.data.TextLineDataset` object for the next steps:
###Code
text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
###Output
_____no_output_____
###Markdown
Vectorize sentences from the corpus You can use the `TextVectorization` layer to vectorize sentences from the corpus. Learn more about using this layer in this [Text classification](https://www.tensorflow.org/tutorials/keras/text_classification) tutorial. Notice from the first few sentences above that the text needs to be in one case and punctuation needs to be removed. To do this, define a `custom_standardization function` that can be used in the TextVectorization layer.
###Code
# Now, create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
return tf.strings.regex_replace(lowercase,
'[%s]' % re.escape(string.punctuation), '')
# Define the vocabulary size and the number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the `TextVectorization` layer to normalize, split, and map strings to
# integers. Set the `output_sequence_length` length to pad all samples to the
# same length.
vectorize_layer = layers.TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
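# (Added illustration, not part of the original notebook.) The standardization can
# be spot-checked on a raw string; punctuation is stripped and the case lowered:
# custom_standardization(tf.constant("The wide road shimmered, in the HOT sun!"))
# -> b'the wide road shimmered in the hot sun'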
###Output
_____no_output_____
###Markdown
Call `TextVectorization.adapt` on the text dataset to create vocabulary.
###Code
vectorize_layer.adapt(text_ds.batch(1024))
###Output
_____no_output_____
###Markdown
Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with `TextVectorization.get_vocabulary`. This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
###Code
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
###Output
_____no_output_____
###Markdown
The `vectorize_layer` can now be used to generate vectors for each element in the `text_ds` (a `tf.data.Dataset`). Apply `Dataset.batch`, `Dataset.prefetch`, `Dataset.map`, and `Dataset.unbatch`.
###Code
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
###Output
_____no_output_____
###Markdown
Obtain sequences from the dataset You now have a `tf.data.Dataset` of integer encoded sentences. To prepare the dataset for training a word2vec model, flatten the dataset into a list of sentence vector sequences. This step is required as you would iterate over each sentence in the dataset to produce positive and negative examples.Note: Since the `generate_training_data()` defined earlier uses non-TensorFlow Python/NumPy functions, you could also use a `tf.py_function` or `tf.numpy_function` with `tf.data.Dataset.map`.
###Code
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
###Output
_____no_output_____
###Markdown
Inspect a few examples from `sequences`:
###Code
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
###Output
_____no_output_____
###Markdown
Generate training examples from sequences `sequences` is now a list of int encoded sentences. Just call the `generate_training_data` function defined earlier to generate training examples for the word2vec model. To recap, the function iterates over each word from each sequence to collect positive and negative context words. Length of target, contexts and labels should be the same, representing the total number of training examples.
###Code
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
targets = np.array(targets)
contexts = np.array(contexts)[:,:,0]
labels = np.array(labels)
print('\n')
print(f"targets.shape: {targets.shape}")
print(f"contexts.shape: {contexts.shape}")
print(f"labels.shape: {labels.shape}")
###Output
_____no_output_____
###Markdown
Configure the dataset for performance To perform efficient batching for the potentially large number of training examples, use the `tf.data.Dataset` API. After this step, you would have a `tf.data.Dataset` object of `(target_word, context_word), (label)` elements to train your word2vec model!
###Code
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
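# (Added note, not in the original notebook.) With BATCH_SIZE = 1024, num_ns = 4 and
# drop_remainder=True, each element of `dataset` is ((targets, contexts), labels)
# with shapes ((1024,), (1024, 5)) and (1024, 5).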
###Output
_____no_output_____
###Markdown
Apply `Dataset.cache` and `Dataset.prefetch` to improve performance:
###Code
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
###Output
_____no_output_____
###Markdown
Model and training The word2vec model can be implemented as a classifier to distinguish between true context words from skip-grams and false context words obtained through negative sampling. You can perform a dot product multiplication between the embeddings of target and context words to obtain predictions for labels and compute the loss function against true labels in the dataset. Subclassed word2vec model Use the [Keras Subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models) to define your word2vec model with the following layers:* `target_embedding`: A `tf.keras.layers.Embedding` layer, which looks up the embedding of a word when it appears as a target word. The number of parameters in this layer are `(vocab_size * embedding_dim)`.* `context_embedding`: Another `tf.keras.layers.Embedding` layer, which looks up the embedding of a word when it appears as a context word. The number of parameters in this layer are the same as those in `target_embedding`, i.e. `(vocab_size * embedding_dim)`.* `dots`: A `tf.keras.layers.Dot` layer that computes the dot product of target and context embeddings from a training pair.* `flatten`: A `tf.keras.layers.Flatten` layer to flatten the results of `dots` layer into logits.With the subclassed model, you can define the `call()` function that accepts `(target, context)` pairs which can then be passed into their corresponding embedding layer. Reshape the `context_embedding` to perform a dot product with `target_embedding` and return the flattened result. Key point: The `target_embedding` and `context_embedding` layers can be shared as well. You could also use a concatenation of both embeddings as the final word2vec embedding.
###Code
class Word2Vec(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim):
super(Word2Vec, self).__init__()
self.target_embedding = layers.Embedding(vocab_size,
embedding_dim,
input_length=1,
name="w2v_embedding")
self.context_embedding = layers.Embedding(vocab_size,
embedding_dim,
input_length=num_ns+1)
def call(self, pair):
target, context = pair
# target: (batch, dummy?) # The dummy axis doesn't exist in TF2.7+
# context: (batch, context)
if len(target.shape) == 2:
target = tf.squeeze(target, axis=1)
# target: (batch,)
word_emb = self.target_embedding(target)
# word_emb: (batch, embed)
context_emb = self.context_embedding(context)
# context_emb: (batch, context, embed)
dots = tf.einsum('be,bce->bc', word_emb, context_emb)
# dots: (batch, context)
return dots
###Output
_____no_output_____
###Markdown
Define loss function and compile model For simplicity, you can use `tf.keras.losses.CategoricalCrossentropy` as an alternative to the negative sampling loss. If you would like to write your own custom loss function, you can also do so as follows: `def custom_loss(x_logit, y_true): return tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=y_true)`. It's time to build your model! Instantiate your word2vec class with an embedding dimension of 128 (you could experiment with different values). Compile the model with the `tf.keras.optimizers.Adam` optimizer.
###Code
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Also define a callback to log training statistics for TensorBoard:
###Code
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
###Output
_____no_output_____
###Markdown
Train the model on the `dataset` for some number of epochs:
###Code
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
###Output
_____no_output_____
###Markdown
TensorBoard now shows the word2vec model's accuracy and loss:
###Code
#docs_infra: no_execute
%tensorboard --logdir logs
###Output
_____no_output_____
###Markdown
Embedding lookup and analysis Obtain the weights from the model using `Model.get_layer` and `Layer.get_weights`. The `TextVectorization.get_vocabulary` function provides the vocabulary to build a metadata file with one token per line.
###Code
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
###Output
_____no_output_____
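###Markdown
Optional follow-up to the "Key point" above (a sketch, not part of the tutorial): if you prefer a single vector per token that combines both matrices, one option is to concatenate the trained target and context embedding weights. The attribute access below assumes the subclassed `Word2Vec` model defined earlier.
###Code
import numpy as np

# Sketch (not tutorial code): concatenate the two trained embedding matrices.
target_weights = word2vec.get_layer('w2v_embedding').get_weights()[0]  # (vocab_size, embedding_dim)
context_weights = word2vec.context_embedding.get_weights()[0]          # (vocab_size, embedding_dim)
combined_embeddings = np.concatenate([target_weights, context_weights], axis=1)
print(combined_embeddings.shape)  # (vocab_size, 2 * embedding_dim)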
###Markdown
Create and save the vectors and metadata files:
###Code
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
if index == 0:
continue # skip 0, it's padding.
vec = weights[index]
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_m.write(word + "\n")
out_v.close()
out_m.close()
###Output
_____no_output_____
###Markdown
Download the `vectors.tsv` and `metadata.tsv` to analyze the obtained embeddings in the [Embedding Projector](https://projector.tensorflow.org/):
###Code
try:
from google.colab import files
files.download('vectors.tsv')
files.download('metadata.tsv')
except Exception:
pass
###Output
_____no_output_____ |
05_NLP_Augment/SSTModel.ipynb | ###Markdown
###Code
! nvidia-smi
! pip install pytorch-lightning --quiet
! pip install OmegaConf --quiet
! pip install nlpaug --quiet
! pip install gdown==3.13.0
import copy
import torch
import torchtext
import pytorch_lightning as pl
from torch import nn
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader, random_split
from pytorch_lightning.metrics.functional import accuracy
from torchtext.utils import download_from_url, extract_archive
from torchtext.data.utils import get_tokenizer
from torchtext.experimental.functional import sequential_transforms, ngrams_func, totensor, vocab_func
from torchtext.vocab import build_vocab_from_iterator
import torchtext.experimental.functional as text_f
import nlpaug.augmenter.char as nac
import nlpaug.augmenter.word as naw
import nlpaug.augmenter.sentence as nas
import nlpaug.flow as nafc
from nlpaug.util import Action
import random
import gdown
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
from pathlib import Path
from omegaconf import OmegaConf
from zipfile import ZipFile
from typing import Optional, Tuple, Any, Dict, List
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
plt.style.use("dark_background")
class StanfordSentimentTreeBank(Dataset):
"""The Standford Sentiment Tree Bank Dataset
Stanford Sentiment Treebank V1.0
This is the dataset of the paper:
Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank
Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher Manning, Andrew Ng and Christopher Potts
Conference on Empirical Methods in Natural Language Processing (EMNLP 2013)
If you use this dataset in your research, please cite the above paper.
@incollection{SocherEtAl2013:RNTN,
title = {{Parsing With Compositional Vector Grammars}},
author = {Richard Socher and Alex Perelygin and Jean Wu and Jason Chuang and Christopher Manning and Andrew Ng and Christopher Potts},
booktitle = {{EMNLP}},
year = {2013}
}
"""
ORIG_URL = "http://nlp.stanford.edu/~socherr/stanfordSentimentTreebank.zip"
DATASET_NAME = "StanfordSentimentTreeBank"
URL = 'https://drive.google.com/uc?id=1urNi0Rtp9XkvkxxeKytjl1WoYNYUEoPI'
OUTPUT = 'sst_dataset.zip'
def __init__(self, root, vocab=None, text_transforms=None, label_transforms=None, split='train', ngrams=1, use_transformed_dataset=True):
"""Initiate text-classification dataset.
Args:
            data: a list of (label, text string) tuples. label is an integer.
[(label1, text1), (label2, text2), (label2, text3)]
vocab: Vocabulary object used for dataset.
transforms: a tuple of label and text string transforms.
"""
super(self.__class__, self).__init__()
if split not in ['train', 'test']:
            raise ValueError(f'split must be one of ["train", "test"]; unknown split {split}')
self.vocab = vocab
gdown.cached_download(self.URL, Path(root) / self.OUTPUT)
self.generate_sst_dataset(split, Path(root) / self.OUTPUT)
tokenizer = get_tokenizer("basic_english")
# the text transform can only work at the sentence level
# the rest of tokenization and vocab is done by this class
self.text_transform = sequential_transforms(tokenizer, text_f.ngrams_func(ngrams))
def build_vocab(data, transforms):
def apply_transforms(data):
for line in data:
yield transforms(line)
return build_vocab_from_iterator(apply_transforms(data), len(data))
if self.vocab is None:
# vocab is always built on the train dataset
self.vocab = build_vocab(self.dataset_train["phrase"], self.text_transform)
if text_transforms is not None:
self.text_transform = sequential_transforms(
self.text_transform, text_transforms, text_f.vocab_func(self.vocab), text_f.totensor(dtype=torch.long)
)
else:
self.text_transform = sequential_transforms(
self.text_transform, text_f.vocab_func(self.vocab), text_f.totensor(dtype=torch.long)
)
self.label_transform = sequential_transforms(text_f.totensor(dtype=torch.long))
def generate_sst_dataset(self, split, dataset_file):
with ZipFile(dataset_file) as datasetzip:
with datasetzip.open('sst_dataset/sst_dataset_augmented.csv') as f:
dataset = pd.read_csv(f, index_col=0)
self.dataset_orig = dataset.copy()
dataset_train_raw = dataset[dataset['splitset_label'].isin([1, 3])]
self.dataset_train = pd.concat([
dataset_train_raw[['phrase_cleaned', 'sentiment_values']].rename(columns={"phrase_cleaned": 'phrase'}),
dataset_train_raw[['synonym_sentences', 'sentiment_values']].rename(columns={"synonym_sentences": 'phrase'}),
dataset_train_raw[['backtranslated', 'sentiment_values']].rename(columns={"backtranslated": 'phrase'}),
], ignore_index=True)
if split == 'train':
self.dataset = self.dataset_train.copy()
else:
self.dataset = dataset[dataset['splitset_label'].isin([2])] \
[['phrase_cleaned', 'sentiment_values']] \
.rename(columns={"phrase_cleaned": 'phrase'}) \
.reset_index(drop=True)
@staticmethod
def discretize_label(label):
if label <= 0.2: return 0
if label <= 0.4: return 1
if label <= 0.6: return 2
if label <= 0.8: return 3
return 4
def __getitem__(self, idx):
# print(f'text: {self.dataset["sentence"].iloc[idx]}, label: {self.dataset["sentiment_values"].iloc[idx]}')
text = self.text_transform(self.dataset['phrase'].iloc[idx])
label = self.label_transform(self.dataset['sentiment_values'].iloc[idx])
# print(f't_text: {text} {text.shape}, t_label: {label}')
return label, text
def __len__(self):
return len(self.dataset)
@staticmethod
def get_labels():
return ['very negative', 'negative', 'neutral', 'positive', 'very positive']
def get_vocab(self):
return self.vocab
@property
def collator_fn(self):
def collate_fn(batch):
pad_idx = self.get_vocab()['<pad>']
labels, sequences = zip(*batch)
labels = torch.stack(labels)
lengths = torch.LongTensor([len(sequence) for sequence in sequences])
# print('before padding: ', sequences[40])
sequences = torch.nn.utils.rnn.pad_sequence(sequences,
padding_value = pad_idx,
batch_first=True
)
# print('after padding: ', sequences[40])
return labels, sequences, lengths
return collate_fn
class SSTDataModule(pl.LightningDataModule):
"""
DataModule for SST, train, val, test splits and transforms
"""
name = "stanford_sentiment_treebank"
def __init__(
self,
data_dir: str = '.',
val_split: int = 1000,
num_workers: int = 2,
batch_size: int = 64,
*args,
**kwargs,
):
"""
Args:
data_dir: where to save/load the data
val_split: how many of the training images to use for the validation split
num_workers: how many workers to use for loading data
normalize: If true applies image normalize
batch_size: desired batch size.
"""
super().__init__(*args, **kwargs)
self.data_dir = data_dir
self.val_split = val_split
self.num_workers = num_workers
self.batch_size = batch_size
self.dataset_train = ...
self.dataset_val = ...
self.dataset_test = ...
self.SST = StanfordSentimentTreeBank
def prepare_data(self):
"""Saves IMDB files to `data_dir`"""
self.SST(self.data_dir)
def setup(self, stage: Optional[str] = None):
"""Split the train and valid dataset"""
train_trans, test_trans = self.default_transforms
train_dataset = self.SST(self.data_dir, split='train', **train_trans)
test_dataset = self.SST(self.data_dir, split='test', **test_trans)
train_length = len(train_dataset)
self.raw_dataset_train = train_dataset
self.raw_dataset_test = test_dataset
# self.dataset_train, self.dataset_val = random_split(train_dataset, [train_length - self.val_split, self.val_split])
self.dataset_train = train_dataset
self.dataset_test = test_dataset
def train_dataloader(self):
"""IMDB train set removes a subset to use for validation"""
loader = DataLoader(
self.dataset_train,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
pin_memory=True,
collate_fn=self.collator_fn
)
return loader
def val_dataloader(self):
"""IMDB val set uses a subset of the training set for validation"""
loader = DataLoader(
self.dataset_test,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
pin_memory=True,
collate_fn=self.collator_fn
)
return loader
def test_dataloader(self):
"""IMDB test set uses the test split"""
loader = DataLoader(
self.dataset_test,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
pin_memory=True,
collate_fn=self.collator_fn
)
return loader
def get_vocab(self):
return self.raw_dataset_train.get_vocab()
@property
def default_transforms(self):
train_transforms = {
'text_transforms': text_f.sequential_transforms(
random_deletion,
random_swap
),
'label_transforms': None
}
test_transforms = {
'text_transforms': None,
'label_transforms': None
}
return train_transforms, test_transforms
@property
def collator_fn(self):
return self.raw_dataset_train.collator_fn
def random_deletion(words, p=0.1):
if len(words) == 1: # return if single word
return words
remaining = list(filter(lambda x: random.uniform(0, 1) > p, words))
if len(remaining) == 0: # if not left, sample a random word
return [random.choice(words)]
else:
return remaining
def random_swap(sentence, n=3, p=0.1):
length = range(len(sentence))
n = min(n, len(sentence))
for _ in range(n):
if random.uniform(0, 1) > p:
idx1, idx2 = random.choices(length, k=2)
sentence[idx1], sentence[idx2] = sentence[idx2], sentence[idx1]
return sentence
class SSTModel(pl.LightningModule):
def __init__(self, hparams, *args, **kwargs):
super().__init__()
self.save_hyperparameters(hparams)
self.num_classes = self.hparams.output_dim
self.embedding = nn.Embedding(self.hparams.input_dim, self.hparams.embedding_dim)
self.lstm = nn.LSTM(
self.hparams.embedding_dim,
self.hparams.hidden_dim,
num_layers=self.hparams.num_layers,
dropout=self.hparams.dropout,
batch_first=True
)
self.proj_layer = nn.Sequential(
nn.Linear(self.hparams.hidden_dim, self.hparams.hidden_dim),
nn.BatchNorm1d(self.hparams.hidden_dim),
nn.ReLU(),
nn.Dropout(self.hparams.dropout),
)
self.fc = nn.Linear(self.hparams.hidden_dim, self.num_classes)
self.loss = nn.CrossEntropyLoss()
def init_state(self, sequence_length):
return (torch.zeros(self.hparams.num_layers, sequence_length, self.hparams.hidden_dim).to(self.device),
torch.zeros(self.hparams.num_layers, sequence_length, self.hparams.hidden_dim).to(self.device))
def forward(self, text, text_length, prev_state=None):
# [batch size, sentence length] => [batch size, sentence len, embedding size]
embedded = self.embedding(text)
# packs the input for faster forward pass in RNN
packed = torch.nn.utils.rnn.pack_padded_sequence(
embedded, text_length.to('cpu'),
enforce_sorted=False,
batch_first=True
)
# [batch size sentence len, embedding size] =>
# output: [batch size, sentence len, hidden size]
# hidden: [batch size, 1, hidden size]
packed_output, curr_state = self.lstm(packed, prev_state)
hidden_state, cell_state = curr_state
# print('hidden state shape: ', hidden_state.shape)
# print('cell')
# unpack packed sequence
# unpacked, unpacked_len = torch.nn.utils.rnn.pad_packed_sequence(packed_output, batch_first=True)
# print('unpacked: ', unpacked.shape)
# [batch size, sentence len, hidden size] => [batch size, num classes]
# output = self.proj_layer(unpacked[:, -1])
output = self.proj_layer(hidden_state[-1])
# print('output shape: ', output.shape)
output = self.fc(output)
return output, curr_state
def shared_step(self, batch, batch_idx):
label, text, text_length = batch
logits, in_state = self(text, text_length)
loss = self.loss(logits, label)
pred = torch.argmax(F.log_softmax(logits, dim=1), dim=1)
acc = accuracy(pred, label)
metric = {'loss': loss, 'acc': acc}
return metric
def training_step(self, batch, batch_idx):
metrics = self.shared_step(batch, batch_idx)
log_metrics = {'train_loss': metrics['loss'], 'train_acc': metrics['acc']}
self.log_dict(log_metrics, prog_bar=True)
return metrics
def validation_step(self, batch, batch_idx):
metrics = self.shared_step(batch, batch_idx)
return metrics
def validation_epoch_end(self, outputs):
acc = torch.stack([x['acc'] for x in outputs]).mean()
loss = torch.stack([x['loss'] for x in outputs]).mean()
log_metrics = {'val_loss': loss, 'val_acc': acc}
self.log_dict(log_metrics, prog_bar=True)
return log_metrics
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def test_epoch_end(self, outputs):
accuracy = torch.stack([x['acc'] for x in outputs]).mean()
self.log('hp_metric', accuracy)
self.log_dict({'test_acc': accuracy}, prog_bar=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.lr)
lr_scheduler = {
'scheduler': torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=10, verbose=True),
'monitor': 'train_loss',
'name': 'scheduler'
}
return [optimizer], [lr_scheduler]
###Output
_____no_output_____
###Markdown
**Sanity Checking**
###Code
sst_dataset = SSTDataModule(batch_size=128)
sst_dataset.setup()
loader = sst_dataset.train_dataloader()
batch = next(iter(loader))
label, text, text_length = batch
text.size(0)
label.shape, text.shape, text_length.shape
text[0]
hparams = OmegaConf.create({
'input_dim': len(sst_dataset.get_vocab()),
'embedding_dim': 128,
'num_layers': 2,
'hidden_dim': 64,
'dropout': 0.5,
'output_dim': len(StanfordSentimentTreeBank.get_labels()),
'lr': 5e-4,
'epochs': 30,
'use_lr_finder': False
})
sst_model = SSTModel(hparams)
output, (h, c) = sst_model(text, text_length)
output.shape
sst_model = SSTModel(hparams)
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
checkpoint_callback = ModelCheckpoint(
monitor='val_loss',
save_top_k=3,
mode='min'
)
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = pl.Trainer(gpus=1, max_epochs=hparams.epochs, callbacks=[lr_monitor, checkpoint_callback], progress_bar_refresh_rate=1, reload_dataloaders_every_epoch=True)
if hparams.use_lr_finder:
# Run learning rate finder
lr_finder = trainer.tuner.lr_find(sst_model, sst_dataset, max_lr=5)
# Plot with
fig = lr_finder.plot(suggest=True)
fig.show()
# Pick point based on plot, or get suggestion
new_lr = lr_finder.suggestion()
print(f'lr finder suggested lr: {new_lr}')
# update hparams of the model
sst_model.hparams.lr = new_lr
trainer.fit(sst_model, sst_dataset)
trainer.test()
%load_ext tensorboard
%tensorboard --logdir lightning_logs/
###Output
_____no_output_____
###Markdown
Model Diagnosis
###Code
loader = sst_dataset.test_dataloader()
batch = next(iter(loader))
label, text, text_length = batch
label.shape, text.shape, text_length.shape
def k_missclassified(batch, model, datamodule, k=10):
model.eval()
with torch.no_grad():
label, text, text_length = batch
logits, in_state = model(text, text_length)
pred = torch.argmax(F.log_softmax(logits, dim=1), dim=1)
acc = accuracy(pred, label)
miss_idx = pred != label
vocab = datamodule.get_vocab()
for t, l, p in zip(text.numpy()[miss_idx][:k], label.numpy()[miss_idx][:k], pred.numpy()[miss_idx][:k]):
sentence = ' '.join(vocab.itos[x] for x in t).replace(" <pad>", "")
print('sentence: ', sentence)
print(f'label: {datamodule.dataset_train.get_labels()[l]}, predicted: {datamodule.dataset_train.get_labels()[p]}')
print('\n')
k_missclassified(batch, sst_model, sst_dataset)
def k_correctclassified(batch, model, datamodule, k=10):
model.eval()
with torch.no_grad():
label, text, text_length = batch
logits, in_state = model(text, text_length)
pred = torch.argmax(F.log_softmax(logits, dim=1), dim=1)
acc = accuracy(pred, label)
miss_idx = label == pred
vocab = datamodule.get_vocab()
for t, l, p in zip(text.numpy()[miss_idx][:k], label.numpy()[miss_idx][:k], pred.numpy()[miss_idx][:k]):
sentence = ' '.join(vocab.itos[x] for x in t).replace(" <pad>", "")
print('sentence: ', sentence)
print(f'label: {datamodule.dataset_train.get_labels()[l]}, predicted: {datamodule.dataset_train.get_labels()[p]}')
print('\n')
k_correctclassified(batch, sst_model, sst_dataset)
###Output
sentence: effective but <unk> biopic
label: neutral, predicted: neutral
sentence: if you sometimes like to go to the movies to have fun , wasabi is a good place to start .
label: positive, predicted: positive
sentence: emerges as something rare , an issue movie that ' s so honest and keenly observed that it doesn ' t feel like one .
label: very positive, predicted: very positive
sentence: this is a film well worth seeing , talking and singing heads and all .
label: very positive, predicted: very positive
sentence: what really surprises about wisegirls is its low-key quality and genuine tenderness .
label: positive, predicted: positive
sentence: <unk> wendigo is <unk> why we go to the cinema to be fed through the eye , the heart , the mind .
label: positive, predicted: positive
sentence: one of the greatest family-oriented , fantasy-adventure movies ever .
label: very positive, predicted: very positive
sentence: an utterly compelling ` who wrote it ' in which the reputation of the most famous author who ever lived comes into question .
label: positive, predicted: positive
sentence: illuminating if overly talky documentary .
label: neutral, predicted: neutral
sentence: a masterpiece four years in the making .
label: very positive, predicted: very positive
###Markdown
Misc Stuff
###Code
! ls
ls lightning_logs/version_0
from google.colab import drive
drive.mount('/gdrive')
! ls /gdrive/MyDrive/END2.0/05_NLP_Augment/
# ! cp -r /gdrive/MyDrive/END2.0/05_NLP_Augment/lightning_logs .
# ! cp -r lightning_logs /gdrive/MyDrive/END2.0/05_NLP_Augment/
# drive.flush_and_unmount()
# ! rm -r lightning_logs
# ! du -sh *
# ! tensorboard dev upload --logdir lightning_logs \
# --name "END2 05_NLP_Augment - Satyajit" \
# --description "Experiments on NLP Augmentation on SST Dataset"
###Output
_____no_output_____ |
notebooks/18 - KEGG.ipynb | ###Markdown
KEGG
====
KEGG is a database resource for understanding high-level functions and utilities of the biological system, such as the cell, the organism and the ecosystem, from molecular-level information, especially large-scale molecular datasets generated by genome sequencing and other high-throughput experimental technologies. Please note that the KEGG parser implementation in Biopython is incomplete. While the KEGG website indicates many flat file formats, only parsers and writers for compound, enzyme, and map are currently implemented. However, a generic parser is implemented to handle the other formats.

Parsing KEGG records
--------------------
Parsing a KEGG record is as simple as using any other file format parser in Biopython. (Before running the following code, please open http://rest.kegg.jp/get/ec:5.4.2.2 with your web browser and save it as ec_5.4.2.2.txt.)
###Code
!wget http://rest.kegg.jp/get/ec:5.4.2.2 -O ec_5.4.2.2.txt
from Bio.KEGG import Enzyme
records = Enzyme.parse(open("ec_5.4.2.2.txt"))
record = list(records)[0]
record.classname
record.entry
###Output
_____no_output_____
###Markdown
The following section shows how to download the above enzyme using the KEGG API as well as how to use the generic parser with data that does not have a custom parser implemented.

Querying the KEGG API
---------------------
Biopython has full support for querying the KEGG API. Querying all KEGG endpoints is supported; all methods documented by KEGG are supported. The interface has some validation of queries, which follow rules defined on the KEGG site. However, invalid queries which return a 400 or 404 must be handled by the user.

First, here is how to extend the above example by downloading the relevant enzyme and passing it through the Enzyme parser.
###Code
from Bio.KEGG import REST
from Bio.KEGG import Enzyme
request = REST.kegg_get("ec:5.4.2.2")
open("ec_5.4.2.2.txt", 'w').write(request.read().decode("utf-8"))
records = Enzyme.parse(open("ec_5.4.2.2.txt"))
record = list(records)[0]
record.classname
record.entry
###Output
_____no_output_____
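###Markdown
The other endpoints follow the same pattern. A quick sketch (the helper names are the standard `Bio.KEGG.REST` functions; the queries themselves are only illustrative):
###Code
from Bio.KEGG import REST

# Illustrative queries (adjust to your own use case)
print(REST.kegg_info("kegg").read())                 # release statistics for the whole KEGG resource
print(REST.kegg_find("compound", "glucose").read())  # free-text search of the compound database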
###Markdown
Now, here's a more realistic example which shows a combination of querying the KEGG API. This will demonstrate how to extract a unique set of all human pathway gene symbols which relate to DNA repair. The steps that need to be taken to do so are as follows. First, we need to get a list of all human pathways. Secondly, we need to filter those for ones which relate to "repair". Lastly, we need to get a list of all the gene symbols in all repair pathways.
###Code
from Bio.KEGG import REST
human_pathways = REST.kegg_list("pathway", "hsa").read()
human_pathways.decode("utf-8").split("\n")[0:5]
# Filter all human pathways for repair pathways
repair_pathways = []
for line in human_pathways.decode("utf-8").rstrip().split("\n"):
entry, description = line.split("\t")
if "repair" in description:
repair_pathways.append(entry)
repair_pathways
# Get the genes for pathways and add them to a list
repair_genes = []
for pathway in repair_pathways:
pathway_file = REST.kegg_get(pathway).read() # query and read each pathway
# iterate through each KEGG pathway file, keeping track of which section
# of the file we're in, only read the gene in each pathway
current_section = None
for line in pathway_file.decode("utf-8").rstrip().split("\n"):
section = line[:12].strip() # section names are within 12 columns
if not section == "":
current_section = section
if current_section == "GENE":
gene_identifiers, gene_description = line[12:].split("; ")
gene_id, gene_symbol = gene_identifiers.split()
if not gene_symbol in repair_genes:
repair_genes.append(gene_symbol)
print("There are %d repair pathways and %d repair genes. The genes are:" % \
(len(repair_pathways), len(repair_genes)))
print(", ".join(repair_genes))
###Output
There are 3 repair pathways and 78 repair genes. The genes are:
OGG1, NTHL1, NEIL1, NEIL2, NEIL3, UNG, TDG, SMUG1, MUTYH, MPG, MBD4, APEX1, APEX2, POLB, POLL, HMGB1, XRCC1, PCNA, POLD1, POLD2, POLD3, POLD4, POLE, POLE2, POLE3, POLE4, LIG1, LIG3, PARP2, PARP1, PARP3, PARP4, FEN1, RBX1, CUL4B, CUL4A, DDB1, DDB2, XPC, RAD23B, RAD23A, CETN2, ERCC8, ERCC6, CDK7, MNAT1, CCNH, ERCC3, ERCC2, GTF2H5, GTF2H1, GTF2H2, GTF2H2C_2, GTF2H2C, GTF2H3, GTF2H4, ERCC5, BIVM-ERCC5, XPA, RPA1, RPA2, RPA3, RPA4, ERCC4, ERCC1, RFC1, RFC4, RFC2, RFC5, RFC3, SSBP1, PMS2, MLH1, MSH6, MSH2, MSH3, MLH3, EXO1
|
deq-colab.ipynb | ###Markdown
DEQ idea & finding stationary points with a root finder. A deep equilibrium model defines its output implicitly as a fixed point z* = f(z*, x) of a single layer and computes that point with a root finder instead of stacking explicit layers. The next cell reconstructs the imports used throughout this notebook (they were not shown in the original), followed by a minimal root-finder demo on a toy example.
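###Code
# Imports reconstructed from usage later in this notebook (an assumption; the original
# import cell is not shown here).
from dataclasses import field
from functools import partial
from typing import Callable, List, Tuple

import jax
import jax.numpy as jnp
import numpy as np
import flax.linen as nn
import optax
import torchvision
from jax import random
###Markdown
A minimal fixed-point demo on a toy example (a sketch: the toy layer, its scaling, and the iteration count are illustrative assumptions, not code from the original notebook). The equilibrium z* satisfies z* = f(z*, x); plain fixed-point iteration already finds it for a contractive f, and the Broyden/Newton solvers defined next do the same job faster and more robustly.
###Code
def toy_layer(z, x, W):
    # a small "layer" f(z, x); the 0.1 scaling keeps the map contractive
    return jnp.tanh(W @ z + x)

demo_key = jax.random.PRNGKey(0)
W_demo = 0.1 * jax.random.normal(demo_key, (8, 8))
x_demo = jax.random.normal(jax.random.PRNGKey(1), (8,))

# naive fixed-point iteration: z_{k+1} = f(z_k, x)
z_demo = jnp.zeros(8)
for _ in range(50):
    z_demo = toy_layer(z_demo, x_demo, W_demo)

print("fixed-point residual:", jnp.linalg.norm(toy_layer(z_demo, x_demo, W_demo) - z_demo))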
###Code
def _safe_norm_jax(v):
if not jnp.all(jnp.isfinite(v)):
return jnp.inf
return jnp.linalg.norm(v)
def scalar_search_armijo_jax(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0):
ite = 0
phi_a0 = phi(alpha0) # First do an update with step size 1
if phi_a0 <= phi0 + c1*alpha0*derphi0:
return alpha0, phi_a0, ite
# Otherwise, compute the minimizer of a quadratic interpolant
alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
phi_a1 = phi(alpha1)
# Otherwise loop with cubic interpolation until we find an alpha which
# satisfies the first Wolfe condition (since we are backtracking, we will
# assume that the value of alpha is not too small and satisfies the second
# condition.
while alpha1 > amin: # we are assuming alpha>0 is a descent direction
factor = alpha0**2 * alpha1**2 * (alpha1-alpha0)
a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \
alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0)
a = a / factor
b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \
alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0)
b = b / factor
alpha2 = (-b + jnp.sqrt(jnp.abs(b**2 - 3 * a * derphi0))) / (3.0*a)
phi_a2 = phi(alpha2)
ite += 1
if (phi_a2 <= phi0 + c1*alpha2*derphi0):
return alpha2, phi_a2, ite
if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96:
alpha2 = alpha1 / 2.0
alpha0 = alpha1
alpha1 = alpha2
phi_a0 = phi_a1
phi_a1 = phi_a2
# Failed to find a suitable step length
return None, phi_a1, ite
def line_search_jax(update, x0, g0, g, nstep=0, on=True):
"""
`update` is the propsoed direction of update.
Code adapted from scipy.
"""
tmp_s = [0]
tmp_g0 = [g0]
tmp_phi = [jnp.linalg.norm(g0)**2]
s_norm = jnp.linalg.norm(x0) / jnp.linalg.norm(update)
def phi(s, store=True):
if s == tmp_s[0]:
return tmp_phi[0] # If the step size is so small... just return something
x_est = x0 + s * update
g0_new = g(x_est)
phi_new = _safe_norm_jax(g0_new)**2
if store:
tmp_s[0] = s
tmp_g0[0] = g0_new
tmp_phi[0] = phi_new
return phi_new
if on:
s, phi1, ite = scalar_search_armijo_jax(phi, tmp_phi[0], -tmp_phi[0], amin=1e-2)
if (not on) or s is None:
s = 1.0
ite = 0
x_est = x0 + s * update
if s == tmp_s[0]:
g0_new = tmp_g0[0]
else:
g0_new = g(x_est)
return x_est, g0_new, x_est - x0, g0_new - g0, ite
def rmatvec_jax(part_Us, part_VTs, x):
# Compute x^T(-I + UV^T)
# x: (N, 2d, L')
# part_Us: (N, 2d, L', threshold)
# part_VTs: (N, threshold, 2d, L')
if jnp.size(part_Us) == 0:
return -x
xTU = jnp.einsum('bij, bijd -> bd', x, part_Us) # (N, threshold)
return -x + jnp.einsum('bd, bdij -> bij', xTU, part_VTs) # (N, 2d, L'), but should really be (N, 1, (2d*L'))
def matvec_jax(part_Us, part_VTs, x):
# Compute (-I + UV^T)x
# x: (N, 2d, L')
# part_Us: (N, 2d, L', threshold)
# part_VTs: (N, threshold, 2d, L')
if jnp.size(part_Us) == 0:
return -x
VTx = jnp.einsum('bdij, bij -> bd', part_VTs, x) # (N, threshold)
return -x + jnp.einsum('bijd, bd -> bij', part_Us, VTx) # (N, 2d, L'), but should really be (N, (2d*L'), 1)
def broyden_jax(f, z0, x0, threshold, eps=1e-3, stop_mode="rel", result_dict=False, ls=False):
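    # Broyden's method for g(z) = f(z, x) - z = 0: the inverse Jacobian is never formed
    # explicitly but is approximated as -I plus a low-rank U V^T correction, updated once
    # per iteration via matvec_jax / rmatvec_jax above.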
bsz, total_hsize = z0.shape
orig_shape = (bsz,total_hsize)
seq_len = 1
new_shape = (bsz,total_hsize,seq_len)
z0 = z0.reshape(*new_shape)
def g(_z):
# here it is safe to use x out of scope
return (f(_z.reshape(*orig_shape),x0)-_z.reshape(*orig_shape)).reshape(*new_shape)
dev = z0.device()
alternative_mode = 'rel' if stop_mode == 'abs' else 'abs'
z_est = z0 # (bsz, 2d, L')
gz = g(z_est) # (bsz, 2d, L')
nstep = 0
tnstep = 0
# For fast calculation of inv_jacobian (approximately)
Us = jax.device_put(jnp.zeros((bsz, total_hsize, seq_len, threshold)),dev) # One can also use an L-BFGS scheme to further reduce memory
VTs = jax.device_put(jnp.zeros((bsz, threshold, total_hsize, seq_len)),dev)
update = -matvec_jax(Us[:,:,:,:nstep], VTs[:,:nstep], gz) # Formally should be -torch.matmul(inv_jacobian (-I), gx)
prot_break = False
# To be used in protective breaks
protect_thres = (1e6 if stop_mode == "abs" else 1e3) * seq_len
new_objective = 1e8
trace_dict = {'abs': [],
'rel': []}
lowest_dict = {'abs': 1e8,
'rel': 1e8}
lowest_step_dict = {'abs': 0,
'rel': 0}
nstep, lowest_zest, lowest_gz = 0, z_est, gz
while nstep < threshold:
z_est, gz, delta_z, delta_gz, ite = line_search_jax(update, z_est, gz, g, nstep=nstep, on=ls)
nstep += 1
tnstep += (ite+1)
abs_diff = jnp.linalg.norm(gz)
rel_diff = abs_diff / (jnp.linalg.norm(gz + z_est) + 1e-9)
diff_dict = {'abs': abs_diff,
'rel': rel_diff}
trace_dict['abs'].append(abs_diff)
trace_dict['rel'].append(rel_diff)
for mode in ['rel', 'abs']:
if diff_dict[mode] < lowest_dict[mode]:
if mode == stop_mode:
lowest_zest, lowest_gz = jnp.copy(z_est), jnp.copy(gz)
lowest_dict[mode] = diff_dict[mode]
lowest_step_dict[mode] = nstep
new_objective = diff_dict[stop_mode]
if new_objective < eps: break
if new_objective < 3*eps and nstep > 30 and np.max(trace_dict[stop_mode][-30:]) / np.min(trace_dict[stop_mode][-30:]) < 1.3:
# if there's hardly been any progress in the last 30 steps
break
if new_objective > trace_dict[stop_mode][0] * protect_thres:
prot_break = True
break
part_Us, part_VTs = Us[:,:,:,:nstep-1], VTs[:,:nstep-1]
vT = rmatvec_jax(part_Us, part_VTs, delta_z)
u = (delta_z - matvec_jax(part_Us, part_VTs, delta_gz)) / jnp.einsum('bij, bij -> b', vT, delta_gz)[:,None,None]
vT = jnp.nan_to_num(vT,nan=0.)
u = jnp.nan_to_num(u,nan=0.)
VTs = VTs.at[:,nstep-1].set(vT)
Us = Us.at[:,:,:,nstep-1].set(u)
update = -matvec_jax(Us[:,:,:,:nstep], VTs[:,:nstep], gz)
# Fill everything up to the threshold length
for _ in range(threshold+1-len(trace_dict[stop_mode])):
trace_dict[stop_mode].append(lowest_dict[stop_mode])
trace_dict[alternative_mode].append(lowest_dict[alternative_mode])
lowest_zest = lowest_zest.reshape(*orig_shape)
# print("broyden",jnp.linalg.norm(z_est),jnp.linalg.norm(gz))
if result_dict:
return {"result": lowest_zest,
"lowest": lowest_dict[stop_mode],
"nstep": lowest_step_dict[stop_mode],
"prot_break": prot_break,
"abs_trace": trace_dict['abs'],
"rel_trace": trace_dict['rel'],
"eps": eps,
"threshold": threshold}
else:
return lowest_zest
def newton_jax(f, z0, x0, threshold, eps=1e-3):
# TODO replace with jax while
# note: f might ignore x0 (i.e. with backward pass)
orig_shape = z0.shape
def g(_z):
# this reshaping is to enable solving with Jacobian
return (f(_z.reshape(*orig_shape),x0)-_z.reshape(*orig_shape)).reshape(-1)
jac_g = jax.jacfwd(g)
z = z0.reshape(-1)
gz = g(z)
gz_norm = jnp.linalg.norm(gz)
nstep = 0
while nstep < threshold and gz_norm > eps:
# solve system
jgz = jac_g(z)
# print("gz",gz.shape,jnp.linalg.norm(gz))
# print("jgz",jgz.shape,jnp.linalg.norm(jgz))
delta_z = jnp.linalg.solve(jgz,-gz)
# print("delta_z",delta_z.shape,jnp.linalg.norm(delta_z))
z = z + delta_z
# need to compute gx here to decide whether to stop
gz = g(z)
gz_norm = jnp.linalg.norm(gz)
nstep += 1
z = z.reshape(*orig_shape).astype(jnp.float32)
# assert False
return z
def direct_jax(f, z0, x0, threshold, eps=1e-3):
# TODO replace with jax while
nstep = 0
z_old = z0
z_new = f(z0,x0)
gz = z_new-z_old
gz_norm = jnp.linalg.norm(gz)
min_gz_norm, min_z = gz_norm, z_new
while nstep < threshold and gz_norm > eps:
z_old = z_new
z_new = f(z_old,x0)
gz = z_new-z_old
gz_norm = jnp.linalg.norm(gz)
if gz_norm < min_gz_norm:
min_gz_norm, min_z = gz_norm, z_new
nstep += 1
# print("min_gz_norm",min_gz_norm,"nstep",nstep)
return min_z
class MDEQBlock(nn.Module):
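    # Residual block applied within one resolution branch: two 3x3 convolutions with group
    # norm; the transformed input x is injected (branch 0 only) before the second group
    # norm, followed by a residual connection and a final ReLU + group norm.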
curr_branch: int
channels: List[int]
kernel_size: Tuple[int] = (3, 3)
num_groups: int = 2
kernel_init = jax.nn.initializers.glorot_normal()
bias_init = jax.nn.initializers.glorot_normal()
def setup(self):
self.input_dim = self.channels[self.curr_branch]
self.hidden_dim = 2*self.input_dim
self.conv1 = nn.Conv(features=self.hidden_dim, kernel_size=self.kernel_size,
strides=(1,1))
self.group1 = nn.GroupNorm(num_groups=self.num_groups)
self.relu = nn.relu
self.conv2 = nn.Conv(features=self.input_dim, kernel_size=self.kernel_size,
strides=(1,1))
self.group2 = nn.GroupNorm(num_groups=self.num_groups)
self.group3 = nn.GroupNorm(num_groups=self.num_groups)
def __call__(self, x, branch, injection):
# forward pass
h1 = self.group1(self.conv1(x))
h1 = self.relu(h1)
h2 = self.conv2(h1)
# inject original input if resolution=0
if branch == 0:
h2 += injection
h2 = self.group2(h2)
# residual
h2 += x
h3 = self.relu(h2)
out = self.group3(h3)
return out
class DownSample(nn.Module):
channels: List[int]
branches: Tuple[int]
num_groups: int
kernel_init = jax.nn.initializers.glorot_normal()
def setup(self):
self.in_chan = self.channels[self.branches[0]]
self.out_chan = self.channels[self.branches[1]]
to_res, from_res = self.branches # sampling from resolution from_res to to_res
num_samples = to_res - from_res
assert num_samples > 0
down_block = []
conv_down = nn.Conv(features=self.in_chan, kernel_size=(3,3), strides=(2,2), padding=((1,1),(1,1)), use_bias=False)
group_down = nn.GroupNorm(num_groups=self.num_groups)
relu_down = nn.relu
for n in range(num_samples-1):
down_block += [conv_down, group_down, relu_down]
conv_down = nn.Conv(features=self.out_chan, kernel_size=(3,3), strides=(2,2), padding=((1,1),(1,1)), use_bias=False)
down_block += [conv_down, group_down]
self.downsample_fn = nn.Sequential(down_block)
def __call__(self, z_plus):
out = self.downsample_fn(z_plus)
return out
class UpSample(nn.Module):
channels: List[int]
branches: Tuple[int]
num_groups: int
kernel_init = jax.nn.initializers.glorot_normal()
def setup(self):
self.in_chan = self.channels[self.branches[0]]
self.out_chan = self.channels[self.branches[1]]
self.upsample_fn = self._upsample()
''' the following is from https://github.com/google/jax/issues/862 '''
def interpolate_bilinear(self, im, rows, cols):
# based on http://stackoverflow.com/a/12729229
col_lo = np.floor(cols).astype(int)
col_hi = col_lo + 1
row_lo = np.floor(rows).astype(int)
row_hi = row_lo + 1
nrows, ncols = im.shape[-3:-1]
def cclip(cols): return np.clip(cols, 0, ncols - 1)
def rclip(rows): return np.clip(rows, 0, nrows - 1)
Ia = im[..., rclip(row_lo), cclip(col_lo), :]
Ib = im[..., rclip(row_hi), cclip(col_lo), :]
Ic = im[..., rclip(row_lo), cclip(col_hi), :]
Id = im[..., rclip(row_hi), cclip(col_hi), :]
wa = np.expand_dims((col_hi - cols) * (row_hi - rows), -1)
wb = np.expand_dims((col_hi - cols) * (rows - row_lo), -1)
wc = np.expand_dims((cols - col_lo) * (row_hi - rows), -1)
wd = np.expand_dims((cols - col_lo) * (rows - row_lo), -1)
return wa*Ia + wb*Ib + wc*Ic + wd*Id
def upsampling_wrap(self, resize_rate):
def upsampling_method(img):
nrows, ncols = img.shape[-3:-1]
delta = 0.5/resize_rate
rows = np.linspace(delta,nrows-delta, np.int32(resize_rate*nrows))
cols = np.linspace(delta,ncols-delta, np.int32(resize_rate*ncols))
ROWS, COLS = np.meshgrid(rows,cols,indexing='ij')
img_resize_vec = self.interpolate_bilinear(img, ROWS.flatten(), COLS.flatten())
img_resize = img_resize_vec.reshape(img.shape[:-3] +
(len(rows),len(cols)) +
img.shape[-1:])
return img_resize
return upsampling_method
''' end copy '''
def _upsample(self):
to_res, from_res = self.branches # sampling from resolution from_res to to_res
num_samples = from_res - to_res
assert num_samples > 0
return nn.Sequential([nn.Conv(features=self.out_chan, kernel_size=(1, 1), use_bias=False), #kernel_init=self.kernel_init),
nn.GroupNorm(num_groups=self.num_groups),
self.upsampling_wrap(resize_rate=2**num_samples)])
def __call__(self, z_plus):
return self.upsample_fn(z_plus)
def cringy_reshape(in_vec, shape_list):
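    # Splits a flat batched vector of shape (batch, sum of per-branch feature sizes) back
    # into a list of tensors with the shapes given in shape_list (the inverse of the
    # flatten-and-concatenate step in create_mdeq_inputs / MDEQFF.__call__).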
start = 0
out_vec = []
if isinstance(in_vec, list):
raise ValueError
# in_vec = jnp.array(in_vec)
for size in shape_list:
my_elems = jnp.prod(jnp.array(size[1:]))
end = start+my_elems
my_chunk = jnp.copy(in_vec[:, start:end])
start += my_elems
my_chunk = jnp.reshape(my_chunk, size)
out_vec.append(my_chunk)
return out_vec
class Encoder(nn.Module):
"""
Maps image to initial latent representation
AKA the grey part in the diagram
"""
channels: List[int] = field(default_factory=lambda:[24, 24])
training: bool = True
def setup(self):
self.conv1 = nn.Conv(features=self.channels[0],
kernel_size=(3, 3), strides=(1, 1))
self.bn1 = nn.BatchNorm()
self.relu = nn.relu
def __call__(self, x):
x = self.relu(self.bn1(self.conv1(x), use_running_average=True))
return x
class CLSBlock(nn.Module):
"""
    A bottleneck residual block (1x1 -> 3x3 -> 1x1 convolutions with expansion, plus an optional 1x1 projection of the residual) used by the classification head.
"""
input_dim: int
output_dim: int
downsample: bool
expansion: int=4
def setup(self):
# init-substitute for flax
self.conv1 = nn.Conv(features=self.output_dim, kernel_size=(1,1),
strides=(1,1))#, kernel_init=self.kernel_init, bias_init=self.bias_init)
self.bn1 = nn.BatchNorm()
self.relu = nn.relu
self.conv2 = nn.Conv(features=self.output_dim, kernel_size=(3,3), strides=(1,1))#, kernel_init=self.kernel_init, bias_init=self.bias_init)
self.bn2 = nn.BatchNorm()
self.conv3 = nn.Conv(features=self.output_dim*self.expansion, kernel_size=(1,1), strides=(1,1))#, kernel_init=self.kernel_init, bias_init=self.bias_init)
self.bn3 = nn.BatchNorm()
if self.downsample:
self.ds_conv = nn.Conv(self.output_dim*self.expansion, kernel_size=(1,1), strides=(1,1), use_bias=False)
self.ds_bn = nn.BatchNorm()
def __call__(self, x, injection=None):
# forward pass
if injection is None:
injection = 0
h1 = self.bn1(self.conv1(x), use_running_average=True)
h1 = self.relu(h1)
h2 = self.bn2(self.conv2(h1), use_running_average=True)
h2 = self.relu(h2)
h3 = self.bn3(self.conv3(h2), use_running_average=True)
if self.downsample:
x = self.ds_bn(self.ds_conv(x), use_running_average=True)
h3 += x
return nn.relu(h3)
###Output
_____no_output_____
###Markdown
...
###Code
class Classifier(nn.Module):
channels: List[int] = field(default_factory=lambda:[24, 24])
output_channels: List[int] = field(default_factory=lambda:[8, 16])
expansion: int = 4
final_chansize: int = 200
num_classes: int = 10
def make_cls_block(self, in_chan, out_chan):
downsample = False
if in_chan != out_chan * self.expansion:
downsample = True
return CLSBlock(in_chan, out_chan, downsample)
def setup(self):
self.num_branches = len(self.channels)
combine_modules = []
for i in range(len(self.channels)):
output_mod = self.make_cls_block(self.channels[i], self.output_channels[i])
combine_modules.append(output_mod)
self.combine_modules = combine_modules
self.final_layer_conv = nn.Conv(self.final_chansize, kernel_size=(1,1))
self.final_layer_bn = nn.BatchNorm()
self.classifier = nn.Dense(self.num_classes)
def __call__(self, y):
y_final = self.combine_modules[0](y[0])
for i in range(len(self.channels)-1):
y_final = self.combine_modules[i+1](y[i+1])
y_final = self.final_layer_bn(self.final_layer_conv(y_final), use_running_average=True)
y_final = nn.relu(y_final)
y_final = nn.avg_pool(y_final, window_shape=y_final.shape[1:3])
y_final = jnp.reshape(y_final, (y_final.shape[0], -1))
y_final = self.classifier(y_final)
return y_final
def transform(image, label, num_classes=10):
image = jnp.float32(image) / 255.
image = np.expand_dims(image, -1)
# image = np.tile(image, (1,1,1,24))
label = jnp.array(label)
return image, label
def load_data():
test_ds = torchvision.datasets.MNIST(root="data", train=False,download=True)
train_ds = torchvision.datasets.MNIST(root="data", train=True,download=True)
train_images, train_labels = transform(train_ds.data[:10000], train_ds.targets[:10000])
test_images, test_labels = transform(test_ds.data[:1000], test_ds.targets[:1000])
print(f"MUM TRAINING IMAGES:::{train_images.shape[0]}")
print(f"MUM TEST IMAGES:::{test_images.shape[0]}")
return train_images, train_labels, test_images, test_labels
@partial(jax.custom_vjp, nondiff_argnums=(0, 1, 2, 3,)) # nondiff are all except for weights and z/x
def rootfind(solver_fn: Callable,
f_fn: Callable,
threshold: int,
eps: float,
weights: dict,
z: jnp.ndarray,
x: jnp.ndarray):
f_fn = partial(f_fn, weights)
    return jax.lax.stop_gradient(solver_fn(f_fn, z, x, threshold, eps=eps))
# Its forward call (basically just calling it)
def _rootfind_fwd(solver_fn: Callable,
f_fn: Callable,
threshold: int,
eps: float,
weights: dict,
z: jnp.ndarray,
x: jnp.ndarray):
z = rootfind(solver_fn, f_fn, threshold, eps, weights, z, x)
# print("fwd residual",jnp.linalg.norm(f_fn(weights,z,x)-z)/jnp.linalg.norm(z))
return z, (weights, z, x)
# Its backward call (its inputs)
def _rootfind_bwd(solver_fn: Callable,
f_fn: Callable,
threshold: int,
eps: float,
res,
grad):
weights, z, x = res
(_, vjp_fun) = jax.vjp(f_fn, weights, z, x)
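    # Implicit-function-theorem backward pass: rather than differentiating through the
    # forward solver iterations, find g with g = J_z^T g + grad (J_z being the Jacobian of
    # f at the equilibrium), i.e. g = (I - J_z^T)^{-1} grad, by reusing the same root solver.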
def z_fn(z,x): # gets transpose Jac w.r.t. weights and z using vjp_fun
(Jw_T, Jz_T, _) = vjp_fun(z)
return Jz_T + grad
#def gimme_jzt(z):
# (Jw_T, Jz_T, _) = vjp_fun(z)
# return Jz_T
#z0 = jnp.zeros_like(grad)
key, subkey = random.split(jax.random.PRNGKey(0))
z0 = random.normal(subkey, grad.shape)
x0 = None # dummy, z_fn does not use x
g = solver_fn(z_fn, z0, x0, threshold, eps)
#Jz_T = gimme_jzt(z0)
#g = -jnp.transpose(jnp.linalg.pinv(Jz_T) * jnp.transpose(grad))
#print('diff broyden vs inv', jnp.linalg.norm(g - g_p))
#print("bwd residual",jnp.linalg.norm(z_fn(g,x0)-g)/jnp.linalg.norm(g))
return (None, g, None)
rootfind.defvjp(_rootfind_fwd, _rootfind_bwd)
class MDEQFF(nn.Module):
"""
The f_{\theta}(z,x) function that is repeatedly applied
AKA the yellow block in the diagram
"""
num_branches: int
channels: List[int]
num_groups: int
kernel_init = jax.nn.initializers.glorot_normal()
def setup(self):
self.branches = self.stack_branches()
self.fuse_branches = self.fuse()
self.transform = self.transform_output()
def stack_branches(self):
branches = []
for i in range(self.num_branches):
branches.append(MDEQBlock(curr_branch=i, channels=self.channels))
return branches
def fuse(self):#, z_plus, channel_dimensions):
# up- and downsampling stuff
# z_plus: output of residual block
if self.num_branches == 1:
return None
fuse_layers = []
for i in range(self.num_branches):
array = []
for j in range(self.num_branches):
if i == j:
array.append(None)
else:
if i > j:
sampled = DownSample(branches=(i, j), channels=self.channels, num_groups=self.num_groups)
elif i < j:
sampled = UpSample(branches=(i, j), channels=self.channels, num_groups=self.num_groups)
array.append(sampled)
fuse_layers.append(array)
return fuse_layers
def transform_output(self):
transforms = []
for i in range(self.num_branches):
transforms.append(nn.Sequential([nn.Conv(features=self.channels[i], kernel_size=(1, 1),
use_bias=False),
nn.relu,
nn.GroupNorm(num_groups=self.num_groups)]))
return transforms
def __call__(self, z, x, shape_tuple):
batch_size = z.shape[0]
z_list = cringy_reshape(z,shape_tuple)
x_list = cringy_reshape(x,shape_tuple)
# step 1: compute residual blocks
branch_outputs = []
for i in range(self.num_branches):
branch_outputs.append(self.branches[i](z_list[i], i, x_list[i])) # z, branch, x
# step 2: fuse residual blocks
fuse_outputs = []
for i in range(self.num_branches):
intermediate_i = jnp.zeros(branch_outputs[i].shape)
for j in range(self.num_branches):
if i == j:
intermediate_i += branch_outputs[j]
else:
if self.fuse_branches[i][j] is not None:
temp = self.fuse_branches[i][j](z_plus=branch_outputs[j])#, branches=(i, j))
intermediate_i += temp
else:
raise Exception("Should not happen.")
fuse_outputs.append(self.transform[i](intermediate_i))
# stick z back into into one vector
fuse_outputs = jnp.concatenate([fo.reshape(batch_size,-1) for fo in fuse_outputs],axis=1)
assert fuse_outputs.shape[1] == z.shape[1]
return fuse_outputs
def create_mdeq_inputs(x,num_branches):
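    # Builds the multi-resolution input/state: branch 0 keeps the encoder output, the other
    # branches start as zero tensors at progressively halved spatial resolution; all branches
    # are then flattened and concatenated into single (batch, total_features) vectors.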
batch_size = x.shape[0]
x_list = [x]
for i in range(1, num_branches):
bs, H, W, y = x_list[-1].shape
new_item = jnp.zeros((bs, H//2, W//2, y))
x_list.append(new_item)
z_list = [jnp.zeros_like(elem) for elem in x_list]
shape_list = [el.shape for el in z_list]
# make them (batched) vectors
x_vec = jnp.concatenate([x.reshape(batch_size,-1) for x in x_list],axis=1)
z_vec = jnp.concatenate([z.reshape(batch_size,-1) for z in z_list],axis=1)
# i'm not sure if tuple is actually important but I like it for non-mutability
shape_tuple = tuple(shape_list)
return x_vec, z_vec, shape_tuple
def mdeq_fn(x,encoder,decoder,deqff,all_weights,solver_fn=None,mode='broyden'):
threshold = 7
eps = 1e-3
encoder_weights = all_weights["encoder"]
decoder_weights = all_weights["decoder"]
deqff_weights = all_weights["mdeqff"]
batch_size = x.shape[0]
# transform the input image
x = encoder.apply(encoder_weights,x)
# construct inputs (lots of padding and concatenation)
x, z, shape_tuple = create_mdeq_inputs(x,deqff.num_branches)
if mode == 'broyden':
# the root function can only take 3 ndarrays as input
def deqff_root(_weights,_z,_x):
# note: it's safe to pass the shape_tuple here (no tracers)
return deqff.apply(_weights,_z,_x,shape_tuple)
# apply rootfinder with custom vjp
z = rootfind(solver_fn,deqff_root,threshold,eps,deqff_weights,z,x)
elif mode == "direct_solver":
max_evals = 5
threshold=1e-2
evals = 0
residual = jnp.inf
while evals < max_evals and residual > threshold:
z = deqff.apply(deqff_weights,z,x,shape_tuple)
f_z = deqff.apply(deqff_weights,z,x,shape_tuple)
residual = jnp.linalg.norm(f_z - z) / jnp.linalg.norm(z)
evals += 1
elif mode in ["predict", "warmup"]:
z = deqff.apply(deqff_weights,z,x,shape_tuple)
z_list = cringy_reshape(z,shape_tuple)
log_probs = decoder.apply(decoder_weights,z_list)
f_z = deqff.apply(deqff_weights,z,x,shape_tuple)
residuals = jnp.linalg.norm(f_z - z) / jnp.linalg.norm(z)
# reshape back to list
return log_probs, residuals
@jax.jit
def cross_entropy_loss(logits, labels):
'''
should be same as optax.softmax_cross_entropy(logits, labels);
if getting funny results maybe remove log of logits
'''
one_hot_labels = jax.nn.one_hot(labels, num_classes=10)
logits = jax.nn.log_softmax(logits)
acc = (jnp.argmax(logits, axis=-1) == labels).mean()
output = -jnp.mean(jnp.sum(one_hot_labels * logits, axis=-1))
return output, acc
def predict(images,labels,encoder,decoder,deqff,all_weights):
loss, acc = 0,0
batch_size = 128
start, end = 0, 0
loss_vals = []
acc_vals = []
while end < images.shape[0]:
end = min(start+batch_size, images.shape[0])
x_batch = images[start:end]
x_batch = jnp.tile(x_batch, (1,1,1,24))
y_true = labels[start:end]
start = end
log_probs, residual = mdeq_fn(x_batch,encoder,decoder,deqff,all_weights,mode='predict')
loss, acc = cross_entropy_loss(log_probs, y_true)
loss_vals.append(loss * x_batch.shape[0])
acc_vals.append(acc * x_batch.shape[0])
return sum(jnp.array(loss_vals)) / images.shape[0], sum(jnp.array(acc_vals)) / images.shape[0], residual
def train(mode="broyden"):
assert mode in ["broyden", "direct_solver", "warmup"], "INCORRECT MODE"
train_images, train_labels, test_images, test_labels = load_data()
num_images = train_images.shape[0]
image_size = train_images.shape[1]
batch_size = 128
assert batch_size <= train_images.shape[0]
solver_fn = direct_jax
num_groups = 8
channels = [24, 24]
num_branches = 2
# instantiation
encoder = Encoder(channels=channels)
decoder = Classifier() # not sure about what to pass
mdeqff = MDEQFF(num_branches=num_branches, channels=channels, num_groups=num_groups)
# weight initialization
prng = jax.random.PRNGKey(0)
prng, _ = jax.random.split(prng, 2)
x_dummy = jnp.ones((batch_size, image_size, image_size, 24))
x_dummy_2, encoder_weights = encoder.init_with_output(prng,x_dummy)
x_dummy_3, z_dummy, shape_tuple = create_mdeq_inputs(x_dummy_2,num_branches)
z_dummy_2, mdeqff_weights = mdeqff.init_with_output(prng,z_dummy,x_dummy_3,shape_tuple)
z_dummy_3 = cringy_reshape(z_dummy_2,shape_tuple)
o_dummy, classifier_weights = decoder.init_with_output(prng,z_dummy_3)
# collect weights
weights = {'encoder': encoder_weights, 'mdeqff': mdeqff_weights ,'decoder': classifier_weights}
optimizer = optax.adam(learning_rate=0.001)
opt_state = optimizer.init(weights)
loss_fn = cross_entropy_loss
def loss(weights, x_batch, y_true, mode):
logits, residual = mdeq_fn(x_batch,encoder,decoder,mdeqff,weights,solver_fn,mode)
loss, acc = loss_fn(logits, y_true)
return loss, (acc, residual)
def step(weights, opt_state, x_batch, y_true, mode):
(loss_vals, (acc, residual)), grad = jax.value_and_grad(loss, has_aux=True)(weights, x_batch, y_true, mode)
updates, opt_state = optimizer.update(grad, opt_state, weights)
weights = optax.apply_updates(weights, updates)
return weights, opt_state, loss_vals, acc, residual
def generator(batch_size: int=10):
''' https://optax.readthedocs.io/en/latest/meta_learning.html?highlight=generator#meta-learning '''
rng = jax.random.PRNGKey(0)
while True:
rng, k1 = jax.random.split(rng, num=2)
idxs = jax.random.randint(k1, shape=(batch_size,), minval=0, maxval=num_images, dtype=jnp.int32)
yield idxs
def list_shuffler(seed):
rng = jax.random.PRNGKey(seed)
rng, k1 = jax.random.split(rng, num=2)
indices = jnp.arange(0, train_images.shape[0])
shuffled_indices = jax.random.shuffle(k1, indices)
return shuffled_indices
max_epoch = 50
warmup_epochs = 0
if mode == "warmup":
warmup_epochs = 2
max_epoch += warmup_epochs
print_interval = 1
train_log, val_log = [],[]
if mode == "warmup":
print(f"TRAINING MODE:::{mode}+broyden")
else:
print(f"TRAINING MODE:::{mode}")
train_loss_vals, val_loss_vals = [], []
train_acc_vals, val_acc_vals = [], []
train_res_vals, val_res_vals = [], []
for epoch in range(max_epoch):
if mode == "warmup":
if epoch >= warmup_epochs:
mode = "broyden"
print("-------------DONE WARM UP---------------")
elif epoch == 0:
print("----------STARTING WARM UP--------------")
idxs = list_shuffler(epoch)
start, end = 0, 0
loss_vals = []
acc_vals = []
res_vals = []
counter = 0
while end < len(idxs):
end = min(start+batch_size, len(idxs))
idxs_to_grab = idxs[start:end]
x_batch = train_images[idxs_to_grab,...]
x_batch = jnp.tile(x_batch, (1,1,1,24))
y_true = train_labels[idxs_to_grab]
start = end
weights, opt_state, batch_loss, batch_acc, batch_res = step(weights=weights,
opt_state=opt_state,
x_batch=x_batch,
y_true=y_true,
mode=mode)
counter += 1
loss_vals.append(batch_loss * x_batch.shape[0])
acc_vals.append(batch_acc * x_batch.shape[0])
res_vals.append(batch_res * x_batch.shape[0])
print(f"batch_loss {counter} :: {batch_loss} // batch_acc :: {batch_acc} // batch_res :: {batch_res} ")
epoch_loss = sum(jnp.array(loss_vals)) / len(idxs)
epoch_acc = sum(jnp.array(acc_vals)) / len(idxs)
epoch_res = sum(jnp.array(res_vals)) / len(idxs)
train_loss_vals.append(epoch_loss)
train_acc_vals.append(epoch_acc)
train_res_vals.append(epoch_res)
val_loss, val_acc, val_res = predict(test_images,test_labels,encoder,decoder,mdeqff,weights)
val_loss_vals.append(val_loss)
val_acc_vals.append(val_acc)
val_res_vals.append(val_res)
if epoch % print_interval == 0:
print(f"\tTRAIN epoch = {epoch} / loss = {epoch_loss} / acc = {epoch_acc} / res = {epoch_res}")
print(f"\tVAL epoch = {epoch} / loss = {val_loss} / acc = {val_acc} / res = {val_res}")
if epoch_loss < 1e-5:
break
print('finally', batch_loss)
results = {'train_loss_vals': train_loss_vals,
'train_acc_vals': train_acc_vals,
'train_res_vals': train_res_vals,
'val_loss_vals': val_loss_vals,
'val_acc_vals': val_acc_vals,
'val_res_vals': val_res_vals}
return results
broyden_results = train(mode="direct_solver")
###Output
_____no_output_____ |
template/Training_Unets.ipynb | ###Markdown
Training Unet & Attention Unet Dependencies Install, load, and initialize all required dependencies for this experiment. Install Dependencies
###Code
import sys
!{sys.executable} -m pip install -q -e ../../utils/
###Output
_____no_output_____
###Markdown
Import Dependencies System libraries
###Code
from __future__ import absolute_import, division, print_function
import logging, os, sys
# Enable logging
logging.basicConfig(format='[%(levelname)s] %(message)s', level=logging.INFO, stream=sys.stdout)
# Re-import packages if they change
%load_ext autoreload
%autoreload 2
# Recursion Depth
sys.setrecursionlimit(1000000000)
# Intialize tqdm to always use the notebook progress bar
import tqdm
tqdm.tqdm = tqdm.tqdm_notebook
# Third-party libraries
import comet_ml
import numpy as np
import pandas as pd
import nilearn.plotting as nip
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
import torch
import collections
%matplotlib inline
plt.rcParams["figure.figsize"] = (12,6)
%config InlineBackend.figure_format='retina' # adapt plots for retina displays
import git
import time
# Project utils
import aneurysm_utils
from aneurysm_utils import evaluation, training,preprocessing
if "workspace" in os.getcwd():
ROOT = "/workspace" # local
elif "/group/cake" in os.getcwd():
ROOT = "/group/cake" # Jupyter Lab
###Output
_____no_output_____
###Markdown
Initialize Environment
###Code
env = aneurysm_utils.Environment(project="our-git-project", root_folder=ROOT)
env.cached_data["comet_key"] = "" # Add comet key here to track experiments
env.print_info()
###Output
_____no_output_____
###Markdown
Load Data Download, explore, and prepare all required data for the experiment in this section.
###Code
dataset_params = {
"prediction": "mask",
"mri_data_selection": "",
"balance_data": False,
"seed": 1,
"resample_voxel_dim": (1.2, 1.2, 1.2)
}
preprocessing_params = {
'min_max_normalize': True,
'mean_std_normalize': False,
'smooth_img': False, # can contain a number: smoothing factor
'intensity_segmentation': False
}
###Output
_____no_output_____
###Markdown
Load Meta Data
###Code
from aneurysm_utils.data_collection import load_aneurysm_dataset
df = load_aneurysm_dataset(
env,
mri_data_selection=dataset_params["mri_data_selection"],
random_state=dataset_params["seed"]
)
df.head()
###Output
_____no_output_____
###Markdown
Load & Split MRI Data
###Code
# Load MRI images and split into train, test, and validation
from aneurysm_utils.data_collection import split_mri_images
train_data, test_data, val_data, _ = split_mri_images(
env,
df,
prediction=dataset_params["prediction"],
encode_labels=False,
random_state=dataset_params["seed"],
balance_data=dataset_params["balance_data"],
resample_voxel_dim=dataset_params["resample_voxel_dim"]
)
mri_imgs_train, labels_train,train_participants = train_data
mri_imgs_test, labels_test,test_participants = test_data
mri_imgs_val, labels_val,val_participants = val_data
from aneurysm_utils import preprocessing
most_commen_shape = preprocessing.check_mri_shapes(mri_imgs_train)
###Output
_____no_output_____
###Markdown
Transform & Preprocess Data
###Code
size = most_commen_shape
train_index = [i for i, e in enumerate(mri_imgs_train) if e.shape != size]
mri_imgs_train = [i for j, i in enumerate(mri_imgs_train) if j not in train_index]
labels_train = [i for j, i in enumerate(labels_train) if j not in train_index]
test_index = [i for i, e in enumerate(mri_imgs_test) if e.shape != size]
mri_imgs_test = [i for j, i in enumerate(mri_imgs_test) if j not in test_index]
labels_test = [i for j, i in enumerate(labels_test) if j not in test_index]
val_index = [i for i, e in enumerate(mri_imgs_val) if e.shape != size]
mri_imgs_val = [i for j, i in enumerate(mri_imgs_val) if j not in val_index]
labels_val = [i for j, i in enumerate(labels_val) if j not in val_index]
mri_imgs_train[0].shape
preprocessing.check_mri_shapes(mri_imgs_train)
print(np.unique(labels_val[0], return_counts=True))
from aneurysm_utils import preprocessing
patch_size = 64
size_of_train = len(mri_imgs_train)
size_of_test = len(mri_imgs_test)
size_of_val = len(mri_imgs_val)
# preprocess all lists as one to have a working mean_std_normalization
mri_imgs = mri_imgs_train + mri_imgs_test + mri_imgs_val
mri_imgs = preprocessing.preprocess(env, mri_imgs, preprocessing_params)
###creating patches
mri_imgs_train = np.asarray(mri_imgs[:size_of_train])
mri_imgs_train = preprocessing.patch_list(mri_imgs_train,patch_size)
mri_imgs_test = np.asarray(mri_imgs[size_of_train : size_of_train + size_of_test])
mri_imgs_test = preprocessing.patch_list(mri_imgs_test,patch_size)
mri_imgs_val = np.asarray(mri_imgs[size_of_train + size_of_test :])
mri_imgs_val = preprocessing.patch_list(mri_imgs_val,patch_size)
# preprocess mask
x, y, h = labels_train[0].shape
labels_train = preprocessing.patch_list(labels_train,patch_size)
labels_test = preprocessing.patch_list(labels_test,patch_size)
labels_val = preprocessing.patch_list(labels_val,patch_size)
###Output
_____no_output_____
###Markdown
Optional: View image
###Code
idx = 0
nip.view_img(
nib.Nifti1Image(mri_imgs_train[0], np.eye(4)),
symmetric_cmap=False,
cmap="Greys_r",
bg_img=False,
black_bg=True,
threshold=1e-03,
draw_cross=False
)
evaluation.plot_slices(mri_imgs_train[0])
mri_imgs_train = np.array(mri_imgs_train)
labels_train = np.array(labels_train)
mri_imgs_val = np.array(mri_imgs_val)
labels_val = np.array(labels_val)
mri_imgs_test = np.array(mri_imgs_test)
labels_test = np.array(labels_test)
## after this cell: 3.3 GB memory usage
###Output
_____no_output_____
###Markdown
Train Model Implementation, configuration, and evaluation of the experiment. Train Deep Model 3D data
###Code
from comet_ml import Optimizer
artifacts = {
"train_data": (mri_imgs_train, labels_train),
"val_data": (mri_imgs_val, labels_val),
"test_data": (mri_imgs_test, labels_test)
}
params = {
"batch_size": 6,
"epochs": 45,
"es_patience": 3, # None = deactivate early stopping
"model_name": 'Unet3D_Oktay',
"optimizer_momentum": 0.9,
"optimizer":'Adam',
"scheduler": 'ReduceLROnPlateau',
"criterion": "DiceCELoss",
"sampler": None, #'ImbalancedDatasetSampler2',
"shuffle_train_set": True,
"save_models":True,
"debug": False,
"criterion_weights": 1,
"learning_rate": 1e-4,
"use_cuda":True,
"feature_scale": 2,
}
params.update(dataset_params)
params.update(preprocessing_params)
config = {
# We pick the Bayes algorithm:
"algorithm": "bayes",
# Declare your hyperparameters in the Vizier-inspired format:
"parameters": {
"learning_rate": {"type": "float", "scalingType": "loguniform", "min": 1e-4, "max": 1e-3},
},
# Declare what we will be optimizing, and how:
"spec": {"metric": "train_bal_acc", "objective": "maximize"}, #test balance accuracy
}
opt = Optimizer(config, api_key=env.cached_data["comet_key"])
## 3.3gb
import gc
gc.collect()
# Finally, get experiments, and train your models:
for comet_exp in opt.get_experiments(project_name=env.project + "-" + params["prediction"]):
print(comet_exp)
param_copy = params.copy()
comet_exp.params
param_copy["learning_rate"] = comet_exp.get_parameter("learning_rate")
exp = env.create_experiment(
params["prediction"] + "-pytorch-" + params["model_name"], comet_exp
) #params["selected_label"] + "-hyperopt-" + params["model_name"]
exp.run(training.train_pytorch_model, param_copy, artifacts)
time.sleep(3)
del exp
import gc
gc.collect()
###Output
_____no_output_____ |
notebooks/week3_solutions.ipynb | ###Markdown
Week 3 Solutions Exercise 1Plot the distribution of survival time for all patients, and use the *bins* parameter to change the granularity of the distribution
###Code
import numpy as np
import pandas as pd
import plotnine as p9
# read in data
metabric = pd.read_csv("../data/metabric_clinical_and_expression_data.csv").dropna()
metabric.dtypes
# plot histogram
(
p9.ggplot(metabric, p9.aes("Survival_time"))
+ p9.geom_histogram(bins=30)
)
# bins=1000
###Output
_____no_output_____
###Markdown
Exercise 2Amend your code from exercise 1 to create interleaved distributions of survival time for ER positive and ER negative tumours.
###Code
# interleaved histograms of survival time for ER- and ER+
(
p9.ggplot(metabric, p9.aes("Survival_time", fill="ER_status"))
+ p9.geom_histogram(position="dodge", bins=50)
)
###Output
_____no_output_____
###Markdown
Exercise 3Generate a boxplot to compare survival time between different cancer types, incorporating the following features:1. Add points behind the boxes to show the raw data points2. Colour the boxes by Cancer type3. Separate the plot into individual facets for ER+ and ER-4. Rename the x axis to "Type of cancer"5. Rename the y axis to "Survival time (months)"6. Use a theme that removes the background colour and gridlines7. Remove the legend title8. Remove the X axis labels
###Code
# change the ER status values to give more informative facet titles in the plot
metabric["ER_status"] = metabric["ER_status"].replace("Positive", "ER+").replace("Negative", "ER-")
# generate the boxplot
(
# colouring by cancer type by passing Cancer_type to fill
p9.ggplot(metabric, p9.aes("Cancer_type", "Survival_time", fill="Cancer_type"))
# plot the points, using geom_jitter to minimise overlaps
+ p9.geom_jitter(alpha=0.9)
    # plot the boxes
    + p9.geom_boxplot(alpha=0.5)
# separate the plot into facets
+ p9.facet_wrap("~ER_status", nrow=2)
# rename the x axis
+ p9.xlab("Type of cancer")
# rename the y axis
+ p9.ylab("Survival time (months)")
# specify theme to remove background colour and gridlines
+ p9.theme_classic()
+ p9.theme(
# remove the legend title
legend_title = p9.element_blank(),
# suppress the x axis labels
#axis_text_x = p9.element_blank(),
# remove the background from the facet titles
strip_background = p9.element_blank(),
#axis_text_x= p9.element_text(angle=45)
axis_text_x = p9.element_text(angle = 45, hjust = 1)
)
)
###Output
_____no_output_____
###Markdown
Week 3 Solutions Exercise 1Plot the distribution of survival time for all patients, and use the *bins* parameter to change the granularity of the distribution
###Code
import numpy as np
import pandas as pd
import plotnine as p9
# read in data
metabric = pd.read_csv("../data/metabric_clinical_and_expression_data.csv").dropna()
metabric.dtypes
# plot histogram
(
p9.ggplot(metabric, p9.aes("Survival_time"))
+ p9.geom_histogram(bins=30)
)
# bins=1000
###Output
_____no_output_____
###Markdown
Exercise 2Amend your code from exercise 1 to create interleaved distributions of survival time for ER positive and ER negative tumours.
###Code
# interleaved histograms of survival time for ER- and ER+
(
p9.ggplot(metabric, p9.aes("Survival_time", fill="ER_status"))
+ p9.geom_histogram(position="dodge", bins=50)
)
###Output
_____no_output_____
###Markdown
Exercise 3Generate a boxplot to compare survival time between different cancer types, incorporating the following features:1. Add points behind the boxes to show the raw data points2. Colour the boxes by Cancer type3. Separate the plot into individual facets for ER+ and ER-4. Rename the x axis to "Type of cancer"5. Rename the y axis to "Survival time (months)"6. Use a theme that removes the background colour and gridlines7. Remove the legend title8. Remove the X axis labels
###Code
# change the ER status values to give more informative facet titles in the plot
metabric["ER_status"] = metabric["ER_status"].replace("Positive", "ER+").replace("Negative", "ER-")
# generate the boxplot
(
# colouring by cancer type by passing Cancer_type to fill
p9.ggplot(metabric, p9.aes("Cancer_type", "Survival_time", fill="Cancer_type"))
# plot the points, using geom_jitter to minimise overlaps
+ p9.geom_jitter(alpha=0.9)
    # plot the boxes
    + p9.geom_boxplot(alpha=0.5)
# separate the plot into facets
+ p9.facet_wrap("~ER_status", nrow=2)
# rename the x axis
+ p9.xlab("Type of cancer")
# rename the y axis
+ p9.ylab("Survival time (months)")
# specify theme to remove background colour and gridlines
+ p9.theme_classic()
+ p9.theme(
# remove the legend title
legend_title = p9.element_blank(),
# suppress the x axis labels
axis_text_x = p9.element_blank(),
# remove the background from the facet titles
strip_background = p9.element_blank(),
#axis_text_x= p9.element_text(angle=45)
#axis_text_x = p9.element_text(angle = 30, hjust = 1)
)
)
###Output
_____no_output_____ |
Neural_Network_and_Deep_learning/Logistic_Regression_with_a_Neural_Network_mindset_v6a (1).ipynb | ###Markdown
Logistic Regression with a Neural Network mindsetWelcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.**Instructions:**- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.**You will learn to:**- Build the general architecture of a learning algorithm, including: - Initializing parameters - Calculating the cost function and its gradient - Using an optimization algorithm (gradient descent) - Gather all three functions above into a main model function, in the right order. UpdatesThis notebook has been updated over the past few months. The prior version was named "v5", and the current versionis now named '6a' If you were working on a previous version:* You can find your prior work by looking in the file directory for the older files (named by version name).* To view the file directory, click on the "Coursera" icon in the top left corner of this notebook.* Please copy your work from the older versions to the new version, in order to submit your work for grading. List of Updates* Forward propagation formula, indexing now starts at 1 instead of 0.* Optimization function comment now says "print cost every 100 training iterations" instead of "examples".* Fixed grammar in the comments.* Y_prediction_test variable name is used consistently.* Plot's axis label now says "iterations (hundred)" instead of "iterations".* When testing the model, the test image is normalized by dividing by 255. 1 - Packages First, let's run the cell below to import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.- [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.
###Code
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
%matplotlib inline
###Output
_____no_output_____
###Markdown
2 - Overview of the Problem set **Problem Statement**: You are given a dataset ("data.h5") containing: - a training set of m_train images labeled as cat (y=1) or non-cat (y=0) - a test set of m_test images labeled as cat or non-cat - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.Let's get more familiar with the dataset. Load the data by running the following code.
###Code
# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
###Output
_____no_output_____
###Markdown
We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images.
###Code
# Example of a picture
index = 25
plt.imshow(train_set_x_orig[index])
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
train_set_x_orig.shape[1]
#test_set_x_orig.shape
###Output
_____no_output_____
###Markdown
Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs. **Exercise:** Find the values for: - m_train (number of training examples) - m_test (number of test examples) - num_px (= height = width of a training image)Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.
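One habit that helps is to assert the shapes you expect as soon as arrays are created; a tiny illustration (the array and shape here are made up):
```python
import numpy as np

X = np.zeros((12288, 209))                 # hypothetical design matrix
assert X.shape == (12288, 209), X.shape    # fail fast if the dimensions drift
```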
###Code
### START CODE HERE ### (≈ 3 lines of code)
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
###Output
Number of training examples: m_train = 209
Number of testing examples: m_test = 50
Height/Width of each image: num_px = 64
Each image is of size: (64, 64, 3)
train_set_x shape: (209, 64, 64, 3)
train_set_y shape: (1, 209)
test_set_x shape: (50, 64, 64, 3)
test_set_y shape: (1, 50)
###Markdown
**Expected Output for m_train, m_test and num_px**: **m_train** 209 **m_test** 50 **num_px** 64 For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.**Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1).A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use: ```pythonX_flatten = X.reshape(X.shape[0], -1).T X.T is the transpose of X```
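To convince yourself that this trick keeps each example's pixels together, you can check it on a tiny random array:
```python
import numpy as np

X = np.random.randn(4, 2, 2, 3)                     # 4 examples of shape (2, 2, 3)
X_flatten = X.reshape(X.shape[0], -1).T             # shape (12, 4)
print(X_flatten.shape)
print(np.allclose(X_flatten[:, 0], X[0].ravel()))   # True: column i is example i, flattened
```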
###Code
# Reshape the training and test examples
### START CODE HERE ### (≈ 2 lines of code)
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
### END CODE HERE ###
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
###Output
train_set_x_flatten shape: (12288, 209)
train_set_y shape: (1, 209)
test_set_x_flatten shape: (12288, 50)
test_set_y shape: (1, 50)
sanity check after reshaping: [17 71 49 38 70]
###Markdown
**Expected Output**: **train_set_x_flatten shape** (12288, 209) **train_set_y shape** (1, 209) **test_set_x_flatten shape** (12288, 50) **test_set_y shape** (1, 50) **sanity check after reshaping** [17 31 56 22 33] To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel). Let's standardize our dataset.
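For completeness, the general centre-and-scale recipe described above would look like this (the next cell uses the simpler division by 255 instead):
```python
import numpy as np

X = np.random.rand(12288, 209) * 255        # placeholder pixel data
X_standardized = (X - X.mean()) / X.std()   # subtract the global mean, divide by the global std
```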
###Code
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
###Output
_____no_output_____
###Markdown
**What you need to remember:**Common steps for pre-processing a new dataset are:- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)- Reshape the datasets such that each example is now a vector of size (num_px \* num_px \* 3, 1)- "Standardize" the data 3 - General Architecture of the learning algorithm It's time to design a simple algorithm to distinguish cat images from non-cat images.You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why **Logistic Regression is actually a very simple Neural Network!****Mathematical expression of the algorithm**:For one example $x^{(i)}$:$$z^{(i)} = w^T x^{(i)} + b \tag{1}$$$$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$ $$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$The cost is then computed by summing over all training examples:$$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$**Key steps**:In this exercise, you will carry out the following steps: - Initialize the parameters of the model - Learn the parameters for the model by minimizing the cost - Use the learned parameters to make predictions (on the test set) - Analyse the results and conclude 4 - Building the parts of our algorithm The main steps for building a Neural Network are:1. Define the model structure (such as number of input features) 2. Initialize the model's parameters3. Loop: - Calculate current loss (forward propagation) - Calculate current gradient (backward propagation) - Update parameters (gradient descent)You often build 1-3 separately and integrate them into one function we call `model()`. 4.1 - Helper functions**Exercise**: Using your code from "Python Basics", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().
###Code
# GRADED FUNCTION: sigmoid
def sigmoid(z):
"""
Compute the sigmoid of z
Arguments:
z -- A scalar or numpy array of any size.
Return:
s -- sigmoid(z)
"""
### START CODE HERE ### (≈ 1 line of code)
s = 1/(1+ np.exp(-z))
### END CODE HERE ###
return s
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
###Output
sigmoid([0, 2]) = [ 0.5 0.88079708]
###Markdown
**Expected Output**: **sigmoid([0, 2])** [ 0.5 0.88079708] 4.2 - Initializing parameters**Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.
###Code
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
"""
This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.
Argument:
dim -- size of the w vector we want (or number of parameters in this case)
Returns:
w -- initialized vector of shape (dim, 1)
b -- initialized scalar (corresponds to the bias)
"""
### START CODE HERE ### (≈ 1 line of code)
w = np.zeros((dim,1),dtype=float)
b = 0
### END CODE HERE ###
assert(w.shape == (dim, 1))
assert(isinstance(b, float) or isinstance(b, int))
return w, b
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
###Output
w = [[ 0.]
[ 0.]]
b = 0
###Markdown
**Expected Output**: ** w ** [[ 0.] [ 0.]] ** b ** 0 For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1). 4.3 - Forward and Backward propagationNow that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters.**Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.**Hints**:Forward Propagation:- You get X- You compute $A = \sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$- You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$Here are the two formulas you will be using: $$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$$$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$
###Code
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
"""
Implement the cost function and its gradient for the propagation explained above
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
Return:
cost -- negative log-likelihood cost for logistic regression
dw -- gradient of the loss with respect to w, thus same shape as w
db -- gradient of the loss with respect to b, thus same shape as b
Tips:
- Write your code step by step for the propagation. np.log(), np.dot()
"""
m = X.shape[1]
# FORWARD PROPAGATION (FROM X TO COST)
### START CODE HERE ### (≈ 2 lines of code)
A = sigmoid(np.dot(w.T,X)+b) # compute activation
cost =np.sum(-Y*np.log(A)-(1-Y)*np.log(1-A))/m # compute cost
### END CODE HERE ###
# BACKWARD PROPAGATION (TO FIND GRAD)
### START CODE HERE ### (≈ 2 lines of code)
dw = np.dot(X,(A-Y).T)/m
db = np.sum(A-Y)/m
### END CODE HERE ###
assert(dw.shape == w.shape)
assert(db.dtype == float)
cost = np.squeeze(cost)
assert(cost.shape == ())
grads = {"dw": dw,
"db": db
}
return grads, cost
w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
###Output
dw = [[ 0.99845601]
[ 2.39507239]]
db = 0.00145557813678
cost = 5.80154531939
###Markdown
**Expected Output**: ** dw ** [[ 0.99845601] [ 2.39507239]] ** db ** 0.00145557813678 ** cost ** 5.801545319394553 4.4 - Optimization- You have initialized your parameters.- You are also able to compute a cost function and its gradient.- Now, you want to update the parameters using gradient descent.**Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate.
###Code
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
"""
This function optimizes w and b by running a gradient descent algorithm
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- True to print the loss every 100 steps
Returns:
params -- dictionary containing the weights w and bias b
grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
Tips:
You basically need to write down two steps and iterate through them:
1) Calculate the cost and the gradient for the current parameters. Use propagate().
2) Update the parameters using gradient descent rule for w and b.
"""
costs = []
for i in range(num_iterations):
# Cost and gradient calculation (≈ 1-4 lines of code)
### START CODE HERE ###
grads, cost = propagate(w, b, X, Y)
### END CODE HERE ###
# Retrieve derivatives from grads
dw = grads["dw"]
db = grads["db"]
# update rule (≈ 2 lines of code)
### START CODE HERE ###
w = w - learning_rate*dw
b = b - learning_rate*db
### END CODE HERE ###
# Record the costs
if i % 100 == 0:
costs.append(cost)
# Print the cost every 100 training iterations
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
params = {"w": w,
"b": b}
grads = {"dw": dw,
"db": db}
return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
###Output
w = [[ 0.19033591]
[ 0.12259159]]
b = 1.92535983008
dw = [[ 0.67752042]
[ 1.41625495]]
db = 0.219194504541
###Markdown
**Expected Output**: **w** [[ 0.19033591] [ 0.12259159]] **b** 1.92535983008 **dw** [[ 0.67752042] [ 1.41625495]] **db** 0.219194504541 **Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions:1. Calculate $\hat{Y} = A = \sigma(w^T X + b)$2. Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), stores the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this).
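For reference, the vectorized alternative hinted at above is a one-liner once `A` has been computed:
```python
import numpy as np

A = np.array([[0.3, 0.9, 0.51]])          # example activations
Y_prediction = (A > 0.5).astype(float)    # array([[0., 1., 1.]])
```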
###Code
# GRADED FUNCTION: predict
def predict(w, b, X):
'''
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
'''
m = X.shape[1]
Y_prediction = np.zeros((1,m))
w = w.reshape(X.shape[0], 1)
# Compute vector "A" predicting the probabilities of a cat being present in the picture
### START CODE HERE ### (≈ 1 line of code)
A = sigmoid(np.dot(w.T,X)+b)
### END CODE HERE ###
for i in range(A.shape[1]):
# Convert probabilities A[0,i] to actual predictions p[0,i]
### START CODE HERE ### (≈ 4 lines of code)
if A[0,i] <= 0.5:
Y_prediction[0,i] =0
else:
Y_prediction[0,i] =1
#Y_prediction[0,i] = 1 if A[0, i] > 0.5 else 0
pass
### END CODE HERE ###
assert(Y_prediction.shape == (1, m))
return Y_prediction
w = np.array([[0.1124579],[0.23106775]])
b = -0.3
X = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])
print ("predictions = " + str(predict(w, b, X)))
###Output
predictions = [[ 1. 1. 0.]]
###Markdown
**Expected Output**: **predictions** [[ 1. 1. 0.]] **What to remember:**You've implemented several functions that:- Initialize (w,b)- Optimize the loss iteratively to learn parameters (w,b): - computing the cost and its gradient - updating the parameters using gradient descent- Use the learned (w,b) to predict the labels for a given set of examples 5 - Merge all functions into a model You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.**Exercise:** Implement the model function. Use the following notation: - Y_prediction_test for your predictions on the test set - Y_prediction_train for your predictions on the train set - w, costs, grads for the outputs of optimize()
###Code
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
"""
Builds the logistic regression model by calling the function you've implemented previously
Arguments:
X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
print_cost -- Set to true to print the cost every 100 iterations
Returns:
d -- dictionary containing information about the model.
"""
### START CODE HERE ###
# initialize parameters with zeros (≈ 1 line of code)
w, b = initialize_with_zeros(X_train.shape[0])
# Gradient descent (≈ 1 line of code)
parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
# Retrieve parameters w and b from dictionary "parameters"
w = parameters["w"]
b = parameters["b"]
# Predict test/train set examples (≈ 2 lines of code)
Y_prediction_test = predict(w, b, X_test)
Y_prediction_train = predict(w, b, X_train)
### END CODE HERE ###
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train" : Y_prediction_train,
"w" : w,
"b" : b,
"learning_rate" : learning_rate,
"num_iterations": num_iterations}
return d
###Output
_____no_output_____
###Markdown
Run the following cell to train your model.
###Code
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
###Output
Cost after iteration 0: 0.693147
Cost after iteration 100: 0.709726
Cost after iteration 200: 0.657712
Cost after iteration 300: 0.614611
Cost after iteration 400: 0.578001
Cost after iteration 500: 0.546372
Cost after iteration 600: 0.518331
Cost after iteration 700: 0.492852
Cost after iteration 800: 0.469259
Cost after iteration 900: 0.447139
Cost after iteration 1000: 0.426262
Cost after iteration 1100: 0.406617
Cost after iteration 1200: 0.388723
Cost after iteration 1300: 0.374678
Cost after iteration 1400: 0.365826
Cost after iteration 1500: 0.358532
Cost after iteration 1600: 0.351612
Cost after iteration 1700: 0.345012
Cost after iteration 1800: 0.338704
Cost after iteration 1900: 0.332664
train accuracy: 91.38755980861244 %
test accuracy: 34.0 %
###Markdown
**Expected Output**: **Cost after iteration 0 ** 0.693147 $\vdots$ $\vdots$ **Train Accuracy** 99.04306220095694 % **Test Accuracy** 70.0 % **Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 68%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week!Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.
###Code
# Example of a picture that was wrongly classified.
index = 1
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[d["Y_prediction_test"][0,index]].decode("utf-8") + "\" picture.")
###Output
y = 1, you predicted that it is a "non-cat" picture.
###Markdown
Let's also plot the cost function and the gradients.
###Code
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
###Output
_____no_output_____
###Markdown
**Interpretation**:You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting. 6 - Further analysis (optional/ungraded exercise) Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$. Choice of learning rate **Reminder**:In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens.
###Code
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
###Output
learning rate is: 0.01
train accuracy: 71.29186602870814 %
test accuracy: 64.0 %
-------------------------------------------------------
learning rate is: 0.001
train accuracy: 74.16267942583733 %
test accuracy: 34.0 %
-------------------------------------------------------
learning rate is: 0.0001
train accuracy: 66.02870813397129 %
test accuracy: 34.0 %
-------------------------------------------------------
###Markdown
**Interpretation**: - Different learning rates give different costs and thus different predictions results.- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost). - A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.- In deep learning, we usually recommend that you: - Choose the learning rate that better minimizes the cost function. - If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.) 7 - Test with your own image (optional/ungraded exercise) Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Change your image's name in the following code 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
###Code
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "my_image.jpg" # change this to the name of your image file
## END CODE HERE ##
# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
image = image/255.
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
###Output
y = 0.0, your algorithm predicts a "non-cat" picture.
|
satellite/merge_json.ipynb | ###Markdown
Merge train and validation json file.
###Code
import json
"""
train_json = "/home/ubuntu/data/complete_set/annotations/train.json"
test_json = "/home/ubuntu/data/complete_set/annotations/val.json"
complete_json = "/home/ubuntu/data/complete_set/annotations/complete.json"
"""
train_json = "/home/ubuntu/data/satellite/20190109/annotations/train.json"
test_json = "/home/ubuntu/data/satellite/20190109/annotations/val.json"
complete_json = "/home/ubuntu/data/satellite/20190109/annotations/complete.json"
# read json file
def read_json(json_file):
with open(json_file) as f:
return json.load(f)
train_info = read_json(train_json)
test_info = read_json(test_json)
print(len(train_info['annotations']))
print(len(test_info['annotations']))
print(train_info['images'][:2])
print(train_info['annotations'][:2])
print(test_info['images'][:2])
print(test_info['annotations'][:2])
# To merge these two json_files, we need to update the parts of annotations and images.
print("Update annatations part")
last_annotation_id = train_info['annotations'][-1]['id']
last_image_id = train_info['images'][-1]['id']
print(last_annotation_id)
print(last_image_id)
for annotation in test_info['annotations']:
annotation['id'] += last_annotation_id
annotation['image_id'] += last_image_id
train_info['annotations'].append(annotation)
print("Update images parts")
for image in test_info['images']:
image['id'] += last_image_id
train_info['images'].append(image)
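# Optional sanity check: after offsetting, image and annotation ids should be unique
assert len({img["id"] for img in train_info["images"]}) == len(train_info["images"])
assert len({ann["id"] for ann in train_info["annotations"]}) == len(train_info["annotations"])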
print(len(train_info['annotations']))
print(len(train_info['images']))
print(train_info['annotations'][-1]['id'])
print(train_info['images'][-1]['id'])
# save the complete json data
with open(complete_json, 'w') as f:
json.dump(train_info, f)
# verify complete json file
with open(complete_json) as f:
data_info = json.load(f)
print(len(data_info['annotations']))
print(len(data_info['categories']))
print(len(data_info['images']))
###Output
129154
93
1625
|
notebooks/devise/notebooks/10 - inference only demo.ipynb | ###Markdown
inference only demoWe're done! We have a working pair of models which produce meaningful shared embeddings for text and images, which we can use to run image searches without relying on detailed metadata. The only thing to do now is ensure that the search process is fast enough to be practical, and lay out all of the pieces we need to run this outside of a notebook environment.
###Code
import torch
import torch.nn as nn
import pickle
import nmslib
import urllib
import numpy as np
import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
nltk.download("punkt")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
###Output
_____no_output_____
###Markdown
load dataFirst we'll load a bunch of the lookup data we need to make this thing work. Nothing new here.
###Code
index_to_wordvec = np.load("/mnt/efs/models/index_to_wordvec.npy")
word_to_index = pickle.load(open("/mnt/efs/models/word_to_index.pkl", "rb"))
path_to_id = lambda x: x.split("/")[-1].split(".")[0]
image_ids = np.array(list(map(path_to_id, np.load("/mnt/efs/models/image_ids.npy"))))
###Output
_____no_output_____
###Markdown
load devise'd embeddings for all imagesWe pre-computed the learned visual-semantic embeddings for all of our images at the end of the last notebook, so we can just reload them here. Remember, they're sentence-space representations of the images, so all that needs to happen at query-time is the embedding of the query sentence into the same space, and a KNN lookup of the most similar images.
###Code
embeddings = np.load("/mnt/efs/models/embeddings.npy").reshape(-1, 4096)
###Output
_____no_output_____
###Markdown
utilsAgain, we'll create a couple of utility functions to shrink the sentence embedding process down to a single function call.
###Code
def sentence_to_indexes(sentence):
tokenised = word_tokenize(sentence)
indexes = [word_to_index[word] for word in tokenised if word in word_to_index]
return indexes
def embed(sentence):
indexes = (
[word_to_index["<s>"]] + sentence_to_indexes(sentence) + [word_to_index["</s>"]]
)
wvs = np.stack([index_to_wordvec[i] for i in indexes])
embedding = model(torch.Tensor([wvs]).cuda()).cpu().data.numpy()
return embedding.squeeze()
def embed_paragraph(paragraph):
sentences = sent_tokenize(paragraph)
if len(sentences) == 0:
return None
else:
embeddings = [embed(sentence) for sentence in sentences]
return np.array(embeddings).max(axis=0)
###Output
_____no_output_____
###Markdown
sentence embedding modelNow that we're only inferring an embedding for each sentence, we can ignore the `NLINet()` part of the network from notebook 8. We no longer need to classify sentence pairs or backpropagate any weights, so the remaining network is incredibly small and can be run without much trouble on a CPU. We saved the weights for this half of the network at the end of the last notebook, which we can inject into the matching network architecture here.
###Code
hidden_size = 2048
class SentenceEncoder(nn.Module):
def __init__(self):
super(SentenceEncoder, self).__init__()
self.enc_lstm = nn.LSTM(
input_size=300, hidden_size=hidden_size, num_layers=1, bidirectional=True
)
def forward(self, wv_batch):
embedded, _ = self.enc_lstm(wv_batch)
max_pooled = torch.max(embedded, 1)[0]
return max_pooled
model = SentenceEncoder().to(device)
model_path = "/mnt/efs/models/sentence-encoder-2018-10-08.pt"
model.load_state_dict(torch.load(model_path))
###Output
_____no_output_____
###Markdown
create nmslib search indexIn the previous notebooks we've run searches by brute-forcing our way across the dataset, measuring the distance from our query embedding to every other individual point in sentence-space. This is exact, but _super_ inefficient, especially in a high-volume, high-dimensional case like ours. Here, and in our demo app, we'll use an _approximate_-nearest neighbours algorithm which transforms our data in sentence-embedding space into a hierarchical graph/tree structure, allowing us to traverse the whole thing with very few calculations. The approximate-ness of this ANN algorithm is small, and in the end we lose very little information by transforming it into this structure. Similar libraries like [annoy](https://github.com/spotify/annoy) leverage roughly the same technique to find nearest neighbours in high dimensional space, but [nmslib has been shown to be the most efficient](https://www.benfrederickson.com/approximate-nearest-neighbours-for-recommender-systems/) and we have no reason not to use it here. Pre-computing the index takes a while, but it vastly reduces the search time when we run a query. The index can also be saved in binary form and reloaded elsewhere, so we don't have to re-run that expensive computation every time we restart our demo. The python bindings for nmslib are very straightforward - we can create our fully functional index in just three lines of code.
###Code
index = nmslib.init(method="hnsw", space="cosinesimil")
index.addDataPointBatch(embeddings)
index.createIndex({"post": 2}, print_progress=True)
###Output
_____no_output_____
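Since the point of pre-computing the index is to reuse it in the demo app, it can also be written to disk and reloaded; a sketch using nmslib's `saveIndex`/`loadIndex` (the `save_data`/`load_data` keyword arguments exist in recent nmslib releases — older versions may require re-adding the data points before loading):
```python
# persist the ANN index
index.saveIndex("devise_index.bin", save_data=True)

# ...and reload it elsewhere without recomputing the graph
reloaded = nmslib.init(method="hnsw", space="cosinesimil")
reloaded.loadIndex("devise_index.bin", load_data=True)
```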
###Markdown
searchLet's run a search, returning the closest MIRO IDs and attaching them to a `/works` query URL
###Code
def search(query):
neighbour_indexes, _ = index.knnQuery(embed(query), k=10)
return image_ids[neighbour_indexes]
results = search("mri brain scan")
base_url = "https://wellcomecollection.org/works?query="
url_query = urllib.parse.quote_plus(" ".join(results))
print(base_url + url_query)
###Output
_____no_output_____ |
src/download_data.ipynb | ###Markdown
**Dependencies**
###Code
# standard libraries
import os
# local
from tokens import token as _token # you need to create this file with your token.
from download_functions import sync
###Output
_____no_output_____
###Markdown
**Run this line to download files**
###Code
# set parameters for download
token = _token
url = "https://ladsweb.modaps.eosdis.nasa.gov/archive/orders/501444664/"
destination = "../data/raw/order"
# create destination storage directory
try:
os.mkdir(destination)
except:
print ("Directory already exists.")
# download files
sync(url, destination, token)
###Output
HTTP GET error code: 400
HTTP GET error message:
|
nlu/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_musical_instruments.ipynb | ###Markdown
[](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_musical_instruments.ipynb) Training a Deep Learning Classifier with NLU ClassifierDL (Multi-class Text Classification) 4 class Amazon Musical Instruments review classifier trainingWith the [ClassifierDL model](https://nlp.johnsnowlabs.com/docs/en/annotatorsclassifierdl-multi-class-text-classification) from Spark NLP you can achieve State Of the Art results on any multi class text classification problem This notebook showcases the following features : - How to train the deep learning classifier- How to store a pipeline to disk- How to load the pipeline from disk (Enables NLU offline mode)You can achieve these results or even better on this dataset with training data:You can achieve these results or even better on this dataset with test data: 1. Install Java 8 and NLU
###Code
!wget https://setup.johnsnowlabs.com/nlu/colab.sh -O - | bash
import nlu
###Output
--2021-05-05 04:56:20-- https://raw.githubusercontent.com/JohnSnowLabs/nlu/master/scripts/colab_setup.sh
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.111.133, 185.199.109.133, 185.199.108.133, ...
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.111.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1671 (1.6K) [text/plain]
Saving to: ‘STDOUT’
- 0%[ ] 0 --.-KB/s Installing NLU 3.0.0 with PySpark 3.0.2 and Spark NLP 3.0.1 for Google Colab ...
- 100%[===================>] 1.63K --.-KB/s in 0.001s
2021-05-05 04:56:20 (1.60 MB/s) - written to stdout [1671/1671]
###Markdown
2. Download musical instruments classification datasethttps://www.kaggle.com/eswarchandt/amazon-music-reviewsdataset with products rated between 5 classes
###Code
! wget http://ckl-it.de/wp-content/uploads/2021/01/Musical_instruments_reviews.csv
import pandas as pd
test_path = '/content/Musical_instruments_reviews.csv'
train_df = pd.read_csv(test_path,sep=",")
cols = ["y","text"]
train_df = train_df[cols]
from sklearn.model_selection import train_test_split
train_df, test_df = train_test_split(train_df, test_size=0.2)
train_df
###Output
_____no_output_____
###Markdown
3. Train Deep Learning Classifier using nlu.load('train.classifier')By default, the Universal Sentence Encoder Embeddings (USE) are being downloaded to provide embeddings for the classifier. You can use any of the 50+ other sentence Embeddings in NLU though!Your dataset label column should be named 'y' and the feature column with text data should be named 'text'
###Code
# load a trainable pipeline by specifying the train. prefix and fit it on a dataset with label and text columns
# Since there are no
trainable_pipe = nlu.load('train.classifier')
fitted_pipe = trainable_pipe.fit(train_df.iloc[:50] )
# predict with the trainable pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df.iloc[:50],output_level='document' )
preds
###Output
tfhub_use download started this may take some time.
Approximate size to download 923.7 MB
[OK!]
sentence_detector_dl download started this may take some time.
Approximate size to download 354.6 KB
[OK!]
###Markdown
4. Evaluate the model
###Code
from sklearn.metrics import classification_report
print(classification_report(preds['y'], preds['trained_classifier']))
###Output
precision recall f1-score support
average 0.58 1.00 0.73 15
good 0.50 0.62 0.55 13
very good 0.00 0.00 0.00 10
very poor 0.75 0.50 0.60 12
accuracy 0.58 50
macro avg 0.46 0.53 0.47 50
weighted avg 0.48 0.58 0.51 50
###Markdown
5. Lets try different Sentence Emebddings
###Code
# We can use nlu.print_components(action='embed_sentence') to see every possible sentence embedding we could use. Let's use bert!
nlu.print_components(action='embed_sentence')
# Load pipe with bert embeds
# using large embeddings can take a few hours..
# fitted_pipe = nlu.load('en.embed_sentence.bert_large_uncased train.classifier').fit(train_df)
fitted_pipe = nlu.load('en.embed_sentence.bert train.classifier').fit(train_df.iloc[:100])
# predict with the trained pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df.iloc[:100],output_level='document')
from sklearn.metrics import classification_report
print(classification_report(preds['y'], preds['trained_classifier']))
# Load pipe with bert embeds
fitted_pipe = nlu.load('embed_sentence.bert train.classifier').fit(train_df.iloc[:100])
# predict with the trained pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df.iloc[:100],output_level='document')
from sklearn.metrics import classification_report
print(classification_report(preds['y'], preds['trained_classifier']))
from sklearn.metrics import classification_report
trainable_pipe = nlu.load('en.embed_sentence.small_bert_L12_768 train.classifier')
# We need to train longer and use a smaller LR for NON-USE based sentence embeddings usually
# We could tune the hyperparameters further with hyperparameter tuning methods like gridsearch
# Also longer training gives more accuracy
trainable_pipe['classifier_dl'].setMaxEpochs(90)
trainable_pipe['classifier_dl'].setLr(0.0005)
fitted_pipe = trainable_pipe.fit(train_df)
# predict with the trainable pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df,output_level='document')
#sentence detector that is part of the pipe generates sone NaNs. lets drop them first
preds.dropna(inplace=True)
print(classification_report(preds['y'], preds['trained_classifier']))
#preds
###Output
sent_small_bert_L12_768 download started this may take some time.
Approximate size to download 392.9 MB
[OK!]
sentence_detector_dl download started this may take some time.
Approximate size to download 354.6 KB
[OK!]
precision recall f1-score support
average 0.26 1.00 0.41 25
good 0.00 0.00 0.00 24
very good 0.00 0.00 0.00 23
very poor 0.00 0.00 0.00 24
accuracy 0.26 96
macro avg 0.07 0.25 0.10 96
weighted avg 0.07 0.26 0.11 96
###Markdown
6. evaluate on Test Data
###Code
preds = fitted_pipe.predict(test_df,output_level='document')
#sentence detector that is part of the pipe generates sone NaNs. lets drop them first
preds.dropna(inplace=True)
print(classification_report(preds['y'], preds['trained_classifier']))
###Output
precision recall f1-score support
average 0.22 1.00 0.36 5
good 0.00 0.00 0.00 6
very good 0.00 0.00 0.00 7
very poor 1.00 0.17 0.29 6
accuracy 0.25 24
macro avg 0.30 0.29 0.16 24
weighted avg 0.30 0.25 0.15 24
###Markdown
7. Let's save the model
###Code
stored_model_path = './models/classifier_dl_trained'
fitted_pipe.save(stored_model_path)
###Output
Stored model in ./model/classifier_dl_trained
###Markdown
8. Let's load the model from HDD.This makes offline NLU usage possible! You need to call nlu.load(path=path_to_the_pipe) to load a model/pipeline from disk.
###Code
hdd_pipe = nlu.load(path=stored_model_path)
preds = hdd_pipe.predict('It was really good ')
preds
hdd_pipe.print_info()
###Output
_____no_output_____
###Markdown
[](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_musical_instruments.ipynb) Training a Deep Learning Classifier with NLU ClassifierDL (Multi-class Text Classification) 4 class Amazon Musical Instruments review classifier trainingWith the [ClassifierDL model](https://nlp.johnsnowlabs.com/docs/en/annotatorsclassifierdl-multi-class-text-classification) from Spark NLP you can achieve State Of the Art results on any multi class text classification problem This notebook showcases the following features : - How to train the deep learning classifier- How to store a pipeline to disk- How to load the pipeline from disk (Enables NLU offline mode)You can achieve these results or even better on this dataset with training data:You can achieve these results or even better on this dataset with test data: 1. Install Java 8 and NLU
###Code
!wget https://setup.johnsnowlabs.com/nlu/colab.sh -O - | bash
import nlu
###Output
--2021-05-05 04:56:20-- https://raw.githubusercontent.com/JohnSnowLabs/nlu/master/scripts/colab_setup.sh
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.111.133, 185.199.109.133, 185.199.108.133, ...
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.111.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1671 (1.6K) [text/plain]
Saving to: ‘STDOUT’
- 0%[ ] 0 --.-KB/s Installing NLU 3.0.0 with PySpark 3.0.2 and Spark NLP 3.0.1 for Google Colab ...
- 100%[===================>] 1.63K --.-KB/s in 0.001s
2021-05-05 04:56:20 (1.60 MB/s) - written to stdout [1671/1671]
###Markdown
2. Download musical instruments classification datasethttps://www.kaggle.com/eswarchandt/amazon-music-reviewsdataset with products rated between 5 classes
###Code
! wget http://ckl-it.de/wp-content/uploads/2021/01/Musical_instruments_reviews.csv
import pandas as pd
test_path = '/content/Musical_instruments_reviews.csv'
train_df = pd.read_csv(test_path,sep=",")
cols = ["y","text"]
train_df = train_df[cols]
from sklearn.model_selection import train_test_split
train_df, test_df = train_test_split(train_df, test_size=0.2)
train_df
###Output
_____no_output_____
###Markdown
3. Train Deep Learning Classifier using nlu.load('train.classifier')By default, the Universal Sentence Encoder Embeddings (USE) are being downloaded to provide embeddings for the classifier. You can use any of the 50+ other sentence Embeddings in NLU though!Your dataset label column should be named 'y' and the feature column with text data should be named 'text'
###Code
# load a trainable pipeline by specifying the train. prefix and fit it on a dataset with label and text columns
# Since there are no
trainable_pipe = nlu.load('train.classifier')
fitted_pipe = trainable_pipe.fit(train_df.iloc[:50] )
# predict with the trainable pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df.iloc[:50],output_level='document' )
preds
###Output
tfhub_use download started this may take some time.
Approximate size to download 923.7 MB
[OK!]
sentence_detector_dl download started this may take some time.
Approximate size to download 354.6 KB
[OK!]
###Markdown
4. Evaluate the model
###Code
from sklearn.metrics import classification_report
print(classification_report(preds['y'], preds['classifier_dl']))
###Output
precision recall f1-score support
average 0.58 1.00 0.73 15
good 0.50 0.62 0.55 13
very good 0.00 0.00 0.00 10
very poor 0.75 0.50 0.60 12
accuracy 0.58 50
macro avg 0.46 0.53 0.47 50
weighted avg 0.48 0.58 0.51 50
###Markdown
5. Lets try different Sentence Emebddings
###Code
# We can use nlu.print_components(action='embed_sentence') to see every possible sentence embedding we could use. Let's use bert!
nlu.print_components(action='embed_sentence')
# Load pipe with bert embeds
# using large embeddings can take a few hours..
# fitted_pipe = nlu.load('en.embed_sentence.bert_large_uncased train.classifier').fit(train_df)
fitted_pipe = nlu.load('en.embed_sentence.bert train.classifier').fit(train_df.iloc[:100])
# predict with the trained pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df.iloc[:100],output_level='document')
from sklearn.metrics import classification_report
print(classification_report(preds['y'], preds['classifier_dl']))
# Load pipe with bert embeds
fitted_pipe = nlu.load('embed_sentence.bert train.classifier').fit(train_df.iloc[:100])
# predict with the trained pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df.iloc[:100],output_level='document')
from sklearn.metrics import classification_report
print(classification_report(preds['y'], preds['classifier_dl']))
from sklearn.metrics import classification_report
trainable_pipe = nlu.load('en.embed_sentence.small_bert_L12_768 train.classifier')
# We need to train longer and use a smaller LR for NON-USE based sentence embeddings usually
# We could tune the hyperparameters further with hyperparameter tuning methods like gridsearch
# Also longer training gives more accuracy
trainable_pipe['trainable_classifier_dl'].setMaxEpochs(90)
trainable_pipe['trainable_classifier_dl'].setLr(0.0005)
fitted_pipe = trainable_pipe.fit(train_df)
# predict with the trainable pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df,output_level='document')
#sentence detector that is part of the pipe generates sone NaNs. lets drop them first
preds.dropna(inplace=True)
print(classification_report(preds['y'], preds['classifier_dl']))
#preds
###Output
sent_small_bert_L12_768 download started this may take some time.
Approximate size to download 392.9 MB
[OK!]
sentence_detector_dl download started this may take some time.
Approximate size to download 354.6 KB
[OK!]
precision recall f1-score support
average 0.26 1.00 0.41 25
good 0.00 0.00 0.00 24
very good 0.00 0.00 0.00 23
very poor 0.00 0.00 0.00 24
accuracy 0.26 96
macro avg 0.07 0.25 0.10 96
weighted avg 0.07 0.26 0.11 96
###Markdown
6. evaluate on Test Data
###Code
preds = fitted_pipe.predict(test_df,output_level='document')
#sentence detector that is part of the pipe generates sone NaNs. lets drop them first
preds.dropna(inplace=True)
print(classification_report(preds['y'], preds['classifier_dl']))
###Output
precision recall f1-score support
average 0.22 1.00 0.36 5
good 0.00 0.00 0.00 6
very good 0.00 0.00 0.00 7
very poor 1.00 0.17 0.29 6
accuracy 0.25 24
macro avg 0.30 0.29 0.16 24
weighted avg 0.30 0.25 0.15 24
###Markdown
7. Let's save the model
###Code
stored_model_path = './models/classifier_dl_trained'
fitted_pipe.save(stored_model_path)
###Output
Stored model in ./model/classifier_dl_trained
###Markdown
8. Let's load the model from HDD.This makes offline NLU usage possible! You need to call nlu.load(path=path_to_the_pipe) to load a model/pipeline from disk.
###Code
hdd_pipe = nlu.load(path=stored_model_path)
preds = hdd_pipe.predict('It was really good ')
preds
hdd_pipe.print_info()
###Output
_____no_output_____ |
25_re-continued.ipynb | ###Markdown
\d Matches any decimal digit; this is equivalent to the class [0-9]. \D Matches any non-digit character; this is equivalent to the class [^0-9]. \s Matches any whitespace character; this is equivalent to the class [ \t\n\r\f\v]. \S Matches any non-whitespace character; this is equivalent to the class [^ \t\n\r\f\v]. \w Matches any alphanumeric character; this is equivalent to the class [a-zA-Z0-9_]. \W Matches any non-alphanumeric character; this is equivalent to the class [^a-zA-Z0-9_]. \b Word boundary. \B Another zero-width assertion, this is the opposite of \b
###Code
pattern = re.compile(r'\bHa')
matches = pattern.finditer(text_to_search)
for match in matches:
print(match)
print(text_to_search[66:75])
###Output
Ha HaHa
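The special sequences listed above can be tried out quickly; a small self-contained sketch (it defines its own sample string, since text_to_search comes from an earlier cell of this notebook):
```python
import re

sample = 'Order #42 shipped to 221B Baker St.'

print(re.findall(r'\d+', sample))     # runs of digits -> ['42', '221']
print(re.findall(r'\w+', sample))     # alphanumeric "word" chunks
print(re.findall(r'\s', sample))      # whitespace characters
print(re.findall(r'\bB\w*', sample))  # words starting with B at a word boundary
```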
###Markdown
^ Matches at the beginning of lines $ Matches at the end of a line \A Matches only at the start of the string. When not in MULTILINE mode, \A and ^ are effectively the same. In MULTILINE mode, they’re different: \A still matches only at the beginning of the string, but ^ may match at any location inside the string that follows a newline character. \Z Matches only at the end of the string.
###Code
pattern = re.compile(r'\d\d\d.\d\d\d.\d\d\d\d')
matches = pattern.finditer(text_to_search)
for match in matches:
print(match)
with open('data.txt', 'r') as f:
contents = f.read()
pattern = re.compile(r'\d\d\d.\d\d\d.\d\d\d\d')
matches = pattern.finditer(contents)
for match in matches:
print(match)
pattern = re.compile(r'[89]00.\d\d\d.\d\d\d\d')
matches = pattern.finditer(text_to_search)
for match in matches:
print(match)
with open('data.txt', 'r') as f:
contents = f.read()
pattern = re.compile(r'[89]00.\d\d\d.\d\d\d\d')
matches = pattern.finditer(contents)
for match in matches:
print(match)
pattern = re.compile(r'[1-5]')
matches = pattern.finditer(text_to_search)
for match in matches:
print(match)
pattern = re.compile(r'[^a-zA-Z0-9]')
matches = pattern.finditer(text_to_search)
for match in matches:
print(match)
pattern = re.compile(r'[^b]at')
matches = pattern.finditer(text_to_search)
for match in matches:
print(match)
###Output
<_sre.SRE_Match object; span=(272, 275), match='cat'>
<_sre.SRE_Match object; span=(277, 280), match='mat'>
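The anchors described earlier (^, $, \A, \Z) are not exercised by the phone-number patterns above; a short self-contained sketch showing the effect of re.MULTILINE:
```python
import re

text = 'first line\nsecond line\nthird line'

print(re.findall(r'^\w+', text))                # ['first'] - ^ matches only at string start
print(re.findall(r'^\w+', text, re.MULTILINE))  # ['first', 'second', 'third']
print(re.findall(r'\w+$', text, re.MULTILINE))  # last word of every line
print(re.findall(r'\A\w+', text))               # ['first'] regardless of MULTILINE
```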
###Markdown
Character Description Example Try it [] A set of characters "[a-m]" \ Signals a special sequence (can also be used to escape special characters) "\d" . Any character (except newline character) "he..o" ^ Starts with "^hello" $ Ends with "world$" * Zero or more occurrences "aix*" + One or more occurrences "aix+" {} Exactly the specified number of occurrences "al{2}" | Either or "falls|stays" () Capture and group
###Code
pattern = re.compile(r'Mr\.')
matches = pattern.finditer(text_to_search)
for match in matches:
print(match)
emails = '''
[email protected]
[email protected]
[email protected]
'''
pattern = re.compile(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+')
matches = pattern.finditer(emails)
for match in matches:
print(match)
urls = '''
https://www.google.com
http://coreyms.com
https://youtube.com
https://www.nasa.gov
'''
pattern = re.compile(r'https?://(www\.)?(\w+)(\.\w+)')
subbed_urls = pattern.sub(r'\2\3', urls)
print(subbed_urls)
pattern = re.compile(r'\d{3}.\d{3}.\d{4}')
matches = pattern.findall(text_to_search)
for match in matches:
print(match)
pattern = re.compile(r'start', re.I)
matches = pattern.findall(sentence)
for match in matches:
print(match)
###Output
Start
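Capturing groups and alternation (the () and | entries in the table above) can also be read back from individual match objects; a small self-contained sketch:
```python
import re

log = 'ERROR 2017-01-05 disk full; WARNING 2017-01-06 low memory'

pattern = re.compile(r'(ERROR|WARNING) (\d{4})-(\d{2})-(\d{2})')
for match in pattern.finditer(log):
    print(match.group(0))   # the whole match
    print(match.group(1))   # severity, first capture group
    print(match.groups())   # all captured groups as a tuple
```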
|
Chapman/Ch6-Problem_6-26.ipynb | ###Markdown
Exercises Electric Machinery Fundamentals Chapter 6 Problem 6-26
###Code
%pylab inline
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
Description A 460-V 50-hp six-pole $\Delta$ -connected 60-Hz three-phase induction motor has a full-load slip of 4 percent, an efficiency of 91 percent, and a power factor of 0.87 lagging. At start-up, the motor develops 1.75 times the full-load torque but draws 7 times the rated current at the rated voltage. This motor is to be started with an autotransformer reduced voltage starter. (a) * What should the output voltage of the starter circuit be to reduce the starting torque until it equals the rated torque of the motor? (b) * What will the motor starting current and the current drawn from the supply be at this voltage?
###Code
Vt = 460 # [V]
Wperhp = 746 # official conversion rate of "electrical horsepowers"
Pout = 50 * Wperhp # [W]
PF = 0.87
eta = 0.91
times_tor = 1.75
times_cur = 7
###Output
_____no_output_____
###Markdown
SOLUTION (a)The starting torque of an induction motor is proportional to the square of $V_{TH}$ ,$$\frac{\tau_\text{start2}}{\tau_\text{start1}} = \left(\frac{V_\text{TH2}}{V_\text{TH1}}\right)^2 = \left(\frac{V_\text{T2}}{V_\text{T2}}\right)^2$$ If a torque of 1.75 $\tau_{rated}$ is produced by a voltage of 460 V, then a torque of 1.00 $\tau_\text{rated}$ would be produced by a voltage of:$$\frac{1.00\tau_\text{rated}}{1.75\tau_\text{rated}} = \left(\frac{V_{T2}}{460V}\right)^2$$
###Code
Vt2 = sqrt(1.00/times_tor * Vt**2)
print('''
Vt2 = {:.0f} V
==========='''.format(Vt2))
###Output
Vt2 = 348 V
===========
###Markdown
(b)The motor starting current is directly proportional to the starting voltage, so$$I_{L2} = \left(\frac{V_{T2}}{V_T}\right)I_{L1}$$
###Code
Il2_Il1 = Vt2/Vt
Il1_Irated = times_cur
Il2_Irated = Il2_Il1 * Il1_Irated
print('''
Il2 = {:.2f} Irated
================='''.format(Il2_Irated))
###Output
Il2 = 5.29 Irated
=================
###Markdown
The input power to this motor is:$$P_\text{in} = \frac{P_\text{out}}{\eta}$$
###Code
Pin = Pout / eta
print('Pin = {:.1f} kW'.format(Pin/1000))
###Output
Pin = 41.0 kW
###Markdown
The rated current is equal to:$$I_\text{rated} = \frac{P_\text{in}}{\sqrt{3}V_TPF}$$
###Code
Irated = Pin / (sqrt(3)*Vt*PF)
print('Irated = {:.2f} A'.format(Irated))
###Output
Irated = 59.13 A
###Markdown
Therefore, the motor starting current is
###Code
Il2 = Il2_Irated * Irated
print('''
Il2 = {:.1f} A
============='''.format(Il2))
###Output
Il2 = 312.9 A
=============
###Markdown
The turns ratio of the autotransformer that produces this starting voltage is:$$\frac{N_{SE}+N_C}{N_C} = \frac{V_T}{V_{T2}} = a$$
###Code
a = Vt/Vt2
print('a = {:.3f}'.format(a))
###Output
a = 1.323
###Markdown
so the current drawn from the supply will be:$$I_\text{line} = \frac{I_\text{start}}{a}$$
###Code
Iline = Il2 / a
print('''
Iline = {:.0f} A
============='''.format(Iline))
###Output
Iline = 237 A
=============
|
Numerical Differentiation.ipynb | ###Markdown
Numerical Differentiation_By Dhruv Jain_ **Objective: Implementation of various numerical differentiation schemes**
###Code
# Key libraries: Numpy(for mathematical procedures) and matplotlib(to create plots)
import numpy as np
import matplotlib.pyplot as plt
import copy
###Output
_____no_output_____
###Markdown
Forward Differentiation
###Code
def for_diff(func, x, h, approx_order=1):
"""Dhruv Jain, 1 Dec 2021
Obj: Compute first and second order Forward Differentiation approximation of f'(x)
Args:
func: function, f(x) whose f'(x) needs to be computed
x: float, value at which to approximate f'(x)
h: float, perturbation
approx_order: int, optional, DEFAULT = 1
1: First order approximation of f'(x)
2: Second order approximation of f'(x)
Output:
First or second approximation of f'(x)
"""
if h == 0 or h > 1e-4:
print('Recheck h')
f0 = func(x)
f1 = func(x+h)
# First order approximation of f'(x)
if approx_order == 1:
df = (f1-f0)/h
# Second order approximation of f'(x)
elif approx_order == 2:
f2 = func(x+2*h)
df = (-3*f0 + 4*f1 - f2)/(2*h)
else:
print('approx_order should be 1 or 2')
return 0
return df
###Output
_____no_output_____
###Markdown
Central Differentiation
###Code
def cen_diff(func, x, h, approx_order=2):
"""Dhruv Jain, 1 Dec 2021
Obj: Compute second and fourth order CENTRAL Differentiation approximation of f'(x)
Args:
func: function, f(x) whose f'(x) needs to be computed
x: float, value at which to approximate f'(x)
h: float, perturbation
approx_order: int, optional, DEFAULT = 2
2: Second order approximation of f'(x)
4: Fourth order approximation of f'(x)
Output:
Second or Fourth approximation of f'(x)
"""
if h == 0 or h > 1e-4:
print('Recheck h')
f_n1 = func(x-h)
f1 = func(x+h)
# Second order approximation of f'(x)
if approx_order == 2:
df = (f1-f_n1)/(2*h)
# Fourth order approximation of f'(x)
elif approx_order == 4:
f_n2 = func(x-2*h)
f2 = func(x+2*h)
df = (-f2 + 8*f1 - 8*f_n1 + f_n2)/(12*h)
else:
print('approx_order should be 2 or 4')
return 0
return df
###Output
_____no_output_____
###Markdown
Complex Step Differentiation
###Code
def complex_diff(func, x, h):
"""Dhruv Jain, 1 Dec 2021
Obj: Compute second order COMPLEX STEP differentiation approximation of f'(x)
This method is useful as it avoids cancellation error
Args:
func: function, f(x) whose f'(x) needs to be computed
x: float, value at which to approximate f'(x)
h: float, perturbation
Output:
    Second order complex-step approximation of f'(x)
"""
if h == 0 or h > 1e-4:
print('Recheck h')
df = np.imag(func(x+1j*h))/h
return df
###Output
_____no_output_____
###Markdown
Example
###Code
# Example function
def func_ex(x):
return x**3 + np.sin(x)**2 - x + 1
# Derivative of func_ex
def dfunc_ex(x):
return 3*x**2 + 2*np.sin(x)*np.cos(x) - 1
# Call the various differentiation schemes
x = 3
h = np.finfo(float).eps*1000000
fd_1o = for_diff(func_ex, x, h, approx_order=1)
fd_2o = for_diff(func_ex, x, h, approx_order=2)
cd_2o = cen_diff(func_ex, x, h, approx_order=2)
cd_4o = cen_diff(func_ex, x, h, approx_order=4)
h_com = 1e-8
complex_2o= complex_diff(func_ex, x, h_com)
print('Analytical Derivative: %0.16f'%dfunc_ex(x))
print('Difference between the analytical derivative and the other methods:\n')
print('Difference: (First order forward differentiation): %0.16f'%(dfunc_ex(x)-fd_1o))
print('Difference: (Second order forward differentiation): %0.16f'%(dfunc_ex(x)-fd_2o))
print('Difference: (Second order central differentiation): %0.16f'%(dfunc_ex(x)-cd_2o))
print('Difference: (Fourth order central differentiation): %0.16f'%(dfunc_ex(x)-cd_4o))
print('Difference: (Second order complex step differentiation): %0.16f'%(dfunc_ex(x)-complex_2o))
###Output
Analytical Derivative: 25.7205845018010741
Difference between the analytical derivative and the other methods:
Difference: (First order forward differentiation): -0.0000074981989258
Difference: (Second order forward differentiation): -0.0000154981989269
Difference: (Second order central differentiation): 0.0000005018010754
Difference: (Fourth order central differentiation): 0.0000045018010724
Difference: (Second order complex step differentiation): 0.0000000000000000
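As a possible extension (not part of the original notebook), the truncation versus round-off trade-off can be visualized by sweeping the step size h and plotting the absolute error of each scheme, reusing func_ex, dfunc_ex and the three functions defined above:
```python
# Sketch only: sweep h and compare absolute errors of the three schemes.
import numpy as np
import matplotlib.pyplot as plt

hs = np.logspace(-12, -5, 40)   # stay below the 1e-4 guard used in the functions above
x0 = 3
exact = dfunc_ex(x0)

err_fd = [abs(for_diff(func_ex, x0, h) - exact) for h in hs]
err_cd = [abs(cen_diff(func_ex, x0, h) - exact) for h in hs]
err_cx = [abs(complex_diff(func_ex, x0, h) - exact) for h in hs]

plt.loglog(hs, err_fd, label='forward, 1st order')
plt.loglog(hs, err_cd, label='central, 2nd order')
plt.loglog(hs, err_cx, label='complex step')
plt.xlabel('h')
plt.ylabel('absolute error')
plt.legend()
plt.show()
```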
|
Linear Regression/Linear_regression_1(medical_charge).ipynb | ###Markdown
Problem: A linear regression model to estimate the annual medical expenditure for new customers. Data: A CSV file containing verified historical data of 1300 customers, with the actual medical charges incurred and other information such as their age, sex, BMI, children, smoking habits and region of residence. Downloading the Data
###Code
import pandas as pd
import numpy as np
import plotly.express as px
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
medical_charges_url = 'https://raw.githubusercontent.com/Dixit01/100daysofML/main/Data/ML_Data/medical_charges.csv'
df = pd.read_csv(medical_charges_url)
df
###Output
_____no_output_____
###Markdown
Exploratory Analysis and Visualization
###Code
df.info()
df.describe()
###Output
_____no_output_____
###Markdown
age
###Code
df.age.describe()
fig = px.histogram(df,x='age',marginal='box', nbins=47, title='Distribution of Age')
fig.update_layout(bargap=0.1)
fig.show()
df.bmi.describe()
###Output
_____no_output_____
###Markdown
bmi
###Code
fig = px.histogram(df,x='bmi',marginal='box',
color_discrete_sequence=['green', 'blue'],
title='Distribution of BMI')
fig.update_layout(bargap=0.1)
fig.show()
###Output
_____no_output_____
###Markdown
Charges
###Code
df.charges.describe()
fig = px.histogram(df,x='charges',marginal='box', color='smoker',
color_discrete_sequence=['green', 'blue'],
title='Annual Medical Charges')
fig.update_layout(bargap=0.1)
fig.show()
fig = px.histogram(df,x='charges',marginal='box', color='sex',
color_discrete_sequence=['pink', 'grey'],
title='Annual Medical Charges')
fig.update_layout(bargap=0.1)
fig.show()
fig = px.histogram(df,x='charges',marginal='box', color='region',
color_discrete_sequence=['green', 'blue','black','orange'],
title='Annual Medical Charges Over Region')
fig.update_layout(bargap=0.1)
fig.show()
df.children.value_counts()
px.histogram(df, x='children', color='smoker', title='Smoker')
px.histogram(df, x='smoker', color='sex', title='Smoker')
px.histogram(df, x='age', color='smoker', title='Smoker')
fig = px.scatter(df,
x='age',
y='charges',
color='smoker',
opacity=0.8,
hover_data=['sex'],
title='Age vs. Charges')
fig.update_traces(marker_size=5)
fig.show()
fig = px.scatter(df, x = "bmi", y = "charges", color = "smoker",
opacity = 0.8,hover_data=["sex"],title='Bmi vs. Charges')
fig.update_traces(marker_size=5)
fig.show()
fig = px.scatter(df, x = "region", y = "charges", color = "smoker",
opacity = 0.8,title='sex vs. Charges')
fig.update_traces(marker_size=5)
fig.show()
fig = px.scatter(df, x = "children", y = "charges", color = "smoker",
opacity = 0.8,hover_data=["sex"],title='region vs. Charges')
fig.update_traces(marker_size=5)
fig.show()
fig = px.scatter(df, x = "sex", y = "charges", color = "smoker",
opacity = 0.8,hover_data=["sex"],title='children vs. Charges')
fig.update_traces(marker_size=5)
fig.show()
###Output
_____no_output_____
###Markdown
Correlation
###Code
df.corr()
sns.heatmap(df.corr(), cmap='Reds', annot=True)
plt.title('Correlation Matrix');
df.charges.corr(df.age)
###Output
_____no_output_____
###Markdown
Encoding
###Code
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
sex_code = LabelEncoder()
# Let's first start by changing the sex column
sex_code.fit(df.sex.drop_duplicates())
df["sex_code"] = sex_code.transform(df['sex'])
df
# Then smoker column
smoker_code = LabelEncoder()
smoker_code.fit(df.smoker.drop_duplicates())
df['smoker_code'] = smoker_code.transform(df['smoker'])
df
enc = OneHotEncoder()
enc.fit(df[['region']])
enc.categories_
one_hot = enc.transform(df[['region']]).toarray()
one_hot
df[['northeast', 'northwest', 'southeast', 'southwest']] = one_hot
df
from sklearn.preprocessing import StandardScaler
numeric_cols = ['age', 'bmi', 'children']
scaler = StandardScaler()
scaler.fit(df[numeric_cols])
print(scaler.mean_)
print(scaler.var_)
df_num_inputs = scaler.transform(df[numeric_cols])
df_num_inputs
cat_cols = ['smoker_code', 'sex_code', 'northeast', 'northwest', 'southeast', 'southwest']
df_categorical_data = df[cat_cols].values
df_categorical_data
inputs = np.concatenate((df_num_inputs, df_categorical_data), axis=1)
targets = df.charges
###Output
_____no_output_____
###Markdown
Linear Regression $\text{charges} = w_1 \times \text{age} + w_2 \times \text{bmi} + \dots$
###Code
# train-test split
from sklearn.model_selection import train_test_split
inputs_train, inputs_test, targets_train, targets_test = train_test_split(inputs, targets, test_size=0.15)
# For root mean Square Error
def rmse(targets, predictions):
return np.sqrt(np.mean(np.square(targets - predictions)))
# linear Regression
from sklearn.linear_model import LinearRegression
model = LinearRegression()
# Create and train the model
model = LinearRegression().fit(inputs_train, targets_train)
# Generate predictions
predictions = model.predict(inputs_test)
# Compute loss to evalute the model
loss = rmse(targets_test, predictions)
print('Loss:', loss)
model.coef_
model.intercept_
weights_df = pd.DataFrame({
'feature': np.append(numeric_cols + cat_cols, 1),
'weight': np.append(model.coef_, model.intercept_)
})
weights_df.sort_values('weight', ascending=False)
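# --- Added sketch (assumption, not part of the original notebook): ---
# estimate the charge for one hypothetical new customer by reusing the fitted
# scaler and the column order (numeric_cols + cat_cols) defined above.
new_numeric = scaler.transform([[40, 30.0, 2]])    # age, bmi, children (made-up values)
new_categorical = np.array([[1, 0, 0, 1, 0, 0]])   # smoker_code, sex_code, one-hot region (made-up values)
new_input = np.concatenate((new_numeric, new_categorical), axis=1)
print('Estimated annual charge:', model.predict(new_input))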
###Output
_____no_output_____ |
Streamlit/Deploy_Heroku/Streamlit_Heroku.ipynb | ###Markdown
Upload streamlit app to heroku 1. Set up 1.1 Setting up virtual environmentSimilar to Flask apps, we want to set up a virtual environment. From the command line:1. mkdir app - app is the name of the folder2. cd app - Changes directory to the newly created folder3. python -m venv env - Creates the virtual environment4. source env/bin/activate - Activates the virtual environment5. pip install streamlit - The virtual environment will not have any libraries installed, so you have to install streamlit ```bashmkdir appcd apppython -m venv envsource env/bin/activatepip install streamlitpip install jupyter numpy matplotlib``` 1.2 Create streamlit app and test locally 1.2.1 Create the Streamlit app script
###Code
%%writefile app.py
import streamlit as st
st.title("Hello World")
st.write("Pick an option")
keys = ["Normal","Uniform"]
dist_key = st.selectbox("Which Distribution do you want?",keys)
st.write("You have chosen {}".format(dist_key))
###Output
Writing app.py
###Markdown
1.2.2 Test on your machineIn the command line of the same directory```bashstreamlit run app.py```
###Code
#! streamlit run app.py
###Output
_____no_output_____
###Markdown
1.3 Upload to heroku server1. First, create an Heroku account - https://signup.heroku.com/ 2. Install Heroku on your machine: - https://devcenter.heroku.com/articles/getting-started-with-pythonset-up 1.3.1 Create files necessary for Heroku app- Do this in the main directory 1.3.1.1 requirements.txt- This can either be manually entered or from the commmand line with the following code:```bashpip freeze > requirements.txt```
###Code
%%writefile requirements.txt
streamlit==0.49.0
###Output
Writing requirements.txt
###Markdown
1.3.1.2 setup.sh
###Code
%%writefile setup.sh
mkdir -p ~/.streamlit/
echo "\
[general]\n\
email = \"[email protected]\"\n\
" > ~/.streamlit/credentials.toml
echo "\
[server]\n\
headless = true\n\
enableCORS=false\n\
port = $PORT\n\
" > ~/.streamlit/config.toml
###Output
Writing setup.sh
###Markdown
1.3.1.3 Procfile- IMPORTANT: the name of your app script (app.py here) goes at the end of the Procfile, after `streamlit run`
###Code
%%writefile Procfile
web: sh setup.sh && streamlit run app.py
###Output
Writing Procfile
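The notebook stops after writing the Procfile. A typical deploy sequence with git and the Heroku CLI is sketched below; it assumes both are installed and you are logged in, and the app name is a placeholder:
```bash
heroku login
git init
git add app.py requirements.txt setup.sh Procfile
git commit -m "Streamlit app for Heroku"
heroku create my-streamlit-app   # placeholder app name
git push heroku master
heroku open
```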
|
masterclass-jan18/d2s1-innovation-leadership-and-webscraping/notebook-webscraping.ipynb | ###Markdown
Notebook: Web Scraping & Web Crawling**Author List**: Alexander Fred Ojala**Original Sources**: https://www.crummy.com/software/BeautifulSoup/bs4/doc/ & https://www.dataquest.io/blog/web-scraping-tutorial-python/**License**: Feel free to do whatever you want to with this code**Compatibility:** Python 2.x and 3.x Other Web Scraping toolsThis notebook mainly goes over how to get data with the Python package `BeautifulSoup`. However, there are many other Python packages that can be used for scraping. Two are very popular and widely used:* **Selenium:** a browser-automation library that drives a real browser (so JavaScript-rendered pages work) and can act like a human visitor* **Scrapy:** a framework for automated crawls that run for long periods of time. Scrapy is very popular. Table of Contents(Clickable document links)___ [0: Pre-setup](sec0)Document setup and Python 2 and Python 3 compatibility [1: Simple webscraping intro](sec1)Simple example of webscraping on a premade HTML template [2: Scrape Data-X Schedule](sec2)Find and scrape the current Data-X schedule. [3: Scrape Images and Files](sec3)Scrape a website of Images, PDF's, CSV data or any other file type. [Breakout Problem: Scrape Weather Data](secBK)Scrape real time weather data in Berkeley. [Appendix](sec5) [Scrape Bloomberg sitemap for political news headlines](sec6) [Webcrawl Twitter, recursive URL link fetcher + depth](sec7) [SEO, visualize website categories as a tree](sec8) Pre-Setup
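Since Selenium is only mentioned in passing above, here is a minimal, hedged sketch of fetching a JavaScript-rendered page with it (it assumes the selenium package and a matching browser driver are installed, and it is not used anywhere else in this notebook):
```python
from selenium import webdriver

driver = webdriver.Chrome()            # assumes chromedriver is available on the PATH
driver.get('https://afo.github.io/data-x')
html = driver.page_source              # full HTML after JavaScript has run
driver.quit()
```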
###Code
# stretch Jupyter coding blocks to fit screen
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
# if 100% it would fit the screen
# make it run on py2 and py3
from __future__ import division, print_function
###Output
_____no_output_____
###Markdown
Webscraping introIn order to scrape content from a website we first need to download the HTML contents of the website. This can be done with the Python library **requests** (with its `.get` method).Then when we want to extract certain information from a website we use the scraping tool **BeautifulSoup4** (import bs4). In order to extract information with beautifulsoup we have to create a soup object from the HTML source code of a website.
###Code
import requests # The requests library is an
# HTTP library for getting content and posting etc.
import bs4 as bs # BeautifulSoup4 is a Python library
# for pulling data out of HTML and XML code.
# we can query markup languages for specific content
###Output
_____no_output_____
###Markdown
Scraping a simple website
###Code
source = requests.get("https://afo.github.io/data-x")
# a GET request will download the HTML webpage.
print(source) # If <Response [200]> then
# the website has been downloaded successfully
###Output
_____no_output_____
###Markdown
**Different types of responses:** Generally, a status code starting with 2 indicates success, while a status code starting with 4 or 5 indicates an error.
###Code
print(source.content) # This is the HTML content of the website,
# as you can see it's quite hard to decipher
print(type(source.content)) # type byte in Python 3
# Convert source.content to a beautifulsoup object
# beautifulsoup can parse (extract specific information) HTML code
soup = bs.BeautifulSoup(source.content, features='lxml')
# we pass in the source content
# features specifies what type of code we are parsing,
# here 'lxml' specifies that we want beautiful soup to parse HTML code
print(type(soup))
print(soup) # looks a lot nicer!
###Output
_____no_output_____
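Instead of just printing the response object, the status can also be checked programmatically; a short sketch using standard attributes of the requests response:
```python
response = requests.get("https://afo.github.io/data-x")

print(response.status_code)   # e.g. 200
print(response.ok)            # True for any non-error (below 400) status
response.raise_for_status()   # raises requests.HTTPError for 4xx/5xx responses
```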
###Markdown
Above we printed the HTML code of the website, decoded as a beautiful soup object` `: are all the HTML tags, that specifies certain sections, stylings etc of the website, for more info: https://www.w3schools.com/tags/ref_byfunc.asp**class and id: ** Are attributes of HTML tags, they are used as hooks to give unique styling to certain elements and an id for sections / parts of the page.Full list of HTML tags: https://developer.mozilla.org/en-US/docs/Web/HTML/Element Suppose we want to extract content that is shown on the website
###Code
# Inside the <body> tag of the website is where all the main content is
print(soup.body)
print(soup.title) # Title of the website
print(soup.find('title')) # same as .title
# If we want to extract specific text
print(soup.find('p')) # will only return first <p> tag
print(soup.find('p').text) # extracts the string within the <p> tag, strips it of tag
# If we want to extract all <p> tags
print(soup.find_all('p')) # returns list of all <p> tags
# we can also search for classes within all tags, using class_
# note _ is used to distinguish with Python's builtin class function
print(soup.find(class_='header'))
# We can also find tags with a speific id
print(soup.find(id='second'))
print(soup.find_all(class_='regular_list'))
for p in soup.find_all('p'): # print all text paragraphs on the webpage
print(p.text)
# Extract links / urls
# Links in html is usually coded as <a href="url">
# where the link is url
print(soup.a)
print(type(soup.a))
soup.a.get('href')
# to get the link from href attribute
# if we want to list links and their text info
links = soup.find_all('a')
for l in links:
print("\nInfo about {}: ".format(l.text), l.get('href'))
# then we have extracted the link
###Output
_____no_output_____
###Markdown
Data-X website Scraping Now let us scrape the current Syllabus Schedule from the Data-X website
###Code
source = requests.get('https://data-x.blog/').content
# get the source content
soup = bs.BeautifulSoup(source,'lxml')
print(soup.prettify())
# .prettify() method makes the HTML code more readable
# as you can see this code is more difficult
# to read then the simple example above
# mostly because this is a real Wordpress website
###Output
_____no_output_____
###Markdown
Print the Title of the website
###Code
print(soup.find('title').text)
# check that we are at the correct website
###Output
_____no_output_____
###Markdown
Extract all paragraphs of text
###Code
for p in soup.find_all('p'):
print(p.text)
###Output
_____no_output_____
###Markdown
Look at the navigation bar
###Code
navigation_bar = soup.find('nav')
print(navigation_bar)
# These are the linked subpages in the navigation bar
nav_bar = navigation_bar.text
print(nav_bar)
###Output
_____no_output_____
###Markdown
Scrape the Syllabus of its content(maybe to use in an App)
###Code
# Now we want to find the Syllabus,
# however we are at the root web page, not displaying the Syllabus
# Get all links from navigation bar at the data-x home webpage
for url in navigation_bar.find_all('a'):
link = url.get('href')
if 'data-x.blog' in link: # check link to a subpage
print(link)
if 'syllabus' in link:
syllabus_url = link
# syllabus is located at https://data-x.blog/syllabus/
print(syllabus_url)
# Open new connection to the Syllabus url. Replace soup object.
source = requests.get(syllabus_url).content
soup = bs.BeautifulSoup(source, 'lxml')
print(soup.body.prettify())
# we can see that the Syllabus is built up of <td>, <tr> and <table> tags
###Output
_____no_output_____
###Markdown
Find the course schedule table from the syllabus: Usually organized data in HTML format on a website is stored in tables under `, ,` and `` tags. Here we want to extract the information in the Data-X syllabus.**NOTE:** To identify element, class or id name of the object of your interest on a web page, you can go to the link address in your browser, under 'more tools' option click __'developer tools'__. This opens the 'Document object Model' of the webpage. Hover on the element of your interest on the webpage to check its location. This will help you in deciding which parts of 'soup content' you want to parse. More info at: https://developer.chrome.com/devtools
###Code
# We can see that course schedule is in <table><table/> elements
# We can also get the table
full_table = soup.find_all('table')
# A new row in an HTML table starts with <tr> tag
# A new column entry is defined by <td> tag
table_result = list()
for table in full_table:
for row in table.find_all('tr'):
row_cells = row.find_all('td') # find all table data
row_entries = [cell.text for cell in row_cells]
print(row_entries)
table_result.append(row_entries)
# get all the table data into a list
# We can also read it in to a Pandas DataFrame
import pandas as pd
pd.set_option('display.max_colwidth', 10000)
df = pd.DataFrame(table_result)
df
# Pandas can also grab tables from a website automatically
import pandas as pd
import html5lib
# requires html5lib:
#!conda install --yes html5lib
dfs = pd.read_html('https://data-x.blog/syllabus/')
# returns a list of all tables at url
dfs
print(type(dfs)) #list of tables
print(len(dfs)) # we only have one table
print(type(dfs[0])) # stored as DataFrame
df = pd.concat(dfs,ignore_index=True)
# Looks so-so, however striped from break line characters etc.
df
# Make it nicer
# Assign column names
df.columns= ['Part','Detailed Description']
# Assing week number
weeks = list()
for i in range(1,13):
weeks = weeks+['Week{}'.format(i) for tmp in range(4)]
df['Week'] = weeks
df.head()
# Set Week and Part as Multiindex
df = df.set_index(['Week','Part'])
df.head(10)
###Output
_____no_output_____
###Markdown
Scrape images and other files
###Code
# As we can see there are two images on the data-x.blog/resources
# say that we want to download them
# Images are displayed with the <img> tag in HTML
# open connection and create new soup
raw = requests.get('https://data-x.blog/resources/').content
soup = bs.BeautifulSoup(raw,features='lxml')
print(soup.find('img'))
# as we can see below the image urls
# are stored in the src attribute inside the img tag
# Parse all url to the images
img_urls = list()
for img in soup.find_all('img'):
img_url = img.get('src')
if '.jpeg' in img_url or '.jpg' in img_url:
print(img_url)
img_urls.append(img_url)
print(img_urls)
!ls
# To download and save files with Python we can use
# the shutil library which is a file operations library
import shutil
for idx, img_url in enumerate(img_urls):
#enumarte to create a file integer name for every image
# make a request to the image URL
img_source = requests.get(img_url, stream=True)
# we set stream = True to download/
# stream the content of the data
with open('img'+str(idx)+'.jpg', 'wb') as file:
# open file connection, create file and write to it
shutil.copyfileobj(img_source.raw, file)
# save the raw file object
del img_source # to remove the file from memory
!ls
###Output
_____no_output_____
###Markdown
Scraping function to download files of any type from a websiteBelow is a function that takes in a website and a specific file type to download X of them from the website.
###Code
# Extended scraping function of any file format
import os # To interact with operating system and format file name
import shutil # To copy file object from python to disk
import requests
import bs4 as bs
def py_file_scraper(url, html_tag='img', source_tag='src', file_type='.jpg',max=-1):
'''
Function that scrapes a website for certain file formats.
The files will be placed in a folder called "files"
in the working directory.
url = the url we want to scrape from
html_tag = the file tag (usually img for images or
a for file links)
source_tag = the source tag for the file url
(usually src for images or href for files)
file_type = .png, .jpg, .pdf, .csv, .xls etc.
max = integer (max number of files to scrape,
if = -1 it will scrape all files)
'''
# make a directory called 'files'
# for the files if it does not exist
if not os.path.exists('files/'):
os.makedirs('files/')
print('Loading content from the url...')
source = requests.get(url).content
print('Creating content soup...')
soup = bs.BeautifulSoup(source,'lxml')
i=0
print('Finding tag:%s...'%html_tag)
for n, link in enumerate(soup.find_all(html_tag)):
file_url=link.get(source_tag)
print ('\n',n+1,'. File url',file_url)
if 'http' in file_url: # check that it is a valid link
print('It is a valid url..')
if file_type in file_url: #only check for specific
# file type
print('%s FILE TYPE FOUND IN THE URL...'%file_type)
file_name = os.path.splitext(os.path.basename(file_url))[0] + file_type
#extract file name from url
file_source = requests.get(file_url, stream = True)
# open new stream connection
with open('./files/'+file_name, 'wb') as file:
# open file connection, create file and
# write to it
shutil.copyfileobj(file_source.raw, file)
# save the raw file object
print('DOWNLOADED:',file_name)
i+=1
del file_source # delete from memory
else:
print('%s file type NOT found in url:'%file_type)
print('EXCLUDED:',file_url)
# urls not downloaded from
if i == max:
print('Max reached')
break
print('Done!')
###Output
_____no_output_____
###Markdown
Scrape funny cat pictures
###Code
py_file_scraper('https://funcatpictures.com/')
# scrape cats
!ls ./files
###Output
_____no_output_____
###Markdown
Scrape pdf's from Data-X site
###Code
py_file_scraper('https://data-x.blog/resources',
html_tag='a',source_tag='href',file_type='.pdf', \
max=5)
###Output
_____no_output_____
###Markdown
Scrape real data CSV files from websites
###Code
py_file_scraper('http://www-eio.upc.edu/~pau/cms/rdata/datasets.html',
html_tag='a', # R data sets
source_tag='href', file_type='.csv',max=5)
###Output
_____no_output_____
###Markdown
--- Breakout problemIn this Breakout Problem you should extract live weather data in Berkeley from:[http://forecast.weather.gov/MapClick.php?lat=37.87158815800046&lon=-122.27274583799971](http://forecast.weather.gov/MapClick.php?lat=37.87158815800046&lon=-122.27274583799971)* Task scrape * period / day (as Tonight, Friday, FridayNight etc.) * the temperature for the period (as Low, High) * the long weather description (e.g. Partly cloudy, with a low around 49..) Store the scraped data strings in a Pandas DataFrame**Hint:** The weather information is found in a div tag with `id='seven-day-forecast'` Appendix Scrape Bloomberg sitemap (XML) for current political news
###Code
# XML documents - site maps, all the urls. just between tags
# XML human and machine readable.
# Newest links: all the links for FIND SITE MAP!
# News websites will have sitemaps for politics, bot constantly
# tracking news track the sitemaps
# Before scraping a website look at robots.txt file
bs.BeautifulSoup(requests.get('https://www.bloomberg.com/robots.txt').content,'lxml')
source = requests.get('https://www.bloomberg.com/feeds/bpol/sitemap_news.xml').content
soup = bs.BeautifulSoup(source,'xml') # Note parser 'xml'
print(soup.prettify())
# Find political news headlines
for news in soup.find_all({'news'}):
print(news.title.text)
print(news.publication_date.text)
#print(news.keywords.text)
print('\n')
###Output
_____no_output_____
###Markdown
Web crawlWeb crawling is almost like webscraping, but instead you crawl a specific website (and often its subsites) and extract meta information. It can be seen as simple, recursive scraping. This can be used for web indexing (in order to build a web search engine). Web crawl Twitter account**Authors:** Kunal Desai & Alexander Fred Ojala
###Code
import bs4
from bs4 import BeautifulSoup
import requests
# Helper function to maintain the urls and the number of times they appear
url_dict = dict()
def add_to_dict(url_d, key):
if key in url_d:
url_d[key] = url_d[key] + 1
else:
url_d[key] = 1
# Recursive function which extracts links from the given url upto a given 'depth'.
def get_urls(url, depth):
if depth == 0:
return
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
for link in soup.find_all('a'):
if link.has_attr('href') and "https://" in link['href']:
# print(link['href'])
add_to_dict(url_dict, link['href'])
get_urls(link['href'], depth - 1)
# Iterative function which extracts links from the given url upto a given 'depth'.
def get_urls_iterative(url, depth):
urls = [url]
for url in urls:
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
for link in soup.find_all('a'):
if link.has_attr('href') and "https://" in link['href']:
add_to_dict(url_dict, link['href'])
urls.append(link['href'])
if len(urls) > depth:
break
get_urls("https://twitter.com/GolfWorld", 2)
for key in url_dict:
print(str(key) + " ---- " + str(url_dict[key]))
###Output
_____no_output_____
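A real crawler should also be polite: check robots.txt and rate-limit its requests. A small, hedged standard-library sketch (not part of the original crawler above):
```python
import time
from urllib import robotparser

rp = robotparser.RobotFileParser()
rp.set_url('https://twitter.com/robots.txt')
rp.read()

if rp.can_fetch('*', 'https://twitter.com/GolfWorld'):
    # fetch the page here, then wait before issuing the next request
    time.sleep(1)   # crude rate limit between requests
```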
###Markdown
SEO: Visualize sitemap and categories in a website**Source:** https://www.ayima.com/guides/how-to-visualize-an-xml-sitemap-using-python.html
###Code
# Visualize XML sitemap with categories!
import requests
from bs4 import BeautifulSoup
url = 'https://www.sportchek.ca/sitemap.xml'
url = 'https://www.bloomberg.com/feeds/bpol/sitemap_index.xml'
page = requests.get(url)
print('Loaded page with: %s' % page)
sitemap_index = BeautifulSoup(page.content, 'html.parser')
print('Created %s object' % type(sitemap_index))
urls = [element.text for element in sitemap_index.findAll('loc')]
print(urls)
def extract_links(url):
''' Open an XML sitemap and find content wrapped in loc tags. '''
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
links = [element.text for element in soup.findAll('loc')]
return links
sitemap_urls = []
for url in urls:
links = extract_links(url)
sitemap_urls += links
print('Found {:,} URLs in the sitemap'.format(len(sitemap_urls)))
with open('sitemap_urls.dat', 'w') as f:
for url in sitemap_urls:
f.write(url + '\n')
'''
Categorize a list of URLs by site path.
The file containing the URLs should exist in the working directory and be
named sitemap_urls.dat. It should contain one URL per line.
Categorization depth can be specified by executing a call like this in the
terminal (where we set the granularity depth level to 5):
python categorize_urls.py --depth 5
The same result can be achieved by setting the categorization_depth variable
manually at the head of this file and running the script with:
python categorize_urls.py
'''
from __future__ import print_function
categorization_depth=3
# Main script functions
def peel_layers(urls, layers=3):
''' Builds a dataframe containing all unique page identifiers up
to a specified depth and counts the number of sub-pages for each.
Prints results to a CSV file.
urls : list
List of page URLs.
layers : int
Depth of automated URL search. Large values for this parameter
may cause long runtimes depending on the number of URLs.
'''
# Store results in a dataframe
sitemap_layers = pd.DataFrame()
# Get base levels
bases = pd.Series([url.split('//')[-1].split('/')[0] for url in urls])
sitemap_layers[0] = bases
# Get specified number of layers
for layer in range(1, layers+1):
page_layer = []
for url, base in zip(urls, bases):
try:
page_layer.append(url.split(base)[-1].split('/')[layer])
except:
# There is nothing that deep!
page_layer.append('')
sitemap_layers[layer] = page_layer
# Count and drop duplicate rows + sort
sitemap_layers = sitemap_layers.groupby(list(range(0, layers+1)))[0].count()\
.rename('counts').reset_index()\
.sort_values('counts', ascending=False)\
.sort_values(list(range(0, layers)), ascending=True)\
.reset_index(drop=True)
# Convert column names to string types and export
sitemap_layers.columns = [str(col) for col in sitemap_layers.columns]
sitemap_layers.to_csv('sitemap_layers.csv', index=False)
# Return the dataframe
return sitemap_layers
sitemap_urls = open('sitemap_urls.dat', 'r').read().splitlines()
print('Loaded {:,} URLs'.format(len(sitemap_urls)))
print('Categorizing up to a depth of %d' % categorization_depth)
sitemap_layers = peel_layers(urls=sitemap_urls,
layers=categorization_depth)
print('Printed {:,} rows of data to sitemap_layers.csv'.format(len(sitemap_layers)))
'''
Visualize a list of URLs by site path.
This script reads in the sitemap_layers.csv file created by the
categorize_urls.py script and builds a graph visualization using Graphviz.
Graph depth can be specified by executing a call like this in the
terminal:
python visualize_urls.py --depth 4 --limit 10 --title "My Sitemap" --style "dark" --size "40"
The same result can be achieved by setting the variables manually at the head
of this file and running the script with:
python visualize_urls.py
'''
from __future__ import print_function
# Set global variables
graph_depth = 3 # Number of layers deep to plot categorization
limit = 3 # Maximum number of nodes for a branch
title = '' # Graph title
style = 'light' # Graph style, can be "light" or "dark"
size = '8,5' # Size of rendered PDF graph
# Import external library dependencies
import pandas as pd
import graphviz
# Main script functions
def make_sitemap_graph(df, layers=3, limit=50, size='8,5'):
''' Make a sitemap graph up to a specified layer depth.
sitemap_layers : DataFrame
The dataframe created by the peel_layers function
containing sitemap information.
layers : int
Maximum depth to plot.
limit : int
The maximum number node edge connections. Good to set this
low for visualizing deep into site maps.
'''
# Check to make sure we are not trying to plot too many layers
if layers > len(df) - 1:
layers = len(df)-1
print('There are only %d layers available to plot, setting layers=%d'
% (layers, layers))
# Initialize graph
f = graphviz.Digraph('sitemap', filename='sitemap_graph_%d_layer' % layers)
f.body.extend(['rankdir=LR', 'size="%s"' % size])
def add_branch(f, names, vals, limit, connect_to=''):
''' Adds a set of nodes and edges to nodes on the previous layer. '''
# Get the currently existing node names
node_names = [item.split('"')[1] for item in f.body if 'label' in item]
# Only add a new branch it it will connect to a previously created node
if connect_to:
if connect_to in node_names:
for name, val in list(zip(names, vals))[:limit]:
f.node(name='%s-%s' % (connect_to, name), label=name)
f.edge(connect_to, '%s-%s' % (connect_to, name), label='{:,}'.format(val))
f.attr('node', shape='rectangle') # Plot nodes as rectangles
# Add the first layer of nodes
for name, counts in df.groupby(['0'])['counts'].sum().reset_index()\
.sort_values(['counts'], ascending=False).values:
f.node(name=name, label='{} ({:,})'.format(name, counts))
if layers == 0:
return f
f.attr('node', shape='oval') # Plot nodes as ovals
f.graph_attr.update()
# Loop over each layer adding nodes and edges to prior nodes
for i in range(1, layers+1):
cols = [str(i_) for i_ in range(i)]
nodes = df[cols].drop_duplicates().values
for j, k in enumerate(nodes):
# Compute the mask to select correct data
mask = True
for j_, ki in enumerate(k):
mask &= df[str(j_)] == ki
# Select the data then count branch size, sort, and truncate
data = df[mask].groupby([str(i)])['counts'].sum()\
.reset_index().sort_values(['counts'], ascending=False)
# Add to the graph
add_branch(f,
names=data[str(i)].values,
vals=data['counts'].values,
limit=limit,
connect_to='-'.join(['%s']*i) % tuple(k))
print(('Built graph up to node %d / %d in layer %d' % (j, len(nodes), i))\
.ljust(50), end='\r')
return f
def apply_style(f, style, title=''):
''' Apply the style and add a title if desired. More styling options are
documented here: http://www.graphviz.org/doc/info/attrs.html#d:style
f : graphviz.dot.Digraph
The graph object as created by graphviz.
style : str
Available styles: 'light', 'dark'
title : str
Optional title placed at the bottom of the graph.
'''
dark_style = {
'graph': {
'label': title,
'bgcolor': '#3a3a3a',
'fontname': 'Helvetica',
'fontsize': '18',
'fontcolor': 'white',
},
'nodes': {
'style': 'filled',
'color': 'white',
'fillcolor': 'black',
'fontname': 'Helvetica',
'fontsize': '14',
'fontcolor': 'white',
},
'edges': {
'color': 'white',
'arrowhead': 'open',
'fontname': 'Helvetica',
'fontsize': '12',
'fontcolor': 'white',
}
}
light_style = {
'graph': {
'label': title,
'fontname': 'Helvetica',
'fontsize': '18',
'fontcolor': 'black',
},
'nodes': {
'style': 'filled',
'color': 'black',
'fillcolor': '#dbdddd',
'fontname': 'Helvetica',
'fontsize': '14',
'fontcolor': 'black',
},
'edges': {
'color': 'black',
'arrowhead': 'open',
'fontname': 'Helvetica',
'fontsize': '12',
'fontcolor': 'black',
}
}
if style == 'light':
apply_style = light_style
elif style == 'dark':
apply_style = dark_style
f.graph_attr = apply_style['graph']
f.node_attr = apply_style['nodes']
f.edge_attr = apply_style['edges']
return f
# Read in categorized data
sitemap_layers = pd.read_csv('sitemap_layers.csv', dtype=str)
# Convert numerical column to integer
sitemap_layers.counts = sitemap_layers.counts.apply(int)
print('Loaded {:,} rows of categorized data from sitemap_layers.csv'\
.format(len(sitemap_layers)))
print('Building %d layer deep sitemap graph' % graph_depth)
f = make_sitemap_graph(sitemap_layers, layers=graph_depth,
limit=limit, size=size)
f = apply_style(f, style=style, title=title)
f.render(cleanup=True)
print('Exported graph to sitemap_graph_%d_layer.pdf' % graph_depth)
###Output
_____no_output_____ |
View_Stocks/view_robin_stocks.ipynb | ###Markdown
Library: [robin_stocks](https://readthedocs.org/projects/robin-stocks/downloads/pdf/latest/) Authentication and Login
###Code
import os  # load credentials from environment variables (placeholder names) instead of hardcoding them
login = r.login(os.environ.get('RH_USERNAME'), os.environ.get('RH_PASSWORD'), store_session=True)
###Output
_____no_output_____
###Markdown
List of Cryptocurrencies available
###Code
crypto_info=r.crypto.get_crypto_currency_pairs()
crypto_list=[]
for i in range(len(crypto_info)):
crypto_list.append(crypto_info[i]['asset_currency']['code'])
###Output
_____no_output_____
###Markdown
Read in Portfolio
###Code
df_portfolio=pd.read_csv('portfolio.csv')
df_portfolio.set_index('Parameters',inplace=True)
columns_list=df_portfolio.columns.tolist()
columns_dict={}
for stock in columns_list:
stock_str_rep=stock.replace('-','.')
columns_dict[stock]=stock_str_rep
df_portfolio.rename(columns=columns_dict,inplace=True)
df_portfolio.sort_values(by='weight',axis=1,ascending=False)
port_stocks=df_portfolio.columns.tolist()
df_suggested_equity=df_portfolio.loc['suggested_investment',:]
df_suggested_equity.transpose().plot(kind='bar',figsize=(12,8));
total_port_equity=df_suggested_equity.sum()
print('Total suggested equity value is: {}'.format(total_port_equity))
###Output
Total suggested equity value is: 13737.07735615393
###Markdown
Print total stocks value
###Code
stocks_dict=r.account.build_holdings()
#print('Stocks are: {}'.format(stocks_dict))
tickers=stocks_dict.keys()
ticker_list=list(tickers)
for ticker in ticker_list:
ticker.replace('-','.')
print(ticker_list)
###### Get crypto positions #####
crypto_holdings=r.get_crypto_positions()
print(float(crypto_holdings[3]['quantity']))
price_list=[float(stocks_dict[ticker]['equity']) for ticker in ticker_list]
for i in range(len(crypto_holdings)):
if float(crypto_holdings[i]['quantity'])>0:
crypto_ticker=crypto_holdings[i]['currency']['code'].replace('-USD','')
crypto_qty=float(crypto_holdings[i]['quantity'])
crypto_mark_price=float(r.get_crypto_quote(symbol=crypto_ticker,info='mark_price'))
crypto_price=crypto_qty*crypto_mark_price
price_list.append(crypto_price)
ticker_list.append('{}.USD'.format(crypto_holdings[i]['currency']['code']))
######## Equity of stocks #################
df_allstock_equity=pd.DataFrame(index=['Price'],columns=ticker_list);
df_allstock_equity.loc['Price',:]=price_list;
allstock_equity_cols=df_allstock_equity.columns.tolist()
df_allstock_equity
###Output
_____no_output_____
###Markdown
Stocks common to portfolio and investment
###Code
comm_stocks=list(set(port_stocks).intersection(set(ticker_list)))
print('Stocks common to recommended and actual portfolio: ',comm_stocks)
port_stocks=[stock.replace('-','.') for stock in port_stocks]
###Output
Stocks common to recommended and actual portfolio: ['MARA', 'GPRO', 'FCEL', 'UONE', 'SENS', 'AMRS', 'GNUS', 'DMLRY', 'VXRT', 'MVIS', 'UUUU', 'IDEX', 'GME', 'GLDG', 'ZNGA', 'AG', 'SLVP', 'PLTR', 'WKHS', 'INO', 'SNAP', 'NIO', 'CARR', 'FUBO', 'RIOT', 'AAPL', 'INFY', 'AMD', 'SLV', 'SOLO', 'GSAT', 'IEHS', 'GEVO', 'CLNE', 'DOGE.USD', 'KOSS']
###Markdown
Stocks not common Stock in recommended portfolio but not in actual portfolio
###Code
not_in_ticker_list=[stock for stock in port_stocks if stock not in ticker_list]
not_in_ticker_list
###Output
_____no_output_____
###Markdown
Stock in actual portfolio but not in recommended portfolio
###Code
not_in_port_stocks=[ticker for ticker in ticker_list if ticker not in port_stocks]
not_in_port_stocks
###Output
_____no_output_____
###Markdown
Share Price
###Code
share_price={}
share_price_values=[float(stocks_dict[ticker]['price']) for ticker in tickers]
for i in range(len(tickers)):
ticker=ticker_list[i]
share_price[ticker]=share_price_values[i]
share_price=pd.DataFrame(share_price,index=['price'])
share_price
###Output
_____no_output_____
###Markdown
Share Quantity
###Code
share_quantity={}
share_quantities=[float(stocks_dict[ticker]['quantity']) for ticker in tickers]
for i in range(len(tickers)):
ticker=ticker_list[i]
share_quantity[ticker]=share_quantities[i]
share_quantity=pd.DataFrame(share_quantity,index=['quantity'])
share_quantity
###Output
_____no_output_____
###Markdown
Total Equity
###Code
equity={}
total_equity=0
for i in range(len(ticker_list)):
ticker=ticker_list[i]
equity[ticker]=price_list[i]
total_equity=total_equity+equity[ticker]
print('Total Stocks equity value is: {}'.format(total_equity))
equity_comm={}
total_comm_equity=df_allstock_equity.loc['Price',comm_stocks].sum()
print('Total Common Stocks equity value is: {}'.format(total_comm_equity))
df_equity=pd.DataFrame.from_dict(data=equity,orient='index',columns=['Equity'])
df_plot=df_equity.sort_values(by='Equity',axis=0,ascending=False)
if df_plot.shape[0]>0:
df_plot.plot(kind='bar',figsize=(12,8))
else:
print('no data to plot')
df_equity=df_equity.transpose()
df_equity.sort_values(by='Equity',axis=1,ascending=False)
###Output
Total Stocks equity value is: 14172.917766739001
Total Common Stocks equity value is: 10462.587766739001
###Markdown
Equity Change
###Code
if len(ticker_list)>0:
equity_change={}
total_port_equity_change=0
for i in range(len(ticker_list)):
stock=ticker_list[i]
if stock in df_equity.columns.tolist() and stock in df_suggested_equity.index.tolist():
equity_change[stock]=df_equity.loc['Equity',stock]-df_suggested_equity[stock]
total_port_equity_change=total_port_equity_change+equity_change[stock]
equity_change_percent=total_port_equity_change/total_port_equity*100
df_equity_change=pd.DataFrame.from_dict(data=equity_change,orient='index',columns=['Equity_Change']).transpose()
df_equity_change_ratio=pd.DataFrame(df_equity_change.
div(df_suggested_equity))
df_equity_change_percent=df_equity_change_ratio*100
df_equity_change_ratio.rename(index={'Equity_Change':'Equity_Change_Ratio'},inplace=True)
df_equity_change_percent.rename(index={'Equity_Change':'Equity_Change_Percent'},inplace=True)
if len(comm_stocks)>0:
equity_comm_change={}
equity_comm_change_percent={}
total_equity_comm_change=0
for i in range(len(comm_stocks)):
comm_stock=comm_stocks[i]
if comm_stock in list(stocks_dict.keys()):
equity_comm_change[comm_stock]=float(stocks_dict[comm_stock]['equity_change'])
equity_comm_change_percent[comm_stock]=float(stocks_dict[comm_stock]['percent_change'])
else:
equity_comm_change[comm_stock]=df_equity.loc['Equity',comm_stock]-df_suggested_equity[comm_stock]
equity_comm_change_percent[comm_stock]=equity_comm_change[comm_stock]/100
total_equity_comm_change=total_equity_comm_change+equity_comm_change[comm_stock]
print('Total Common equity change value is: {}'.format(total_equity_comm_change))
total_equity_comm_change_percent=total_equity_comm_change/total_port_equity*100
print('Total Common Change Percent is: {}'.format(total_equity_comm_change_percent))
df_equity_comm_change=pd.DataFrame.from_dict(data=equity_comm_change,orient='index',columns=['Equity_Comm_Change']).transpose()
df_equity_comm_change_percent=pd.DataFrame.from_dict(data=equity_comm_change_percent,orient='index',columns=['Equity_Comm_Change_Percent']).transpose()
# df_equity_comm_change_ratio=pd.DataFrame(df_equity_comm_change.
# div(df_suggested_equity))
# df_equity_comm_change_percent=df_equity_comm_change_ratio*100
# df_equity_comm_change_ratio.rename(index={'Equity_Comm_Change':'Equity_Comm_Change_Ratio'},inplace=True)
# df_equity_comm_change_percent.rename(index={'Equity_Comm_Change':'Equity_Comm_Change_Percent'},inplace=True)
############################ Plots #####################################
df_equity_comm_change.sort_values(by='Equity_Comm_Change',axis=1,ascending=False).transpose().plot(kind='bar',figsize=(18,6));
print(df_equity_comm_change.sort_values(by='Equity_Comm_Change',axis=1,ascending=False))
df_equity_comm_change_percent.sort_values(by='Equity_Comm_Change_Percent',axis=1,ascending=False).transpose().plot(kind='bar',figsize=(18,6));
else:
print('no common stocks')
else:
print('no positions')
###Output
Total Common equity change value is: 209.22221273899996
Total Common Change Percent is: 1.5230474963094873
UONE GEVO GME UUUU AG \
Equity_Comm_Change 100.64582 84.6491 61.634784 52.939008 37.060884
DOGE.USD PLTR WKHS GSAT INO ... \
Equity_Comm_Change 33.807767 24.31485 21.132384 17.5512 17.5228 ...
GPRO CARR GLDG FCEL MVIS \
Equity_Comm_Change -5.699184 -13.550124 -17.3439 -18.058339 -20.17945
AMD AMRS SENS MARA FUBO
Equity_Comm_Change -26.680766 -29.023508 -40.29498 -40.395003 -60.554939
[1 rows x 36 columns]
###Markdown
Update Stop Loss Stop Loss Sell Limit Price
###Code
stop_loss_sell_limit_price={}
if len(comm_stocks)>0:
for i in range(len(comm_stocks)):
stock=comm_stocks[i]
try:
stop_loss_sell_diff=df_portfolio.loc['price_bound',stock]
limit_price=float(share_price[stock]-stop_loss_sell_diff)
if limit_price<0:
stop_loss_sell_limit_price[stock]=0
else:
stop_loss_sell_limit_price[stock]=limit_price
except:
print(stock)
else:
print('no common stocks')
stop_loss_sell_limit_price
###Output
_____no_output_____
###Markdown
Cancel Existing Stock Orders
###Code
r.get_all_open_stock_orders()
r.orders.cancel_all_stock_orders();
###Output
_____no_output_____
###Markdown
Update Stop Loss Sell orders for all stocks
###Code
stop_loss_sell_info={}
if len(comm_stocks)==0:
print('no common stocks to update stop loss')
else:
for i in range(len(comm_stocks)):
comm_stock=comm_stocks[i]
comm_stock_modified=comm_stock.replace('-','.')
sell_quantity=m.floor(share_quantity[comm_stock])
stop_price=stop_loss_sell_limit_price[comm_stock]
stop_loss_sell_info[comm_stock]=r.orders.order_sell_stop_loss(symbol=comm_stock_modified,
quantity=sell_quantity,stopPrice=stop_price,timeInForce='gtc')
stop_loss_sell_info
###Output
_____no_output_____
###Markdown
Sell and Buy Stocks Sell all portfolio stocks
###Code
for i in range(len(comm_stocks)):
comm_stock=comm_stocks[i]
qty=share_quantity.loc['quantity',comm_stock]
sell_order=r.orders.order_sell_fractional_by_quantity(symbol=comm_stock,
quantity=qty,
timeInForce='gfd')
if not sell_order['reject_reason']==None:
print(ticker)
###Output
_____no_output_____
###Markdown
Sell all stocks
###Code
stocks_dict=r.account.build_holdings()
#print('Stocks are: {}'.format(stocks_dict))
tickers=stocks_dict.keys()
ticker_list=list(tickers)
crypto_ticker_list=[]
crypto_holdings_temp=crypto_holdings.copy();
for i in range(len(crypto_holdings_temp)):
qty=float(crypto_holdings[i]['cost_bases'][0]['direct_quantity'])
crypto_ticker_list.append(crypto_holdings[i]['currency']['code'])
if qty>0:
crypto=crypto_ticker_list[i]
sell_order=r.orders.order_sell_crypto_by_quantity(symbol=crypto,
quantity=qty,
timeInForce='gtc')
print('Crypto sell orders :',sell_order)
else:
print('no crypto to sell')
if len(ticker_list)>0:
for i in range(len(ticker_list)):
ticker=ticker_list[i]
qty=share_quantity.loc['quantity',ticker]
sell_order=r.orders.order_sell_fractional_by_quantity(symbol=ticker,
quantity=qty,
timeInForce='gfd')
print('stocks_sell_prders :',sell_order)
else:
print('no stocks to sell')
###Output
_____no_output_____
###Markdown
Buy Portfolio Stocks
###Code
stocks_dict=r.account.build_holdings()
#print('Stocks are: {}'.format(stocks_dict))
tickers=stocks_dict.keys()
ticker_list=list(tickers)
###### Get crypto positions #####
crypto_holdings=r.get_crypto_positions()
### Add crypto position tickers to ticker_list ###
for i in range(len(crypto_holdings)):
qty=float(crypto_holdings[i]['cost_bases'][0]['direct_quantity'])
if qty>0:
ticker_list.append(crypto_holdings[i]['currency']['code'])
for i in range(len(df_portfolio.columns)):
ticker_suggested=df_portfolio.columns[i]
ticker_suggested_modified=ticker_suggested.replace('-','.')
ticker_suggested_modified=ticker_suggested_modified.replace('.USD','')
equity=df_portfolio.loc['suggested_investment',ticker_suggested]
if ticker_suggested_modified not in ticker_list:
print('{}:{}'.format(ticker_suggested_modified,equity))
if ticker_suggested_modified in crypto_list:
buy_order=r.orders.order_buy_crypto_by_price(symbol=ticker_suggested_modified,amountInDollars=equity,
)
else:
buy_order=r.orders.order_buy_fractional_by_price(symbol=ticker_suggested_modified,
amountInDollars=equity,
timeInForce='gfd')
print(buy_order)
#if not buy_order['reject_reason']==None:
#print(ticker)
###Output
_____no_output_____ |
Analysis_Of_Responses.ipynb | ###Markdown
From the above statistics, it can be concluded that:
1. The mean time required to solve the first bug was highest in **Kotlin**.
2. In comparison with other languages, **Go** had more users with high familiarity.
###Code
langs = ["Kotlin", "Go", "Julia"]
i = 0
fig, axes = plt.subplots(4, 3, figsize = (23, 20))
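# Grid layout: one column per language (i = 0, 1, 2) and four rows per language:
# familiarity, bug obviousness, difficulty level, and a time-to-first-bug histogram.
# Since i < 3, (i // 4) is always 0, so the row index comes from the explicit +1/+2/+3 offsets below.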
for language in languages:
lang = langs[i]
labels = []
data = []
for c, df in language.groupby("Familiarity"):
labels.append(c)
data.append(len(df))
axes[(i // 4), i].pie(data, labels = labels, autopct='%1.2f%%')
axes[(i // 4), i].set_title("Familiarity for " + lang)
labels = []
data = []
for c, df in language.groupby("BugsTooObvious"):
labels.append(c)
data.append(len(df))
axes[(i // 4) + 1, i].pie(data, labels = labels, autopct='%1.2f%%')
    axes[(i // 4) + 1, i].set_title("Were the bugs too obvious in " + lang)
labels = []
data = []
for c, df in language.groupby("DifficultyLevel"):
labels.append(c)
data.append(len(df))
axes[(i // 4) + 2, i].pie(data, labels = labels, autopct='%1.2f%%')
axes[(i // 4) + 2, i].set_title("Difficulty Level of bugs in " + lang)
axes[(i // 4) + 3, i].set_ylim(0, 3)
axes[(i // 4) + 3, i].hist(language["TimeToFirstBug"])
axes[(i // 4) + 3, i].set_title("Histogram for time taken to find 1st bug (time vs #users) in " + lang)
i = i + 1
plt.show()
###Output
_____no_output_____
###Markdown
From the above graphs, it can be concluded that:
1. **Go** was the most familiar language and **Julia** was the least familiar language.
2. **Kotlin** had the least obvious bugs.
3. In general, the difficulty level of the bugs in **Kotlin** was higher than in the other languages.
4. More users took longer to find the first bug in **Kotlin** than in the other languages.
###Code
for t, df in merged.groupby('Token'):
if len(df) > 1:
for f, df1 in df.groupby('Familiarity'):
if len(df1) > 1:
for idx, row in df1.iterrows():
print('Language : ' + row["Language"] + ' Time to solve first bug : ' + str(row["TimeToFirstBug"]))
print("\n")
###Output
Language : Kotlin Time to solve first bug : 6
Language : Go Time to solve first bug : 4
Language : Julia Time to solve first bug : 8
Language : Kotlin Time to solve first bug : 2
Language : Julia Time to solve first bug : 1
Language : Kotlin Time to solve first bug : 4
Language : Go Time to solve first bug : 4
Language : Julia Time to solve first bug : 2
Language : Kotlin Time to solve first bug : 4
Language : Julia Time to solve first bug : 5
Language : Kotlin Time to solve first bug : 11
Language : Julia Time to solve first bug : 10
Language : Kotlin Time to solve first bug : 4
Language : Go Time to solve first bug : 2
|
Convolutional Neural Networks/Exercise_4_Multi_class_classifier_Question-FINAL.ipynb | ###Markdown
Submission Instructions
###Code
# Now click the 'Submit Assignment' button above.
###Output
_____no_output_____
###Markdown
When you're done or would like to take a break, please run the two cells below to save your work and close the Notebook. This will free up resources for your fellow learners.
###Code
%%javascript
<!-- Save the notebook -->
IPython.notebook.save_checkpoint();
%%javascript
IPython.notebook.session.delete();
window.onbeforeunload = null
setTimeout(function() { window.close(); }, 1000);
###Output
_____no_output_____ |
notebooks/D1_L6_MatPlotLib_and_Seaborn/00-Introduction-To-Matplotlib.ipynb | ###Markdown
Visualization with Matplotlib We'll now take an in-depth look at the Matplotlib package for visualization in Python.Matplotlib is a multi-platform data visualization library built on NumPy arrays, and designed to work with the broader SciPy stack.It was conceived by John Hunter in 2002, originally as a patch to IPython for enabling interactive MATLAB-style plotting via gnuplot from the IPython command line.IPython's creator, Fernando Perez, was at the time scrambling to finish his PhD, and let John know he wouldn't have time to review the patch for several months.John took this as a cue to set out on his own, and the Matplotlib package was born, with version 0.1 released in 2003.It received an early boost when it was adopted as the plotting package of choice of the Space Telescope Science Institute (the folks behind the Hubble Telescope), which financially supported Matplotlib's development and greatly expanded its capabilities.One of Matplotlib's most important features is its ability to play well with many operating systems and graphics backends.Matplotlib supports dozens of backends and output types, which means you can count on it to work regardless of which operating system you are using or which output format you wish.This cross-platform, everything-to-everyone approach has been one of the great strengths of Matplotlib.It has led to a large user base, which in turn has led to an active developer base and Matplotlib's powerful tools and ubiquity within the scientific Python world.In recent years, however, the interface and style of Matplotlib have begun to show their age.Newer tools like ggplot and ggvis in the R language, along with web visualization toolkits based on D3js and HTML5 canvas, often make Matplotlib feel clunky and old-fashioned.Still, I'm of the opinion that we cannot ignore Matplotlib's strength as a well-tested, cross-platform graphics engine.Recent Matplotlib versions make it relatively easy to set new global plotting styles (see *Customizing Matplotlib: Configurations and Style Sheets*), and people have been developing new packages that build on its powerful internals to drive Matplotlib via cleaner, more modern APIs—for example, Seaborn (discussed in *Visualization With Seaborn*), [ggpy](http://yhat.github.io/ggpy/), [HoloViews](http://holoviews.org/), [Altair](http://altair-viz.github.io/), and even Pandas itself can be used as wrappers around Matplotlib's API.Even with wrappers like these, it is still often useful to dive into Matplotlib's syntax to adjust the final plot output.For this reason, I believe that Matplotlib itself will remain a vital piece of the data visualization stack, even if new tools mean the community gradually moves away from using the Matplotlib API directly. General Matplotlib TipsBefore we dive into the details of creating visualizations with Matplotlib, there are a few useful things you should know about using the package. Importing MatplotlibJust as we use the ``np`` shorthand for NumPy and the ``pd`` shorthand for Pandas, we will use some standard shorthands for Matplotlib imports:
###Code
import matplotlib as mpl
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
The ``plt`` interface is what we will use most often, as we shall see throughout this chapter. Setting StylesWe will use the ``plt.style`` directive to choose appropriate aesthetic styles for our figures.Here we will set the ``classic`` style, which ensures that the plots we create use the classic Matplotlib style:
###Code
plt.style.use('classic')
###Output
_____no_output_____
###Markdown
Throughout this section, we will adjust this style as needed.Note that the stylesheets used here are supported as of Matplotlib version 1.5; if you are using an earlier version of Matplotlib, only the default style is available.For more information on stylesheets, see *Customizing Matplotlib: Configurations and Style Sheets*. ``show()`` or No ``show()``? How to Display Your Plots A visualization you can't see won't be of much use, but just how you view your Matplotlib plots depends on the context.The best use of Matplotlib differs depending on how you are using it; roughly, the three applicable contexts are using Matplotlib in a script, in an IPython terminal, or in an IPython notebook. Plotting from a scriptIf you are using Matplotlib from within a script, the function ``plt.show()`` is your friend.``plt.show()`` starts an event loop, looks for all currently active figure objects, and opens one or more interactive windows that display your figure or figures.So, for example, you may have a file called *myplot.py* containing the following:```python ------- file: myplot.py ------import matplotlib.pyplot as pltimport numpy as npx = np.linspace(0, 10, 100)plt.plot(x, np.sin(x))plt.plot(x, np.cos(x))plt.show()```You can then run this script from the command-line prompt, which will result in a window opening with your figure displayed:```$ python myplot.py```The ``plt.show()`` command does a lot under the hood, as it must interact with your system's interactive graphical backend.The details of this operation can vary greatly from system to system and even installation to installation, but matplotlib does its best to hide all these details from you.One thing to be aware of: the ``plt.show()`` command should be used *only once* per Python session, and is most often seen at the very end of the script.Multiple ``show()`` commands can lead to unpredictable backend-dependent behavior, and should mostly be avoided. Plotting from an IPython shellIt can be very convenient to use Matplotlib interactively within an IPython shell (see *IPython: Beyond Normal Python*).IPython is built to work well with Matplotlib if you specify Matplotlib mode.To enable this mode, you can use the ``%matplotlib`` magic command after starting ``ipython``:```ipythonIn [1]: %matplotlibUsing matplotlib backend: TkAggIn [2]: import matplotlib.pyplot as plt```At this point, any ``plt`` plot command will cause a figure window to open, and further commands can be run to update the plot.Some changes (such as modifying properties of lines that are already drawn) will not draw automatically: to force an update, use ``plt.draw()``.Using ``plt.show()`` in Matplotlib mode is not required. Plotting from an IPython notebookThe IPython notebook is a browser-based interactive data analysis tool that can combine narrative, code, graphics, HTML elements, and much more into a single executable document (see *IPython: Beyond Normal Python*).Plotting interactively within an IPython notebook can be done with the ``%matplotlib`` command, and works in a similar way to the IPython shell.In the IPython notebook, you also have the option of embedding graphics directly in the notebook, with two possible options:- ``%matplotlib notebook`` will lead to *interactive* plots embedded within the notebook- ``%matplotlib inline`` will lead to *static* images of your plot embedded in the notebookFor this book, we will generally opt for ``%matplotlib inline``:
###Code
%matplotlib inline
###Output
_____no_output_____
###Markdown
After running this command (it needs to be done only once per kernel/session), any cell within the notebook that creates a plot will embed a PNG image of the resulting graphic:
###Code
import numpy as np
x = np.linspace(0, 10, 100)
fig = plt.figure()
plt.plot(x, np.sin(x), '-')
plt.plot(x, np.cos(x), '--');
###Output
_____no_output_____
###Markdown
Saving Figures to FileOne nice feature of Matplotlib is the ability to save figures in a wide variety of formats.Saving a figure can be done using the ``savefig()`` command.For example, to save the previous figure as a PNG file, you can run this:
###Code
fig.savefig('my_figure.png')
###Output
_____no_output_____
###Markdown
We now have a file called ``my_figure.png`` in the current working directory:
###Code
#!ls -lh my_figure.png
###Output
_____no_output_____
###Markdown
To confirm that it contains what we think it contains, let's use the IPython ``Image`` object to display the contents of this file:
###Code
from IPython.display import Image
Image('my_figure.png')
###Output
_____no_output_____
###Markdown
In ``savefig()``, the file format is inferred from the extension of the given filename.Depending on what backends you have installed, many different file formats are available.The list of supported file types can be found for your system by using the following method of the figure canvas object:
###Code
fig.canvas.get_supported_filetypes()
###Output
_____no_output_____
###Markdown
Note that when saving your figure, it's not necessary to use ``plt.show()`` or related commands discussed earlier. Two Interfaces for the Price of OneA potentially confusing feature of Matplotlib is its dual interfaces: a convenient MATLAB-style state-based interface, and a more powerful object-oriented interface. We'll quickly highlight the differences between the two here. MATLAB-style InterfaceMatplotlib was originally written as a Python alternative for MATLAB users, and much of its syntax reflects that fact.The MATLAB-style tools are contained in the pyplot (``plt``) interface.For example, the following code will probably look quite familiar to MATLAB users:
###Code
plt.figure() # create a plot figure
# create the first of two panels and set current axis
plt.subplot(2, 1, 1) # (rows, columns, panel number)
plt.plot(x, np.sin(x))
# create the second panel and set current axis
plt.subplot(2, 1, 2)
plt.plot(x, np.cos(x));
###Output
_____no_output_____
###Markdown
It is important to note that this interface is *stateful*: it keeps track of the "current" figure and axes, which are where all ``plt`` commands are applied.You can get a reference to these using the ``plt.gcf()`` (get current figure) and ``plt.gca()`` (get current axes) routines.While this stateful interface is fast and convenient for simple plots, it is easy to run into problems.For example, once the second panel is created, how can we go back and add something to the first?This is possible within the MATLAB-style interface, but a bit clunky.Fortunately, there is a better way. Object-oriented interfaceThe object-oriented interface is available for these more complicated situations, and for when you want more control over your figure.Rather than depending on some notion of an "active" figure or axes, in the object-oriented interface the plotting functions are *methods* of explicit ``Figure`` and ``Axes`` objects.To re-create the previous plot using this style of plotting, you might do the following:
###Code
# First create a grid of plots
# ax will be an array of two Axes objects
fig, ax = plt.subplots(2)
# Call plot() method on the appropriate object
ax[0].plot(x, np.sin(x))
ax[1].plot(x, np.cos(x));
###Output
_____no_output_____ |
Lectures/.ipynb_checkpoints/Lec23-checkpoint.ipynb | ###Markdown
Lec5 `5/12` Lazy Evaluation

For **applicative order** evaluation, we had that:
1. Eval subexpressions
2. If primitive proc, just apply it
3. If compound proc, eval body in new env which extends proc.env with new frame that binds proc params to arg vals

In **lazy evaluation (normal order)**:
- We apply proc to unevaluated argument subexpressions.
- Eval a subexpression only when value is needed:
  - to print
  - primitive procedure (they are 'strict' in their args), e.g.,

```scheme
(foo (+ x y) (- x y)) ;-------------------> Evaluated in normal order
;    -----> Evaluated in applicative order, not normal order
;            ------> Evaluated in applicative order, not normal order
```

Exercise 1

In applicative order:
###Code
(define (foo x)
(display "inside foo") (newline) ; dbg
(+ x x))
(foo (begin
(display "eval arg") (newline) ; dbg
222))
###Output
eval arg
inside foo
|
notebooks/Astropy-citations.ipynb | ###Markdown
Top 10 authors that cite astropy as lead author:
###Code
astropy_df['authors'].str.split(';').apply(lambda x: x[0]).value_counts()[:10]
###Output
_____no_output_____
###Markdown
Top 20 authors that cite Astropy on co-authored papers:
###Code
astropy_df['authors'].str.split(';').explode().value_counts()[:20]
###Output
_____no_output_____
###Markdown
Number of citations per month:
###Code
g = astropy_df.groupby(by=[astropy_df.index.year, astropy_df.index.month]).count()
group_dates = [datetime.date(x[0], x[1], 1) for x in g.index]
group_dates_dec = [x[0] + x[1]/12 for x in g.index]
for add_lines in [True, False]:
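    # one pass with the Astropy paper release-date annotation lines, one without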
fig, ax = plt.subplots(figsize=(8, 6), constrained_layout=True)
ax.plot(group_dates, g['pubdate.1'].values,
marker='', drawstyle='steps-mid',
lw=2, color='tab:orange')
ax.set_xlim(datetime.date(2013, 8, 1),
datetime.datetime.now())
ax.set_xlabel('time')
ax.set_ylabel('Astropy papers\nrefereed citations per month')
if add_lines:
ylim = ax.get_ylim()
yptp = ylim[1] - ylim[0]
ax.axvline(datetime.date(2013, 10, 1),
zorder=-1, linestyle='--')
ax.axvline(datetime.date(2018, 9, 1),
zorder=-1, linestyle='--')
ax.text(datetime.date(2018, 10-2, 1),
ylim[1] - 0.05 * yptp,
'Astropy paper 2',
ha='right', va='top', fontsize=16)
ax.text(datetime.date(2013, 10+1, 1),
ylim[1] - 0.05 * yptp,
'Astropy paper 1',
ha='left', va='top', fontsize=16)
extra = '-lines' if add_lines else ''
fig.savefig(f'../plots/astropy-citations{extra}.png', dpi=300)
###Output
_____no_output_____
###Markdown
Journal statistics:
###Code
unq_journals, counts = np.unique(astropy_df.pub.values.astype(str),
return_counts=True)
print(f"Number of journals that cite astropy: {len(unq_journals)}")
astropy_df['pub'].astype(str).value_counts()[:20]
###Output
_____no_output_____
###Markdown
Top 10 authors that cite astropy as lead author:
###Code
astropy_df['authors'].str.split(';').apply(lambda x: x[0]).value_counts()[:10]
###Output
_____no_output_____
###Markdown
Top 20 authors that cite Astropy on co-authored papers:
###Code
astropy_df['authors'].str.split(';').explode().value_counts()[:20]
###Output
_____no_output_____
###Markdown
Number of citations per month:
###Code
g = astropy_df.groupby(by=[astropy_df.index.year, astropy_df.index.month]).count()
group_dates = [datetime.date(x[0], x[1], 1) for x in g.index]
group_dates_dec = [x[0] + x[1]/12 for x in g.index]
for add_lines in [True, False]:
fig, ax = plt.subplots(figsize=(8, 6), constrained_layout=True)
ax.plot(group_dates, g['pubdate.1'].values,
marker='', drawstyle='steps-mid',
lw=2, color='tab:orange')
ax.set_xlim(datetime.date(2013, 8, 1),
datetime.datetime.now())
ax.set_xlabel('time')
ax.set_ylabel('Astropy papers\nrefereed citations per month')
if add_lines:
ylim = ax.get_ylim()
yptp = ylim[1] - ylim[0]
ax.axvline(datetime.date(2013, 10, 1),
zorder=-1, linestyle='--')
ax.axvline(datetime.date(2018, 9, 1),
zorder=-1, linestyle='--')
ax.text(datetime.date(2018, 10-2, 1),
ylim[1] - 0.05 * yptp,
'Astropy paper 2',
ha='right', va='top', fontsize=16)
ax.text(datetime.date(2013, 10+1, 1),
ylim[1] - 0.05 * yptp,
'Astropy paper 1',
ha='left', va='top', fontsize=16)
extra = '-lines' if add_lines else ''
fig.text(1, 0, 'Source: NASA/ADS', fontsize=16, ha='right')
fig.savefig(f'../plots/astropy-citations{extra}.png', dpi=300)
all_pubs = np.unique(astropy_df['pub'].values.astype(str))
g = astropy_df.groupby(by=[astropy_df.index.year,
astropy_df.pub]).count()
group_dates = np.array([datetime.date(x[0], 1, 1) for x in g.index])
group_dates_dec = [x[0] for x in g.index]
pubs = [str(x[1]) for x in g.index]
# fig, ax = plt.subplots(figsize=(8, 6), constrained_layout=True)
# ax.plot(group_dates, g['pubdate.1'].values,
# marker='', drawstyle='steps-mid',
# lw=2, color='tab:orange')
# ax.set_xlim(datetime.date(2013, 8, 1),
# datetime.datetime.now())
# ax.set_xlabel('time')
# ax.set_ylabel('Astropy papers\nrefereed citations per month')
# fig.text(1, 0, 'Source: NASA/ADS', fontsize=16, ha='right')
# # fig.savefig(f'../plots/astropy-citations{extra}.png', dpi=300)
fig, ax = plt.subplots(figsize=(8, 6), constrained_layout=True)
for pub in all_pubs:
mask = pubs == np.array(pub)
gg = g[mask]
dates = group_dates[mask]
if gg['year'].values.sum() < 50:
continue
ax.plot(dates, gg['year'].values,
marker='', #drawstyle='steps-mid',
lw=2, label=pub)
ax.set_xlim(datetime.date(2013, 8, 1),
datetime.datetime.now())
ax.set_xlabel('time')
ax.set_ylabel('Astropy papers\nrefereed citations per year')
ax.set_yscale('log')
ax.legend()
fig.text(1, 0, 'Source: NASA/ADS', fontsize=16, ha='right')
fig.savefig(f'../plots/astropy-citations-per-journal.png', dpi=300)
fig, ax = plt.subplots(figsize=(8, 6), constrained_layout=True)
for pub in all_pubs:
mask = pubs == np.array(pub)
gg = g[mask]
dates = group_dates[mask]
if gg['year'].values.sum() >= 50 or gg['year'].values.sum() < 10:
continue
ax.plot(dates, gg['year'].values,
marker='', #drawstyle='steps-mid',
lw=2, label=pub)
ax.set_xlim(datetime.date(2013, 8, 1),
datetime.datetime.now())
ax.set_xlabel('time')
ax.set_ylabel('Astropy papers\nrefereed citations per year')
ax.set_yscale('log')
ax.legend()
fig.text(1, 0, 'Source: NASA/ADS', fontsize=16, ha='right')
fig.savefig(f'../plots/astropy-citations-per-journal-few.png', dpi=300)
###Output
_____no_output_____
###Markdown
Journals without many citations to Astropy:
###Code
for pub in all_pubs:
mask = pubs == np.array(pub)
gg = g[mask]
dates = group_dates[mask]
if gg['year'].values.sum() >= 10:
continue
print(pub)
###Output
_____no_output_____ |
C2 Statistics and Model Creation/SOLUTIONS/SOLUTION_Tech_Fun_C1_P2_Game_AI,_OOP_and_Agents_PART_1.ipynb | ###Markdown
Technology Fundamentals Course 2, Project Part 2: Building Agents and Object Oriented Programming**Instructor**: Wesley Beckner**Contact**: [email protected]**Teaching Assitants**: Varsha Bang, Harsha Vardhan**Contact**: [email protected], [email protected] part II of our tic-tac-toe and AI journey, we're going to take all the functions we've defined so far and make them object oriented!--- 2.0 Preparing Environment and Importing Data[back to top](top) 2.0.1 Import Packages[back to top](top)
###Code
def visualize_board(board_values):
"""
Visualizes the board during gameplay
Parameters
----------
board_values : list
The values ('X', 'O', or ' ' at each board location)
Returns
-------
None
"""
print(
"|{}|{}|{}|\n|{}|{}|{}|\n|{}|{}|{}|\n".format(*board_values)
)
def init_board():
"""
Initializes an empty board for the start of gameplay
Parameters
----------
None
Returns
-------
board : dict
a dictionary with keys 1-9 and single space (' ') string as values
"""
return {1: ' ',
2: ' ',
3: ' ',
4: ' ',
5: ' ',
6: ' ',
7: ' ',
8: ' ',
9: ' ',}
# the keys on the game board where, if filled completely with X's or O's a
# winner has occurred
win_patterns = [[1,2,3], [4,5,6], [7,8,9],
[1,4,7], [2,5,8], [3,6,9],
[1,5,9], [7,5,3]]
def check_winning(board):
"""
Checks if the game has a winner
Parameters
----------
board : dict
the tictactoe board as a dictionary
Returns
-------
win_statement : str
defaults to an empty string if no winner. Otherwise 'X' Won! or 'O' Won!
"""
for pattern in win_patterns:
values = [board[i] for i in pattern]
if values == ['X', 'X', 'X']:
return "'X' Won!"
elif values == ['O', 'O', 'O']:
return "'O' Won!"
return ''
def tic_tac_toe():
"""
The tictactoe game engine. Runs the while loop that handles the game
Parameters
----------
None
Returns
-------
None
"""
print("'X' will go first!")
board = init_board()
while True:
for player in (['X', 'O']):
visualize_board(board.values())
move = int(input("{}, what's your move?".format(player)))
      if board[move] != ' ':
        # ask for a new position, convert it to an int, and place the mark
        move = int(input("{}, that position is already taken! "\
                    "What's your move?".format(player)))
        board[move] = player
      else:
        board[move] = player
winner = check_winning(board)
if winner == '':
continue
else:
print(winner)
break
if winner != '':
break
###Output
_____no_output_____
###Markdown
2.1 OOP

[back to top](top)

Notice how we have so many functions with calls to our main object `board`. Let's try to organize this into a more object oriented scheme. We'll also want to write a function that recognizes when a stalemate has been reached!

2.1.1 Thinking in Objects

It's helpful to think of how our code can be divided into useful segments that can then be extended, interfaced, used elsewhere, etc. It's just like we had when we were playing with our pokeball and pokemon objects. In that case, it made sense to make two separate objects: one for pokemon and one for pokeballs. Can you think of any way that would make sense to divide our code into objects? I can think of two.

2.1.2 class TicTacToe

The first object will be one that handles our board and all of its methods and attributes. In this class called `TicTacToe` we will have the attributes:

* `winner`, initialized as an empty string, and updated at the conclusion of a game with 'X', 'O', or 'Stalemate'
* `start_player`, initialized as an empty string and updated at the start of a game with 'X' or 'O'
* `board`, initialized as our empty board dictionary
* `win_patterns`, the list of lists containing the winning patterns of the game

and then we will have three different methods, each of which takes one parameter, `self`:

* `visualize_board`
* `check_winning`
* `check_stalemate`: a new function. Returns "It's a stalemate!" and sets `self.winner = "Stalemate"` (note there is a bug in the way this is currently written; we will move along for now and work through a debugging tutorial later this week!)

Q1 Attributes of TicTacToe

Within class TicTacToe, define the attributes described above.
###Code
class TicTacToe:
# create winner and start_player parameters with default values as empty
# strings within __init__
def __init__(self, winner='', start_player=''):
##################################
########### Attributes ###########
##################################
# set self.winner and self.start_player with the parameters from __init__
self.winner = winner
self.start_player = start_player
# set self.board as a dictionary with ' ' as values and 1-9 as keys
self.board = {1: ' ',
2: ' ',
3: ' ',
4: ' ',
5: ' ',
6: ' ',
7: ' ',
8: ' ',
9: ' ',}
# set self.win_patterns with the 8 winning patterns (a list of lists)
self.win_patterns = [[1,2,3], [4,5,6], [7,8,9],
[1,4,7], [2,5,8], [3,6,9],
[1,5,9], [7,5,3]]
###Output
_____no_output_____
###Markdown
Q2 Methods of TicTacToeHere now we will define the methods of `TicTacToe`. Paste your attributes from the above cell, into the bellow cell so that your changes carry over.
###Code
class TicTacToe:
# create winner and start_player parameters with default values as empty
# strings within __init__
def __init__(self, winner='', start_player=''):
##################################
########### Attributes ###########
##################################
# set self.winner and self.start_player with the parameters from __init__
self.winner = winner
self.start_player = start_player
# set self.board as a dictionary with ' ' as values and 1-9 as keys
self.board = {1: ' ',
2: ' ',
3: ' ',
4: ' ',
5: ' ',
6: ' ',
7: ' ',
8: ' ',
9: ' ',}
# set self.win_patterns with the 8 winning patterns (a list of lists)
self.win_patterns = [[1,2,3], [4,5,6], [7,8,9],
[1,4,7], [2,5,8], [3,6,9],
[1,5,9], [7,5,3]]
###############################
########### METHODS ###########
###############################
# the other functions are now passed self
# define visualize_board and update the board
# object with self.board (and maybe self.board.values() depending on how your
# visualize_board function is written)
def visualize_board(self):
"""
Visualizes the board during gameplay
Parameters
----------
board_values : list
The values ('X', 'O', or ' ' at each board location)
Returns
-------
None
"""
print(
"|{}|{}|{}|\n|{}|{}|{}|\n|{}|{}|{}|\n".format(*self.board.values())
)
# define check_winning and similarly update win_patterns,
# board, and winner to be accessed via the self. Be sure to update the
# attribute self.winner with the appropriate winner in the function
def check_winning(self):
"""
Checks if the game has a winner
Parameters
----------
board : dict
the tictactoe board as a dictionary
Returns
-------
win_statement : str
defaults to an empty string if no winner. Otherwise 'X' Won! or 'O' Won!
"""
for pattern in self.win_patterns:
values = [self.board[i] for i in pattern]
if values == ['X', 'X', 'X']:
self.winner = 'X'
return "'X' Won!"
elif values == ['O', 'O', 'O']:
self.winner = 'O'
return "'O' Won!"
return ''
# here the definition of check_stalemate is given
def check_stalemate(self):
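    # a stalemate is flagged when no ' ' cells remain; this simple check is where the bug mentioned above lives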
if ' ' not in self.board.values():
self.winner = 'Stalemate'
return "It's a stalemate!"
###Output
_____no_output_____
###Markdown
2.1.3 The Game Engine (just a function for now)Next we'll create a function that runs game play using TicTacToe as an object that it passes around. I've already done the heavy lifting of replacing references to attributes (board, win_patterns) and methods (visualize_board, check_winning) to pass through the `TicTacToe` object. I also added the option for the user to quit the game by typing in `'q'` to the input line if they would like. Q3 Add Condition for Stalemate
###Code
def play_game():
print("'X' will go first!")
tic_tac_toe = TicTacToe()
while True:
for player in (['X', 'O']):
tic_tac_toe.visualize_board()
move = input("{}, what's your move?".format(player))
####################################################################
# we're going to allow the user to quit the game from the input line
####################################################################
if move in ['q', 'quit']:
tic_tac_toe.winner = 'F'
print('quiting the game')
break
move = int(move)
if tic_tac_toe.board[move] != ' ':
while True:
move = input("{}, that position is already taken! "\
"What's your move?".format(player))
move = int(move)
if tic_tac_toe.board[move] != ' ':
continue
else:
break
tic_tac_toe.board[move] = player
# the winner varaible will now be checked within the board object
tic_tac_toe.check_winning()
##############################
# CALL check_stalemate() BELOW
##############################
tic_tac_toe.check_stalemate()
if tic_tac_toe.winner == '':
continue
##########################################################################
# write an elif statement that checks if self.winner is 'Stalemate' and
# subsequently visualizes the board and breaks out of the while loop
# also print out check_stalemate so the returned string is shown to the
# user
##########################################################################
elif tic_tac_toe.winner == 'Stalemate':
tic_tac_toe.visualize_board()
print(tic_tac_toe.check_stalemate())
break
else:
print(tic_tac_toe.check_winning())
tic_tac_toe.visualize_board()
break
if tic_tac_toe.winner != '':
break
###Output
_____no_output_____
###Markdown
Let's test our new module
###Code
play_game()
###Output
_____no_output_____ |
Heroes of Pymoli/Heroes Of Pymoli.Final.ipynb | ###Markdown
Heroes Of Pymoli Data Analysis

* Of the 1163 active players, the vast majority are male (84%). There also exists a smaller, but notable, proportion of female players (14%).
* Our peak age demographic falls between 20-24 (44.8%) with secondary groups falling between 15-19 (18.60%) and 25-29 (13.4%).

-----

Observable Trends Based On Data

* Female players and non-disclosed players spent more on average, at 6% and 11% higher than their male counterparts respectively, even though males outnumber them by at least 5-1.
* A substantial 3/4 of players are between 15 and 29 years old, with an average spend of $2.99. However, that is 6% lower than the remaining 1/4 of players, between the ages of <10 to 14 and 30 to 40+ years old, at $3.16. The age demographic with the highest average purchase per person is the 35 to 39 year olds at $3.60, almost a 20% difference from the 15-29 year olds who total 76.8% of players, meaning they have the highest purchasing power.
* The top 5 spenders spent on average $3.45 per purchase with a total purchase value of $13.32.
* Oathbreaker (Last Hope of the Breaking Storm), Fiery Glass Crusader and Nirvana are the most profitable items and are also 3 of the top 4 most popular items.

Note

* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import pandas as pd
import numpy as np
# File to Load (Remember to Change These)
file_to_load = "Resources/purchase_data.csv"
# Read Purchasing File and store into Pandas data frame
purchase_data = pd.read_csv(file_to_load, encoding="ISO-8859-1")
purchase_data.head()
###Output
_____no_output_____
###Markdown
Player Count

* Display the total number of players
###Code
# Total Number of Players
player_count = len(purchase_data["SN"].unique())
player_count
# Create Summary DataFrame
player_count_table = pd.DataFrame({"Total Players": [player_count]})
player_count_table
###Output
_____no_output_____
###Markdown
Purchasing Analysis (Total)

* Run basic calculations to obtain number of unique items, average price, etc.
* Create a summary data frame to hold the results
* Optional: give the displayed data cleaner formatting
* Display the summary data frame
###Code
# Number of Unique Items
unique_items_count = len(purchase_data["Item ID"].unique())
# Average Purchase Price
avg_purchase_price = purchase_data["Price"].mean()
# Total Number of Purchases
total_purchases = len(purchase_data["Purchase ID"].unique())
# Total Revenue
total_revenue = purchase_data["Price"].sum()
# Create Summary DataFrame
purchasing_analysis_table = pd.DataFrame([{
"Number of Unique Items": unique_items_count,
"Average Price": avg_purchase_price,
"Number of Purchases": total_purchases,
"Total Revenue": total_revenue,
}], columns=["Number of Unique Items", "Average Price", "Number of Purchases", "Total Revenue"])
purchasing_analysis_table["Average Price"] = purchasing_analysis_table["Average Price"].map("${0:.2f}".format)
purchasing_analysis_table["Total Revenue"] = purchasing_analysis_table["Total Revenue"].map("${0:,.2f}".format)
purchasing_analysis_table
###Output
_____no_output_____
###Markdown
Gender Demographics

* Percentage and Count of Male Players
* Percentage and Count of Female Players
* Percentage and Count of Other / Non-Disclosed
###Code
# Count & Percentage of Male Players
male_players = purchase_data.loc[purchase_data["Gender"] == "Male"]
male_count = len(male_players["SN"].unique())
male_percent = "{:.2f}%".format(male_count / player_count * 100)
# Count & Percentage of Female Players
female_players = purchase_data.loc[purchase_data["Gender"] == "Female"]
female_count = len(female_players["SN"].unique())
female_percent = "{:.2f}%".format(female_count / player_count * 100)
# Count & Percentage of Other / Non-Disclosed
other_players = purchase_data.loc[purchase_data["Gender"] == "Other / Non-Disclosed"]
other_count = len(other_players["SN"].unique())
other_percent = "{:.2f}%".format(other_count / player_count * 100)
# Create Summary DataFrame
gender_demographics_table = pd.DataFrame([{
"Gender": "Male", "Total Count": male_count,
"Percentage of Players": male_percent},
{"Gender": "Female", "Total Count": female_count,
"Percentage of Players": female_percent},
{"Gender": "Other / Non-Disclosed", "Total Count": other_count,
"Percentage of Players": other_percent
}], columns=["Gender", "Total Count", "Percentage of Players"])
gender_demographics_table = gender_demographics_table.set_index("Gender")
gender_demographics_table.index.name = None
gender_demographics_table
###Output
_____no_output_____
###Markdown
Purchasing Analysis (Gender)

* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. by gender
* Create a summary data frame to hold the results
* Optional: give the displayed data cleaner formatting
* Display the summary data frame
###Code
# Purchase Count of Male Players
male_purchase_data = purchase_data.loc[purchase_data["Gender"] == "Male", :]
male_purchase_count = len(male_purchase_data)
# Average Purchase Price of Male Players
avg_male_purchase_price = purchase_data.loc[purchase_data["Gender"] == "Male", ["Price"]].mean()
# Total Purchase Value of Male Players
total_male_purchase_value = purchase_data.loc[purchase_data["Gender"] == "Male", ["Price"]].sum()
# Purchase Count of Female Players
female_purchase_data = purchase_data.loc[purchase_data["Gender"] == "Female", :]
female_purchase_count = len(female_purchase_data)
# Average Purchase Price of Female Players
avg_female_purchase_price = purchase_data.loc[purchase_data["Gender"] == "Female", ["Price"]].mean()
# Total Purchase Value of Female Players
total_female_purchase_value = purchase_data.loc[purchase_data["Gender"] == "Female", ["Price"]].sum()
# Purchase Count of Other / Non-Disclosed Players
other_purchase_data = purchase_data.loc[purchase_data["Gender"] == "Other / Non-Disclosed", :]
other_purchase_count = len(other_purchase_data)
# Average Purchase Price of Other / Non-Disclosed Players
avg_other_purchase_price = purchase_data.loc[purchase_data["Gender"] == "Other / Non-Disclosed", ["Price"]].mean()
# Total Purchase Value of Other / Non-Disclosed Players
total_other_purchase_value = purchase_data.loc[purchase_data["Gender"] == "Other / Non-Disclosed", ["Price"]].sum()
# Average Purchase Total per Person by Gender
avg_male_purchase_total_person = total_male_purchase_value / male_count
avg_female_purchase_total_person = total_female_purchase_value / female_count
avg_other_purchase_total_person = total_other_purchase_value / other_count
# Create Summary DataFrame
gender_purchasing_analysis_table = pd.DataFrame([{
"Gender": "Female", "Purchase Count": female_purchase_count,
"Average Purchase Price": "${:.2f}".format(avg_female_purchase_price[0]),
"Total Purchase Value": "${:.2f}".format(total_female_purchase_value[0]),
"Avg Total Purchase per Person": "${:.2f}".format(avg_female_purchase_total_person[0])},
{"Gender": "Male", "Purchase Count": male_purchase_count,
"Average Purchase Price": "${:.2f}".format(avg_male_purchase_price[0]),
"Total Purchase Value": "${:,.2f}".format(total_male_purchase_value[0]),
"Avg Total Purchase per Person": "${:.2f}".format(avg_male_purchase_total_person[0])},
{"Gender": "Other / Non-Disclosed", "Purchase Count": other_purchase_count,
"Average Purchase Price": "${:.2f}".format(avg_other_purchase_price[0]),
"Total Purchase Value": "${:.2f}".format(total_other_purchase_value[0]),
"Avg Total Purchase per Person": "${:.2f}".format(avg_other_purchase_total_person[0])
}], columns=["Gender", "Purchase Count", "Average Purchase Price", "Total Purchase Value", "Avg Total Purchase per Person"])
gender_purchasing_analysis_table = gender_purchasing_analysis_table.set_index("Gender")
gender_purchasing_analysis_table.index.name = None
gender_purchasing_analysis_table
###Output
_____no_output_____
###Markdown
Age Demographics

* Establish bins for ages
* Categorize the existing players using the age bins. Hint: use pd.cut()
* Calculate the numbers and percentages by age group
* Create a summary data frame to hold the results
* Optional: round the percentage column to two decimal points
* Display Age Demographics Table
###Code
# Figure Out Minimum and Maximum Ages
# print(purchase_data["Age"].max())
# print(purchase_data["Age"].min())
# Establish Bins for Ages & Create Corresponding Names For Bins
age_bins = [0, 9, 14, 19, 24, 29, 34, 39, 46]
groups_names = ["<10", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40+"]
# Place Data Series Into New Column Inside DataFrame
purchase_data["Age Group"] = pd.cut(purchase_data["Age"], bins=age_bins, labels=groups_names)
purchase_data
# Create a GroupBy Object Based Upon "Age Group"
age_group = purchase_data.groupby("Age Group")
# Count Total Players by Age Category
total_count_age = age_group["SN"].nunique()
# Calculate Percentages by Age Category
percentage_by_age = round(total_count_age / player_count * 100,2)
# Create Summary DataFrame
age_demographics_table = pd.DataFrame({
"Total Count": total_count_age,
"Percentage of Players": percentage_by_age
})
age_demographics_table["Percentage of Players"] = age_demographics_table["Percentage of Players"].map("{0:,.2f}%".format)
age_demographics_table.index.name = None
age_demographics_table
###Output
_____no_output_____
###Markdown
Purchasing Analysis (Age)

* Bin the purchase_data data frame by age
* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below
* Create a summary data frame to hold the results
* Optional: give the displayed data cleaner formatting
* Display the summary data frame
###Code
# Establish Bins for Ages & Create Corresponding Names For The Bins
bins = [0, 9, 14, 19, 24, 29, 34, 39, 46]
groups_names = ["<10", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40+"]
# Place Data Series Into New Column Inside DataFrame
purchase_data["Age Group"] = pd.cut(purchase_data["Age"], bins=age_bins, labels=groups_names)
# Calculate "Purchase Count"
age_purchase_count = age_group["SN"].count()
# Calculate "Average Purchase Price"
avg_age_purchase_price = round(age_group["Price"].mean(),2)
# Calculate "Total Purchase Value"
total_age_purchase_value = round(age_group["Price"].sum(),2)
# Calculate "Avg Total Purchase per Person"
avg_total_age_purchase_person = round(total_age_purchase_value / total_count_age,2)
# Create Summary DataFrame
age_purchasing_analysis_table = pd.DataFrame({
"Purchase Count": age_purchase_count,
"Average Purchase Price": avg_age_purchase_price,
"Total Purchase Value": total_age_purchase_value,
"Avg Total Purchase per Person": avg_total_age_purchase_person
})
age_purchasing_analysis_table["Average Purchase Price"] = age_purchasing_analysis_table["Average Purchase Price"].map("${0:,.2f}".format)
age_purchasing_analysis_table["Total Purchase Value"] = age_purchasing_analysis_table["Total Purchase Value"].map("${0:,.2f}".format)
age_purchasing_analysis_table["Avg Total Purchase per Person"] = age_purchasing_analysis_table["Avg Total Purchase per Person"].map("${0:,.2f}".format)
age_purchasing_analysis_table
###Output
_____no_output_____
###Markdown
Top Spenders

* Run basic calculations to obtain the results in the table below
* Create a summary data frame to hold the results
* Sort the total purchase value column in descending order
* Optional: give the displayed data cleaner formatting
* Display a preview of the summary data frame
###Code
# Identify the Top 5 Spenders in the Game by Total Purchase Value & GroupBy "SN"
top_spenders = purchase_data.groupby("SN")
# Calculate "Purchase Count"
spender_purchase_count = top_spenders["Purchase ID"].count()
# Calculate "Average Purchase Price"
average_spender_purchase_price = round(top_spenders["Price"].mean(),2)
# Calculate "Total Purchase Value"
total_spender_purchase_value = top_spenders["Price"].sum()
# Create Summary DataFrame
top_spenders_table = pd.DataFrame({
"Purchase Count": spender_purchase_count,
"Average Purchase Price": average_spender_purchase_price,
"Total Purchase Value": total_spender_purchase_value
})
sort_top_spenders = top_spenders_table.sort_values(["Total Purchase Value"], ascending=False).head()
sort_top_spenders["Average Purchase Price"] = sort_top_spenders["Average Purchase Price"].astype(float).map("${:,.2f}".format)
sort_top_spenders["Total Purchase Value"] = sort_top_spenders["Total Purchase Value"].astype(float).map("${:,.2f}".format)
sort_top_spenders
###Output
_____no_output_____
###Markdown
Most Popular Items

* Retrieve the Item ID, Item Name, and Item Price columns
* Group by Item ID and Item Name. Perform calculations to obtain purchase count, item price, and total purchase value
* Create a summary data frame to hold the results
* Sort the purchase count column in descending order
* Optional: give the displayed data cleaner formatting
* Display a preview of the summary data frame
###Code
# Identify the 5 Most Popular Items by Creating New DataFrame
popular_items_list = purchase_data[["Item ID", "Item Name", "Price"]]
# GroupBy "Item ID" & "Item Name"
popular_items = popular_items_list.groupby(["Item ID","Item Name"])
# Calculate "Purchase Count"
item_purchase_count = popular_items["Price"].count()
# Calculate "Item Price"
item_price = popular_items["Price"].sum()
# Calculate "Total Purchase Value"
item_purchase_value = item_price / item_purchase_count
# Create Summary DataFrame
most_popular_items = pd.DataFrame({
"Purchase Count": item_purchase_count,
"Item Price": item_purchase_value,
"Total Purchase Value": item_price
})
popular_items_formatted = most_popular_items.sort_values(["Purchase Count"], ascending=False).head()
popular_items_formatted["Item Price"] = popular_items_formatted["Item Price"].astype(float).map("${:,.2f}".format)
popular_items_formatted["Total Purchase Value"] = popular_items_formatted["Total Purchase Value"].astype(float).map("${:,.2f}".format)
popular_items_formatted
###Output
_____no_output_____
###Markdown
Most Profitable Items

* Sort the above table by total purchase value in descending order
* Optional: give the displayed data cleaner formatting
* Display a preview of the data frame
###Code
popular_items_formatted = most_popular_items.sort_values(["Total Purchase Value"], ascending=False).head()
popular_items_formatted["Item Price"] = popular_items_formatted["Item Price"].astype(float).map("${:,.2f}".format)
popular_items_formatted["Total Purchase Value"] = popular_items_formatted["Total Purchase Value"].astype(float).map("${:,.2f}".format)
popular_items_formatted
# Export File As CSV
purchase_data.to_csv("Output/purchase_data_revised.csv", index=False, header=True)
###Output
_____no_output_____ |
Sentiment Analysis Model/.ipynb_checkpoints/Imdb Movie Reviews Sentiment Analysis(GRU)-checkpoint.ipynb | ###Markdown
Multiple Layer GRU
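Note: the cells below actually build a Conv1D + global-average-pooling text classifier rather than a GRU. As a hedged sketch (not part of the original notebook), a stacked bidirectional GRU matching this section's title might look like the following; `VOCAB_SIZE` is an illustrative stand-in for `tokenizer.vocab_size`:

```python
import tensorflow as tf

VOCAB_SIZE = 8185  # assumption: roughly tokenizer.vocab_size for the subwords8k encoder

gru_model = tf.keras.Sequential([
    tf.keras.layers.Embedding(VOCAB_SIZE, 64),
    # the first GRU returns the full sequence so the second GRU can consume it
    tf.keras.layers.Bidirectional(tf.keras.layers.GRU(64, return_sequences=True)),
    tf.keras.layers.Bidirectional(tf.keras.layers.GRU(32)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
gru_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
```

Such a model can be trained with the same `train_dataset`/`test_dataset` pipeline used below, at the cost of noticeably slower epochs than the convolutional version.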
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow_datasets as tfds
import tensorflow as tf
print(tf.__version__)
import tensorflow_datasets as tfds
import tensorflow as tf
print(tf.__version__)
# Get the data
dataset, info = tfds.load('imdb_reviews/subwords8k', with_info=True, as_supervised=True)
train_dataset, test_dataset = dataset['train'], dataset['test']
tokenizer = info.features['text'].encoder
BUFFER_SIZE = 10000
BATCH_SIZE = 64
train_dataset = train_dataset.shuffle(BUFFER_SIZE)
train_dataset = train_dataset.padded_batch(BATCH_SIZE, train_dataset.output_shapes)
test_dataset = test_dataset.padded_batch(BATCH_SIZE, test_dataset.output_shapes)
model = tf.keras.Sequential([
tf.keras.layers.Embedding(tokenizer.vocab_size, 64),
tf.keras.layers.Conv1D(128, 5, activation='relu'),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
NUM_EPOCHS = 10
history = model.fit(train_dataset, epochs=NUM_EPOCHS, validation_data=test_dataset)
import matplotlib.pyplot as plt
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.plot(history.history['val_'+string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.legend([string, 'val_'+string])
plt.show()
plot_graphs(history, 'accuracy')
plot_graphs(history, 'loss')
###Output
_____no_output_____ |
Spring2021-J/Sections/Coding Bootcamps/Coding Bootcamp Part 4.ipynb | ###Markdown
R Bootcamp Part 4 ggplot2One of the sad facts about (most) economic research papers is that they don't always have the most aesthetically pleasing figures. For many data visualization applications or our own work we might want to have more control over the visuals and step them up a notch, making sure they convey useful information and have informative labels/captions. This is where the **ggplot2** package comes in.We started off using **R's** built-in plot function, which let us produce scatterplots and construct histograms of all sorts of variables. However, it doesn't look the best and has some ugly naming conventions. **ggplot2** will give us complete control over our figure and allow us to get as in depth with it as we want.**ggplot2** is part of the **tidyverse** package, so we'll need to load that in before we get started.For today, let's load a few packages and read in a dataset on sleep quality and time allocation for 706 individuals. This dataset is saved to the section folder as `sleep75.dta`.
###Code
library(tidyverse)
library(haven)
sleepdata <- read_dta("sleep75.dta")
set.seed("12345") # sets the random seed so we get the same results later on from our random draws
###Output
_____no_output_____
###Markdown
ggplot2 Basic SyntaxLet's start by getting familiar with the basic syntax of __ggplot2__. It's syntax is a little bit different than some of the functions we've used before, but once we figure it out it makes thing nice and easy as we make more and more professional-looking figures. It also plays nicely with pipes!To start a plot, we start with the function `ggplot()`This function initializes an empty plot and passes data to other plots that we'll add on top. We can also use this function to define our dataset or specify what our x and y variables are.Try starting a new plot by running `ggplot()` below: Okay, so not the most impressive graphic yet. We get a little bit more if we specify our data and our x/y variables. To specify the data, we add the argument `data = "dataname"` to the `ggplot()` function. To specify which variable is on the x axis and which is on the y, we use the `aes(x= "xvar", y= "yvar")` argument. `aes()` is short for "aesthetics" and allows us to automatically pass these variables along as our x and y variables for the plots we add.Let's say we're interested in using our `sleepdata` to see the relationship between age and hourly wage in our sample:`ggplot(data = sleepdata, aes(x = age, y = hrwage))` That is a start! Now we have labels on both of our axes corresponding to the assigned variable, and a grid corresponding to possible values of those variables. This makes sense, as we told **R** with `aes()` what our x variable and y variable are, and it then automatically sets up tick marks based on our data.We will add geometries (sets of points, histograms, lines, etc.) by adding what we call "layers" using a `+` after our `ggplot()` function. Let's take a look at a few of the options. ScatterplotsNow let's add some points! If we want to get a sense of how age and hourly wage vary in our data, we can do that by just plotting the points. We add (x,y) points using the funciton`geom_point()`Since we already declared our two variables, all we need to add `+ geom_point()` to our existing code: `ggplot(data = sleepdata, aes(x = age, y = hrwage)) + geom_point()` And we get a a plot of all our points (note that we were warned that there are some missing values that get dropped). LabelsSometimes we might want to change the labels from the variable names to a more descriptive label, and possibly add a title. We can do that! We do this by adding the `labs()` function to our plot.`ggplot(data = sleepdata, aes(x = age, y = hrwage)) + geom_point() + labs(title = "Relationship between Age and Hourly Wage", subtitle = "Nonmissing Sample", caption = "Note: prepared using Wooldridge's sleep75 data.", x = "Age (years)", y = "Hourly Wage ($)")` Let's take a look at what we added to `labs()`. * First, `title` gives us the main title at the top.* `subtitle` gives us another line in a smaller font below the main title. * `caption` adds a note at the bottom of the plot* `x` and `y` correspond to our x and y labels, respectively. * We can specify as many/few of these elements as we want, but just make sure to separate them by commas Changing PointsWhat if we want to change the color/shape/transparency of our points? We can do that by adding optimal arguments to `geom_point()`.`ggplot(data = sleepdata, aes(x = age, y = hrwage)) + geom_point(colour = "blue", alpha = 0.4, size = 0.8) + labs(title = "Relationship between Age and Hourly Wage", subtitle = "Nonmissing Sample", x = "Age (years)", y = "Hourly Wage ($)")` By adding `colour="blue"` we changed the color to blue. 
There are [a toooooon](http://sape.inf.usi.ch/sites/default/files/ggplot2-colour-names.png) of named colors that we could use instead (this gets really useful when we start splitting our data by group levels).`alpha = 0.4` is changing the transparency of our points to 40%. `size = 0.8` is reducing the size of the points to 80% of their original size. Splitting by GroupsWhat if we wanted to change the color of our points according to whether the individual is male or not? We can do that by adding an `aes()` to geom_point!`ggplot(data = sleepdata, aes(x = age, y = hrwage)) + geom_point(aes(colour = factor(male))) + labs(title = "Relationship between Age and Hourly Wage", subtitle = "Nonmissing Sample, by Gender", x = "Age (years)", y = "Hourly Wage ($)")` By adding an aesthestic to our `geom_point` we can set the color to be determined by the value of $male$. By default, the zero value (i.e. female) gets a red color while a 1 value (female) gets a light green. We specify the variable as a `factor()` so that ggplot knows it is a discrete variable. What if we instead wanted to change color on a continuous scale?`ggplot(data = sleepdata, aes(x = age, y = hrwage)) + geom_point(aes(colour = age)) + labs(title = "Relationship between Age and Hourly Wage", subtitle = "Nonmissing Sample, by Age", x = "Age (years)", y = "Hourly Wage ($)")` Here the color is now a function of our continuous variable $age$, taking increasingly lighter values for higher ages.(note that __ggplot2__ lets you specify the color scale or color levels if you want, as well as nitpick the labels in the legend. In reality we can change anything that appears in the plot - we just have to choose the right option). One thing to note is that we can make other options conditional on variables in our data frame too. What if we wanted the shape of our points to depend on union participation, the color to vary with gender, and the size of the points to depend on the total minutes worked per week? We can do all that - even if it might look real gross:`ggplot(data = sleepdata, aes(x = age, y = hrwage)) + geom_point(aes(colour = factor(male), shape = factor(union), size = totwrk)) + labs(title = "Relationship between Age and Hourly Wage", subtitle = "Nonmissing Sample, too many things going on", x = "Age (years)", y = "Hourly Wage ($)")` While the above example is cluttered, it shows how we can take a simple scatterplot and use it to convey additional information in just one plot. LinesWe can add lines to our figure in a couple different ways. First, if we wanted to connect all the points in our data with a line, we would use the `geom_line()` layer. For example, let's say we want to plot the mean hourly wage for each year of age in our data, this time dropping the NA values so ggplot doesn't give us a warning:`sleepdata %>% group_by(age) %>% drop_na(age, hrwage) %>% summarise(hrwage = mean(hrwage)) %>% ggplot(aes(x=age, y = hrwage)) + geom_line()`
###Code
###Output
_____no_output_____
notebooks/model/population2010-2019/population_pickle.ipynb | ###Markdown
Create Prediction Forecast Endpoint
1. Load CSV
2. Test Pickle
3. Create Prediction Function
4. Create Visualization Function
5. Test Prediction and Visualization Function
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from fbprophet import Prophet
###Output
/Users/jisha/.pyenv/versions/3.8.6/lib/python3.8/site-packages/pandas/compat/__init__.py:120: UserWarning: Could not import the lzma module. Your installed Python is incomplete. Attempting to use lzma compression will result in a RuntimeError.
warnings.warn(msg)
###Markdown
1. Load CSV
###Code
population_melt = pd.read_csv('https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/population2010-2019/csv/population_melt.csv')
population = pd.read_csv('https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/population2010-2019/csv/population_cleaned.csv')
###Output
_____no_output_____
###Markdown
3. Create Prediction and Visualization Function
###Code
def population_forecast(city, periods):
# Load Dataset
population = pd.read_csv('https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/population2010-2019/csv/population_cleaned.csv')
population.reset_index(level=0, inplace=True)
# Melt table into ds and y
population_melt = population[['City,State', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']]
population_melt = population_melt.melt(id_vars=['City,State'], var_name='ds', value_name='y')
# Isolate city data
city = [city]
df_ = population_melt.loc[population_melt['City,State'].isin(city)][['ds', 'y']]
df_.columns = ['ds','y']
# Fit and Predict on city dataframe
m = Prophet(interval_width=0.95)
m.fit(df_)
future = m.make_future_dataframe(periods=periods, freq='Y')
forecast = m.predict(future)
predictions = forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']][9:]
predictions['ds'] = pd.DatetimeIndex(predictions['ds']).year
predictions[['yhat', 'yhat_lower', 'yhat_upper']] = predictions[['yhat', 'yhat_lower', 'yhat_upper']].round()
# Create graph
# Graph first 10 years
df_['ds'] = df_['ds'].astype(int)
ax = df_.plot(x = 'ds', y = 'y', label='Observed', figsize= (10, 8))
# Graph predictions including the upper and lower bounds
predictions['ds'] = predictions['ds'].astype(int)
predictions[['ds', 'yhat']].plot(ax = ax, x = 'ds', y = 'yhat', label = "Forecast")
ax.fill_between(predictions['ds'],
predictions['yhat_lower'],
predictions['yhat_upper'],
color='k',
alpha=.25)
ax.set_xlabel('Year')
ax.set_ylabel('Population')
plt.title(f"{city[0]} Population" )
plt.legend()
return plt.show()
population_forecast('Akron, OH', 10)
###Output
INFO:fbprophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.
INFO:fbprophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.
INFO:fbprophet:n_changepoints greater than number of observations. Using 7.
###Markdown
4. Create Visualization Function - no prediction step (plots saved forecasts only)
###Code
POPULATION_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/population2010-2019/csv/population_cleaned.csv'
FORECAST_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/population2010-2019/csv/population_prediction.csv'
def get_plot(city):
city = [city]
# Historical population data
population = pd.read_csv(POPULATION_CSV)
population = population[population['City,State'].isin(city)]
population = population[['City,State', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']]
population_melt = population.melt(id_vars=['City,State'], var_name='ds', value_name='y')
population_melt['ds'] = population_melt['ds'].astype(int)
# Predictions
forecast = pd.read_csv(FORECAST_CSV)
df = forecast[forecast['City,State'].isin(city)][9:]
df['year'] = df['year'].astype(int)
# Graph Data
ax = population_melt.plot(x = 'ds', y = 'y', label='Observed', figsize= (10, 8))
df[['year', 'yhat']].plot(ax = ax, x = 'year', y = 'yhat', label = "Forecast")
# Fill to show upper and lower bounds
ax.fill_between(df['year'],
df['yhat_lower'],
df['yhat_upper'],
color='k',
alpha=.25)
ax.set_xlabel('Year')
ax.set_ylabel('Population')
plt.title(f"{city[0]} Population" )
plt.legend()
plt.show()
get_plot('Akron, OH')
###Output
_____no_output_____
###Markdown
Create Prediction Forecast Endpoint
1. Load CSV
2. Create and Test Pickle
3. Create Prediction Function
4. Create Visualization Function
5. Test Prediction and Visualization Function
###Code
import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
from fbprophet import Prophet
###Output
_____no_output_____
###Markdown
1. Load CSV
###Code
population_melt = pd.read_csv('https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/population2010-2019/csv/population_melt.csv')
population = pd.read_csv('https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/population2010-2019/csv/population_cleaned.csv')
###Output
_____no_output_____
###Markdown
2. Create and Test Pickle

Create a dictionary:
- zip city list
- create a list of grouped dataframes

(A minimal sketch of this dictionary follows the next cell.)
###Code
cities_list = list(population['City,State'])
###Output
_____no_output_____
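###Markdown
The dictionary described above is not actually built in the cell just run. Below is a minimal sketch of it, assuming `population_melt.csv` carries the `City,State`, `ds`, and `y` columns produced by the melt shown earlier; the names `grouped_frames` and `city_frames` are illustrative, not from the original notebook.
###Code
# Sketch (assumption): zip the city list with a list of per-city (ds, y) dataframes
grouped = population_melt.groupby('City,State')
grouped_frames = [grouped.get_group(city)[['ds', 'y']] for city in cities_list]
city_frames = dict(zip(cities_list, grouped_frames))
# Quick check on a city used elsewhere in this notebook
city_frames['Akron, OH'].head()
###Output
_____no_output_____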
###Markdown
Fit FB Prophet model
- fit model on dataframe
- create multiple models
- pickle models

Was unable to complete this process: the model took too long to even pickle for it to be useful as a model. (A minimal sketch of the attempted per-city loop is shown in the next cell, before the forecast function.)

3. Create Prediction and Visualization Function
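###Markdown
For reference, a minimal sketch of the per-city fit-and-pickle loop described above; the loop is capped at a few cities and the pickle path is hypothetical, since fitting and pickling a model for every city is exactly what proved too slow.
###Code
# Sketch of the abandoned approach: one Prophet model per city, pickled to disk.
# Only the first few cities are fitted here; doing this for the full list was too slow.
city_models = {}
for city in cities_list[:3]:
    df_city = population_melt.loc[population_melt['City,State'] == city, ['ds', 'y']]
    m_city = Prophet(interval_width=0.95)
    m_city.fit(df_city)
    city_models[city] = m_city
with open('prophet_models_sample.pkl', 'wb') as f:  # hypothetical path
    pickle.dump(city_models, f)
###Output
_____no_output_____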
###Code
# this model works but just locally
def population_forecast(city, periods):
# Load Dataset
population = pd.read_csv('https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/population2010-2019/csv/population_cleaned.csv')
population.reset_index(level=0, inplace=True)
# Melt table into ds and y
population_melt = population[['City,State', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']]
population_melt = population_melt.melt(id_vars=['City,State'], var_name='ds', value_name='y')
# Isolate city data
city = [city]
df_ = population_melt.loc[population_melt['City,State'].isin(city)][['ds', 'y']]
print(df_)
df_.columns = ['ds','y']
# Fit and Predict on city dataframe
    # Instantiate the model here; loading a pickled Prophet model proved too slow to be useful
    m = Prophet(interval_width=0.95)
    m.fit(df_)
future = m.make_future_dataframe(periods=periods, freq='Y')
forecast = m.predict(future)
predictions = forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']][9:]
predictions['ds'] = predictions['ds'].dt.year
print(predictions['ds'])
predictions[['yhat', 'yhat_lower', 'yhat_upper']] = predictions[['yhat', 'yhat_lower', 'yhat_upper']].round()
print(predictions.tail())
# Create graph
# Graph first 10 years
df_['ds'] = df_['ds'].astype(int)
ax = df_.plot(x = 'ds', y = 'y', label='Observed', figsize= (10, 8))
# Graph predictions including the upper and lower bounds
predictions['ds'] = predictions['ds'].astype(int)
predictions[['ds', 'yhat']].plot(ax = ax, x = 'ds', y = 'yhat', label = "Forecast")
ax.fill_between(predictions['ds'],
predictions['yhat_lower'],
predictions['yhat_upper'],
color='k',
alpha=.25)
ax.set_xlabel('Year')
ax.set_ylabel('Population')
plt.title(f"{city[0]} Population" )
plt.legend()
return plt.show()
###Output
_____no_output_____
###Markdown
4. Create Visualization Function - using saved predictions
###Code
POPULATION_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/population2010-2019/csv/population_cleaned.csv'
FORECAST_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/population2010-2019/csv/population_prediction.csv'
def get_plot(city):
city = [city]
# Historical population data
population = pd.read_csv(POPULATION_CSV)
population = population[population['City,State'].isin(city)]
population = population[['City,State', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']]
population_melt = population.melt(id_vars=['City,State'], var_name='ds', value_name='y')
population_melt['ds'] = population_melt['ds'].astype(int)
# Predictions
forecast = pd.read_csv(FORECAST_CSV)
df = forecast[forecast['City,State'].isin(city)][9:]
df['year'] = df['year'].astype(int)
# Graph Data
ax = population_melt.plot(x = 'ds', y = 'y', label='Observed', figsize= (10, 8))
df[['year', 'yhat']].plot(ax = ax, x = 'year', y = 'yhat', label = "Forecast")
# Fill to show upper and lower bounds
ax.fill_between(df['year'],
df['yhat_lower'],
df['yhat_upper'],
color='k',
alpha=.25)
ax.set_xlabel('Year')
ax.set_ylabel('Population')
plt.title(f"{city[0]} Population" )
plt.legend()
plt.show()
# @router.post('/api/population_forecast')
# def population_forecast(city:City, periods=10):
# """
# Create visualization of historical and forecasted population
# args:
# - city: str -> The target city
# - periods: int -> number of years to forecast for
# Returns:
# Visualization of population forecast
# - 10 year of historical data
# - forecasts for number of years entered
# """
# city = validate_city(city)
# # Load Dataset
# population = pd.read_csv('https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/population2010-2019/csv/population_cleaned.csv')
# population.reset_index(level=0, inplace=True)
# # Melt table into ds and y
# population_melt = population[['City,State', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']]
# population_melt = population_melt.melt(id_vars=['City,State'], var_name='ds', value_name='y')
# # Isolate city data
# location = [city.city + ', ' + city.state]
# df_ = population_melt.loc[population_melt['City,State'].isin(location)][['ds','y']]
# df_.columns = ['ds','y']
# # Fit and Predict on city dataframe
# # Model
# with open("app/data/pickle_model/model.pkl", "rb") as f:
# m = load(f)
# m = Prophet(interval_width=0.95)
# # Fit model
# m.fit(df_)
# future = m.make_future_dataframe(periods=periods, freq='Y')
# # Predict
# forecast = m.predict(future)
# predictions = forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']][9:]
# predictions['ds'] = pd.DatetimeIndex(predictions['ds']).year
# predictions[['yhat', 'yhat_lower', 'yhat_upper']] = predictions[['yhat', 'yhat_lower', 'yhat_upper']].round()
# # Create graph
# # Graph first 10 years
# df_['ds'] = df_['ds'].astype(int)
# predictions['ds'] = predictions['ds'].astype(int)
# # Graph historical data
# fig = go.Figure()
# fig.add_trace(go.Scatter(
# name = 'Original',
# x = list(df_['ds']),
# y = list(df_['y']),
# fill = None,
# mode = 'lines',
# line_color = 'black',
# showlegend = True
# ))
# # Graph predictions including the upper and lower bounds
# fig.add_trace(go.Scatter(
# name = 'Forecast',
# x = list(predictions['ds']),
# y = list(predictions['yhat']),
# fill = None,
# mode = 'lines',
# line_color = 'red',
# showlegend = True
# ))
# fig.add_trace(go.Scatter(
# name = 'Lower Bound',
# x = list(predictions['ds']),
# y = list(predictions['yhat_lower']),
# fill = None,
# mode = 'lines',
# line_color = 'gray',
# ))
# fig.add_trace(go.Scatter(
# name = 'Upper Bound',
# x = list(predictions['ds']),
# y = list(predictions['yhat_upper']),
# fill='tonexty',
# mode='lines',
# line_color = 'gray',
# ))
# # Edit the layout
# fig.update_layout({
# 'autosize':True,
#     'title': f'{location[0]} Population Forecast',
# 'title_x': 0.5,
# 'xaxis_title': 'Year',
# 'yaxis_title': 'Population'
# })
# fig.update_yaxes(automargin = True,)
# fig.update_xaxes(automargin = True, nticks=20)
# fig.show()
# return fig.to_json
###Output
_____no_output_____ |
courses/udacity_intro_to_tensorflow_for_deep_learning/l08c09_forecasting_with_cnn.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Forecasting with a CNN Run in Google Colab View source on GitHub Setup
###Code
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
keras = tf.keras
def plot_series(time, series, format="-", start=0, end=None, label=None):
plt.plot(time[start:end], series[start:end], format, label=label)
plt.xlabel("Time")
plt.ylabel("Value")
if label:
plt.legend(fontsize=14)
plt.grid(True)
def trend(time, slope=0):
return slope * time
def seasonal_pattern(season_time):
"""Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.4,
np.cos(season_time * 2 * np.pi),
1 / np.exp(3 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
"""Repeats the same pattern at each period"""
season_time = ((time + phase) % period) / period
return amplitude * seasonal_pattern(season_time)
def white_noise(time, noise_level=1, seed=None):
rnd = np.random.RandomState(seed)
return rnd.randn(len(time)) * noise_level
def seq2seq_window_dataset(series, window_size, batch_size=32,
                           shuffle_buffer=1000):
    # Add a feature dimension, then slide windows of length window_size + 1 over the series;
    # each window is split into inputs (all but the last step) and targets (all but the first),
    # giving sequence-to-sequence pairs shifted by one time step.
    series = tf.expand_dims(series, axis=-1)
    ds = tf.data.Dataset.from_tensor_slices(series)
    ds = ds.window(window_size + 1, shift=1, drop_remainder=True)
    ds = ds.flat_map(lambda w: w.batch(window_size + 1))
    ds = ds.shuffle(shuffle_buffer)
    ds = ds.map(lambda w: (w[:-1], w[1:]))
    return ds.batch(batch_size).prefetch(1)
def model_forecast(model, series, window_size):
    # Slide windows of length window_size over the series and run the model on each window in batches.
    ds = tf.data.Dataset.from_tensor_slices(series)
    ds = ds.window(window_size, shift=1, drop_remainder=True)
    ds = ds.flat_map(lambda w: w.batch(window_size))
    ds = ds.batch(32).prefetch(1)
    forecast = model.predict(ds)
    return forecast
time = np.arange(4 * 365 + 1)
slope = 0.05
baseline = 10
amplitude = 40
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
###Output
_____no_output_____
###Markdown
Preprocessing With 1D-Convolutional Layers
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
model = keras.models.Sequential([
keras.layers.Conv1D(filters=32, kernel_size=5,
strides=1, padding="causal",
activation="relu",
input_shape=[None, 1]),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200)
])
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-8 * 10**(epoch / 20))
optimizer = keras.optimizers.SGD(lr=1e-8, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-8, 1e-4, 0, 30])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
valid_set = seq2seq_window_dataset(x_valid, window_size,
batch_size=128)
model = keras.models.Sequential([
keras.layers.Conv1D(filters=32, kernel_size=5,
strides=1, padding="causal",
activation="relu",
input_shape=[None, 1]),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200)
])
optimizer = keras.optimizers.SGD(lr=1e-5, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
model_checkpoint = keras.callbacks.ModelCheckpoint(
"my_checkpoint.h5", save_best_only=True)
early_stopping = keras.callbacks.EarlyStopping(patience=50)
model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping, model_checkpoint])
model = keras.models.load_model("my_checkpoint.h5")
rnn_forecast = model_forecast(model, series[:, np.newaxis], window_size)
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
###Output
_____no_output_____
###Markdown
Fully Convolutional Forecasting
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 64
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
model = keras.models.Sequential()
model.add(keras.layers.InputLayer(input_shape=[None, 1]))
for dilation_rate in (1, 2, 4, 8, 16, 32):
model.add(
keras.layers.Conv1D(filters=32,
kernel_size=2,
strides=1,
dilation_rate=dilation_rate,
padding="causal",
activation="relu")
)
model.add(keras.layers.Conv1D(filters=1, kernel_size=1))
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-4 * 10**(epoch / 30))
optimizer = keras.optimizers.Adam(lr=1e-4)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-4, 1e-1, 0, 30])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 64
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
valid_set = seq2seq_window_dataset(x_valid, window_size,
batch_size=128)
model = keras.models.Sequential()
model.add(keras.layers.InputLayer(input_shape=[None, 1]))
for dilation_rate in (1, 2, 4, 8, 16, 32):
model.add(
keras.layers.Conv1D(filters=32,
kernel_size=2,
strides=1,
dilation_rate=dilation_rate,
padding="causal",
activation="relu")
)
model.add(keras.layers.Conv1D(filters=1, kernel_size=1))
optimizer = keras.optimizers.Adam(lr=3e-4)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
model_checkpoint = keras.callbacks.ModelCheckpoint(
"my_checkpoint.h5", save_best_only=True)
early_stopping = keras.callbacks.EarlyStopping(patience=50)
history = model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping, model_checkpoint])
model = keras.models.load_model("my_checkpoint.h5")
cnn_forecast = model_forecast(model, series[..., np.newaxis], window_size)
cnn_forecast = cnn_forecast[split_time - window_size:-1, -1, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, cnn_forecast)
keras.metrics.mean_absolute_error(x_valid, cnn_forecast).numpy()
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Forecasting with a CNN Run in Google Colab View source on GitHub Setup
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# Use the %tensorflow_version magic if in colab.
%tensorflow_version 2.x
except Exception:
pass
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
keras = tf.keras
def plot_series(time, series, format="-", start=0, end=None, label=None):
plt.plot(time[start:end], series[start:end], format, label=label)
plt.xlabel("Time")
plt.ylabel("Value")
if label:
plt.legend(fontsize=14)
plt.grid(True)
def trend(time, slope=0):
return slope * time
def seasonal_pattern(season_time):
"""Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.4,
np.cos(season_time * 2 * np.pi),
1 / np.exp(3 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
"""Repeats the same pattern at each period"""
season_time = ((time + phase) % period) / period
return amplitude * seasonal_pattern(season_time)
def white_noise(time, noise_level=1, seed=None):
rnd = np.random.RandomState(seed)
return rnd.randn(len(time)) * noise_level
def seq2seq_window_dataset(series, window_size, batch_size=32,
shuffle_buffer=1000):
series = tf.expand_dims(series, axis=-1)
ds = tf.data.Dataset.from_tensor_slices(series)
ds = ds.window(window_size + 1, shift=1, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(window_size + 1))
ds = ds.shuffle(shuffle_buffer)
ds = ds.map(lambda w: (w[:-1], w[1:]))
return ds.batch(batch_size).prefetch(1)
def model_forecast(model, series, window_size):
ds = tf.data.Dataset.from_tensor_slices(series)
ds = ds.window(window_size, shift=1, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(window_size))
ds = ds.batch(32).prefetch(1)
forecast = model.predict(ds)
return forecast
time = np.arange(4 * 365 + 1)
slope = 0.05
baseline = 10
amplitude = 40
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
###Output
_____no_output_____
###Markdown
Preprocessing With 1D-Convolutional Layers
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
model = keras.models.Sequential([
keras.layers.Conv1D(filters=32, kernel_size=5,
strides=1, padding="causal",
activation="relu",
input_shape=[None, 1]),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200)
])
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-8 * 10**(epoch / 20))
optimizer = keras.optimizers.SGD(lr=1e-8, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-8, 1e-4, 0, 30])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
valid_set = seq2seq_window_dataset(x_valid, window_size,
batch_size=128)
model = keras.models.Sequential([
keras.layers.Conv1D(filters=32, kernel_size=5,
strides=1, padding="causal",
activation="relu",
input_shape=[None, 1]),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200)
])
optimizer = keras.optimizers.SGD(lr=1e-5, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
model_checkpoint = keras.callbacks.ModelCheckpoint(
"my_checkpoint.h5", save_best_only=True)
early_stopping = keras.callbacks.EarlyStopping(patience=50)
model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping, model_checkpoint])
model = keras.models.load_model("my_checkpoint.h5")
rnn_forecast = model_forecast(model, series[:, np.newaxis], window_size)
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
###Output
_____no_output_____
###Markdown
Fully Convolutional Forecasting
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 64
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
model = keras.models.Sequential()
model.add(keras.layers.InputLayer(input_shape=[None, 1]))
for dilation_rate in (1, 2, 4, 8, 16, 32):
model.add(
keras.layers.Conv1D(filters=32,
kernel_size=2,
strides=1,
dilation_rate=dilation_rate,
padding="causal",
activation="relu")
)
model.add(keras.layers.Conv1D(filters=1, kernel_size=1))
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-4 * 10**(epoch / 30))
optimizer = keras.optimizers.Adam(lr=1e-4)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-4, 1e-1, 0, 30])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 64
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
valid_set = seq2seq_window_dataset(x_valid, window_size,
batch_size=128)
model = keras.models.Sequential()
model.add(keras.layers.InputLayer(input_shape=[None, 1]))
for dilation_rate in (1, 2, 4, 8, 16, 32):
model.add(
keras.layers.Conv1D(filters=32,
kernel_size=2,
strides=1,
dilation_rate=dilation_rate,
padding="causal",
activation="relu")
)
model.add(keras.layers.Conv1D(filters=1, kernel_size=1))
optimizer = keras.optimizers.Adam(lr=3e-4)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
model_checkpoint = keras.callbacks.ModelCheckpoint(
"my_checkpoint.h5", save_best_only=True)
early_stopping = keras.callbacks.EarlyStopping(patience=50)
history = model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping, model_checkpoint])
model = keras.models.load_model("my_checkpoint.h5")
cnn_forecast = model_forecast(model, series[..., np.newaxis], window_size)
cnn_forecast = cnn_forecast[split_time - window_size:-1, -1, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, cnn_forecast)
keras.metrics.mean_absolute_error(x_valid, cnn_forecast).numpy()
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Forecasting with a CNN Setup
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# Use the %tensorflow_version magic if in colab.
%tensorflow_version 2.x
except Exception:
pass
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
tf.enable_v2_behavior()
keras = tf.keras
def plot_series(time, series, format="-", start=0, end=None, label=None):
plt.plot(time[start:end], series[start:end], format, label=label)
plt.xlabel("Time")
plt.ylabel("Value")
if label:
plt.legend(fontsize=14)
plt.grid(True)
def trend(time, slope=0):
return slope * time
def seasonal_pattern(season_time):
"""Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.4,
np.cos(season_time * 2 * np.pi),
1 / np.exp(3 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
"""Repeats the same pattern at each period"""
season_time = ((time + phase) % period) / period
return amplitude * seasonal_pattern(season_time)
def white_noise(time, noise_level=1, seed=None):
rnd = np.random.RandomState(seed)
return rnd.randn(len(time)) * noise_level
def seq2seq_window_dataset(series, window_size, batch_size=32,
shuffle_buffer=1000):
series = tf.expand_dims(series, axis=-1)
ds = tf.data.Dataset.from_tensor_slices(series)
ds = ds.window(window_size + 1, shift=1, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(window_size + 1))
ds = ds.shuffle(shuffle_buffer)
ds = ds.map(lambda w: (w[:-1], w[1:]))
return ds.batch(batch_size).prefetch(1)
def model_forecast(model, series, window_size):
ds = tf.data.Dataset.from_tensor_slices(series)
ds = ds.window(window_size, shift=1, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(window_size))
ds = ds.batch(32).prefetch(1)
forecast = model.predict(ds)
return forecast
time = np.arange(4 * 365 + 1)
slope = 0.05
baseline = 10
amplitude = 40
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
###Output
_____no_output_____
###Markdown
Preprocessing With 1D-Convolutional Layers
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
model = keras.models.Sequential([
keras.layers.Conv1D(filters=32, kernel_size=5,
strides=1, padding="causal",
activation="relu",
input_shape=[None, 1]),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200)
])
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-8 * 10**(epoch / 20))
optimizer = keras.optimizers.SGD(lr=1e-8, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-8, 1e-4, 0, 30])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
valid_set = seq2seq_window_dataset(x_valid, window_size,
batch_size=128)
model = keras.models.Sequential([
keras.layers.Conv1D(filters=32, kernel_size=5,
strides=1, padding="causal",
activation="relu",
input_shape=[None, 1]),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200)
])
optimizer = keras.optimizers.SGD(lr=1e-5, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
model_checkpoint = keras.callbacks.ModelCheckpoint(
"my_checkpoint.h5", save_best_only=True)
early_stopping = keras.callbacks.EarlyStopping(patience=50)
model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping, model_checkpoint])
model = keras.models.load_model("my_checkpoint.h5")
rnn_forecast = model_forecast(model, series[:, np.newaxis], window_size)
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
###Output
_____no_output_____
###Markdown
Fully Convolutional Forecasting
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 64
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
model = keras.models.Sequential()
model.add(keras.layers.InputLayer(input_shape=[None, 1]))
for dilation_rate in (1, 2, 4, 8, 16, 32):
model.add(
keras.layers.Conv1D(filters=32,
kernel_size=2,
strides=1,
dilation_rate=dilation_rate,
padding="causal",
activation="relu")
)
model.add(keras.layers.Conv1D(filters=1, kernel_size=1))
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-4 * 10**(epoch / 30))
optimizer = keras.optimizers.Adam(lr=1e-4)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-4, 1e-1, 0, 30])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 64
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
valid_set = seq2seq_window_dataset(x_valid, window_size,
batch_size=128)
model = keras.models.Sequential()
model.add(keras.layers.InputLayer(input_shape=[None, 1]))
for dilation_rate in (1, 2, 4, 8, 16, 32):
model.add(
keras.layers.Conv1D(filters=32,
kernel_size=2,
strides=1,
dilation_rate=dilation_rate,
padding="causal",
activation="relu")
)
model.add(keras.layers.Conv1D(filters=1, kernel_size=1))
optimizer = keras.optimizers.Adam(lr=3e-4)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
model_checkpoint = keras.callbacks.ModelCheckpoint(
"my_checkpoint.h5", save_best_only=True)
early_stopping = keras.callbacks.EarlyStopping(patience=50)
history = model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping, model_checkpoint])
model = keras.models.load_model("my_checkpoint.h5")
cnn_forecast = model_forecast(model, series[..., np.newaxis], window_size)
cnn_forecast = cnn_forecast[split_time - window_size:-1, -1, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, cnn_forecast)
keras.metrics.mean_absolute_error(x_valid, cnn_forecast).numpy()
###Output
_____no_output_____ |
projects/plant-pathology.ipynb | ###Markdown
Imports
###Code
import numpy as np
import pandas as pd
import os
import random, re, math
import tensorflow as tf, tensorflow.keras.backend as K
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers
from kaggle_datasets import KaggleDatasets
from random import seed
from random import randint
import cv2
from matplotlib import pyplot as plt
import seaborn as sns
from keras import Sequential
from tensorflow.keras.layers import Dense, Dropout, Input
!pip install efficientnet
import efficientnet.tfkeras as efn
###Output
Collecting efficientnet
Downloading efficientnet-1.1.0-py3-none-any.whl (18 kB)
Requirement already satisfied: keras-applications<=1.0.8,>=1.0.7 in /opt/conda/lib/python3.7/site-packages (from efficientnet) (1.0.8)
Requirement already satisfied: scikit-image in /opt/conda/lib/python3.7/site-packages (from efficientnet) (0.16.2)
Requirement already satisfied: h5py in /opt/conda/lib/python3.7/site-packages (from keras-applications<=1.0.8,>=1.0.7->efficientnet) (2.10.0)
Requirement already satisfied: numpy>=1.9.1 in /opt/conda/lib/python3.7/site-packages (from keras-applications<=1.0.8,>=1.0.7->efficientnet) (1.18.1)
Requirement already satisfied: scipy>=0.19.0 in /opt/conda/lib/python3.7/site-packages (from scikit-image->efficientnet) (1.4.1)
Requirement already satisfied: matplotlib!=3.0.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from scikit-image->efficientnet) (3.2.1)
Requirement already satisfied: networkx>=2.0 in /opt/conda/lib/python3.7/site-packages (from scikit-image->efficientnet) (2.4)
Requirement already satisfied: pillow>=4.3.0 in /opt/conda/lib/python3.7/site-packages (from scikit-image->efficientnet) (5.4.1)
Requirement already satisfied: imageio>=2.3.0 in /opt/conda/lib/python3.7/site-packages (from scikit-image->efficientnet) (2.8.0)
Requirement already satisfied: PyWavelets>=0.4.0 in /opt/conda/lib/python3.7/site-packages (from scikit-image->efficientnet) (1.1.1)
Requirement already satisfied: six in /opt/conda/lib/python3.7/site-packages (from h5py->keras-applications<=1.0.8,>=1.0.7->efficientnet) (1.14.0)
Requirement already satisfied: python-dateutil>=2.1 in /opt/conda/lib/python3.7/site-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->efficientnet) (2.8.1)
Requirement already satisfied: cycler>=0.10 in /opt/conda/lib/python3.7/site-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->efficientnet) (0.10.0)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /opt/conda/lib/python3.7/site-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->efficientnet) (2.4.7)
Requirement already satisfied: kiwisolver>=1.0.1 in /opt/conda/lib/python3.7/site-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->efficientnet) (1.2.0)
Requirement already satisfied: decorator>=4.3.0 in /opt/conda/lib/python3.7/site-packages (from networkx>=2.0->scikit-image->efficientnet) (4.4.2)
Installing collected packages: efficientnet
Successfully installed efficientnet-1.1.0
###Markdown
TPU Setup
###Code
AUTO = tf.data.experimental.AUTOTUNE
# Detect hardware, return appropriate distribution strategy
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU detection. No parameters necessary if TPU_NAME environment variable is set. On Kaggle this is always the case.
print('Running on TPU ', tpu.master())
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy() # default distribution strategy in Tensorflow. Works on CPU and single GPU.
print("REPLICAS: ", strategy.num_replicas_in_sync)
# Data access
GCS_DS_PATH = KaggleDatasets().get_gcs_path()
###Output
Running on TPU grpc://10.0.0.2:8470
REPLICAS: 8
###Markdown
Get Data Due to the overlap between the multiple-diseases category and the scab and rust categories, we will try to compensate for the difficulty in differentiating between these categories by using a series of binary classifiers. Each classifier will be trained on a different binary labeling derived from the original four classes. The important distinction from a one-vs-all approach is that we will have separate classifiers for rust-or-multiple-diseases and for rust only, and we do the same for scab. This will hopefully be beneficial because of the overlap between multiple diseases and each individual disease. Finally, a multiclass classifier will take the outputs of the first six classifiers as input and attempt to classify the actual class.
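As a rough sketch of that stacking step (the arrays and shapes below are made up purely for illustration; the real feature matrix is built further down with `np.hstack`):

```python
import numpy as np

# Hypothetical per-image probabilities from the six binary classifiers (3 images here)
binary_preds = [np.random.rand(3, 1) for _ in range(6)]  # each array has shape (n_images, 1)

stacked_features = np.hstack(binary_preds)  # shape (n_images, 6)
print(stacked_features.shape)  # (3, 6) -> these six columns feed the final multiclass classifier
```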
###Code
CSV_PATH = "/kaggle/input/plant-pathology-2020-fgvc7/"
BATCH_SIZE = 8 * strategy.num_replicas_in_sync
IMG_SIZE = 768
EPOCHS = 25
VERBOSE = 1
SHOW_GRAPHS = True
train_csv = pd.read_csv(CSV_PATH + 'train.csv')
test_csv = pd.read_csv(CSV_PATH + 'test.csv')
sub = pd.read_csv(CSV_PATH + 'sample_submission.csv')
# Note that we have to shuffle here and not anywhere else
train_csv = pd.read_csv(CSV_PATH + 'train.csv')
train_csv = train_csv.sample(frac=1).reset_index(drop=True)
test_csv = pd.read_csv(CSV_PATH + 'test.csv')
# Get full image paths
train_paths = train_csv.image_id.apply(lambda image: GCS_DS_PATH + '/images/' + image + '.jpg').values
test_paths = test_csv.image_id.apply(lambda image: GCS_DS_PATH + '/images/' + image + '.jpg').values
def decodeImage(filename, label=None, img_size=(IMG_SIZE, IMG_SIZE)):
bits = tf.io.read_file(filename)
image = tf.image.decode_jpeg(bits, channels = 3)
image = (tf.cast(image, tf.float32) / 127.5) - 1
# A few images are rotated
if image.shape != [1365, 2048, 3]:
image = tf.image.rot90(image)
image = tf.image.resize(image, img_size)
if label is None:
return image
else:
return image, label
def dataAugment(image, label=None, seed=2020):
image = tf.image.random_flip_left_right(image, seed=seed)
image = tf.image.random_flip_up_down(image, seed=seed)
if label is None:
return image
else:
return image, label
def getClassifierData(paths, labels, batch_size=BATCH_SIZE):
ds = (
tf.data.Dataset
.from_tensor_slices((paths, labels))  # use the paths argument rather than the global train_paths
.map(decodeImage, num_parallel_calls=AUTO)
.map(dataAugment, num_parallel_calls=AUTO)
#.repeat() # Using repeat leads to inconsistent results when predicting this set (counts are always off)
.batch(batch_size)
.prefetch(AUTO)
)
return ds
def calcWeights(df, label):
counts = df[label].value_counts()
return {0 : counts[1] / sum(counts),
1 : counts[0] / sum(counts)}
# C1 is healthy vs not healthy
train_csv['C1_Label'] = train_csv.apply(lambda row: 0 if row['healthy'] == 1 else 1, axis=1)
# C2 is rust vs not rust
train_csv['C2_Label'] = train_csv.apply(lambda row: 0 if row['rust'] == 1 else 1, axis=1)
# C3 is scab vs not scab
train_csv['C3_Label'] = train_csv.apply(lambda row: 0 if row['scab'] == 1 else 1, axis=1)
# C4 is both diseases vs one or none
train_csv['C4_Label'] = train_csv.apply(lambda row: 0 if row['multiple_diseases'] == 1 else 1, axis=1)
# C5 rust or both vs scab or none
train_csv['C5_Label'] = train_csv.apply(lambda row: 0 if row['multiple_diseases'] == 1 or row['rust'] == 1 else 1, axis=1)
# C6 is scab or both vs rust or none
train_csv['C6_Label'] = train_csv.apply(lambda row: 0 if row['multiple_diseases'] == 1 or row['scab'] == 1 else 1, axis=1)
if SHOW_GRAPHS:
train_csv.head(10)
test_dataset = (
tf.data.Dataset
.from_tensor_slices(test_paths)
.map(decodeImage, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
)
# Left the validation-data code in, in case we want to use it later
training_dict = {'C1' : {'Train' : getClassifierData(train_paths, train_csv['C1_Label'].values.reshape(-1,1)),
#'Val' : getClassifierData(val_data, 'C1_Label', batch_size=BATCH_SIZE),
'Weights' : calcWeights(train_csv, 'C1_Label')},
'C2' : {'Train' : getClassifierData(train_paths, train_csv['C2_Label'].values.reshape(-1,1)),
#'Val' : getClassifierData(val_data, 'C2_Label', batch_size=BATCH_SIZE),
'Weights' : calcWeights(train_csv, 'C2_Label')},
'C3' : {'Train' : getClassifierData(train_paths, train_csv['C3_Label'].values.reshape(-1,1)),
#'Val' : getClassifierData(val_data, 'C3_Label', batch_size=BATCH_SIZE),
'Weights' : calcWeights(train_csv, 'C3_Label')},
'C4' : {'Train' : getClassifierData(train_paths, train_csv['C4_Label'].values.reshape(-1,1)),
#'Val' : getClassifierData(val_data, 'C4_Label', batch_size=BATCH_SIZE),
'Weights' : calcWeights(train_csv, 'C4_Label')},
'C5' : {'Train' : getClassifierData(train_paths, train_csv['C5_Label'].values.reshape(-1,1)),
#'Val' : getClassifierData(val_data, 'C5_Label', batch_size=BATCH_SIZE),
'Weights' : calcWeights(train_csv, 'C5_Label')},
'C6' : {'Train' : getClassifierData(train_paths, train_csv['C6_Label'].values.reshape(-1,1)),
#'Val' : getClassifierData(val_data, 'C6_Label', batch_size=BATCH_SIZE),
'Weights' : calcWeights(train_csv, 'C6_Label')},
'Test' : test_dataset
}
###Output
_____no_output_____
###Markdown
Data Exploration
###Code
seed(8)
IMG_PATH = CSV_PATH + 'images/'
STEPS = train_csv.shape[0] // BATCH_SIZE
SHOW_IMAGES = False
NUM_PER_COL = 2
NUM_PER_ROW = 5
def showImages(df, num_per_col, num_per_row):
fig = plt.figure(figsize=(num_per_row*30, num_per_col*30))
for i in range(0, num_per_col * num_per_row):
plt.subplot(num_per_col, num_per_row, i+1)
image_id = df.iloc[randint(0, len(df)-1)]['image_id']
image = cv2.imread(IMG_PATH + image_id + '.jpg', cv2.IMREAD_UNCHANGED)
image = cv2.resize(image, (int(IMG_SIZE / 2), int(IMG_SIZE / 2)))
plt.imshow(image)
plt.axis('off')
plt.tight_layout()
plt.show()
healthy_plants = train_csv[train_csv['healthy'] == 1]
if SHOW_IMAGES:
showImages(healthy_plants, NUM_PER_COL, NUM_PER_ROW)
scab_plants = train_csv[train_csv['scab'] == 1]
if SHOW_IMAGES:
showImages(scab_plants, NUM_PER_COL, NUM_PER_ROW)
rust_plants = train_csv[train_csv['rust'] == 1]
if SHOW_IMAGES:
showImages(rust_plants, NUM_PER_COL, NUM_PER_ROW)
multiple_plants = train_csv[train_csv['multiple_diseases'] == 1]
if SHOW_IMAGES:
showImages(multiple_plants, NUM_PER_COL, NUM_PER_ROW)
if SHOW_GRAPHS:
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
labels = ['healthy', 'rust', 'scab', 'multiple_diseases']
counts = [train_csv['healthy'].value_counts()[1],
train_csv['rust'].value_counts()[1],
train_csv['scab'].value_counts()[1],
train_csv['multiple_diseases'].value_counts()[1]]
ax.bar(labels, counts)
plt.show()
if SHOW_GRAPHS:
fig = plt.figure(figsize=(15,35))
plt.subplot(621)
ax = sns.countplot(x='C1_Label', data=train_csv, order=[0, 1])
ax.set_title('C1 Train')
ax.set_xticklabels(['Healthy', 'Not Healthy'])
ax.set(xlabel='Disease')
plt.subplot(622)
ax = sns.countplot(x='C1_Label', data=train_csv, order=[0, 1])
ax.set_title('C1 Test')
ax.set_xticklabels(['Healthy', 'Not Healthy'])
ax.set(xlabel='Disease')
plt.subplot(623)
ax = sns.countplot(x='C2_Label', data=train_csv, order=[0, 1])
ax.set_title('C2 Train')
ax.set_xticklabels(['Rust', 'Not Rust'])
ax.set(xlabel='Disease')
plt.subplot(624)
ax = sns.countplot(x='C2_Label', data=train_csv, order=[0, 1])
ax.set_title('C2 Test')
ax.set_xticklabels(['Rust', 'Not Rust'])
ax.set(xlabel='Disease')
plt.subplot(625)
ax = sns.countplot(x='C3_Label', data=train_csv, order=[0, 1])
ax.set_title('C3 Train')
ax.set_xticklabels(['Scab', 'Not Scab'])
ax.set(xlabel='Disease')
plt.subplot(626)
ax = sns.countplot(x='C3_Label', data=train_csv, order=[0, 1])
ax.set_title('C3 Test')
ax.set_xticklabels(['Scab', 'Not Scab'])
ax.set(xlabel='Disease')
plt.subplot(627)
ax = sns.countplot(x='C4_Label', data=train_csv, order=[0, 1])
ax.set_title('C4 Train')
ax.set_xticklabels(['Multiple', 'Not Multiple'])
ax.set(xlabel='Disease')
plt.subplot(628)
ax = sns.countplot(x='C4_Label', data=train_csv, order=[0, 1])
ax.set_title('C4 Test')
ax.set_xticklabels(['Multiple', 'Not Multiple'])
ax.set(xlabel='Disease')
plt.subplot(629)
ax = sns.countplot(x='C5_Label', data=train_csv, order=[0, 1])
ax.set_title('C5 Train')
ax.set_xticklabels(['Rust or Multiple', 'Scab or None'])
ax.set(xlabel='Disease')
plt.subplot(6,2,10)
ax = sns.countplot(x='C5_Label', data=train_csv, order=[0, 1])
ax.set_title('C5 Test')
ax.set_xticklabels(['Rust or Multiple', 'Scab or None'])
ax.set(xlabel='Disease')
plt.subplot(6,2,11)
ax = sns.countplot(x='C6_Label', data=train_csv, order=[0, 1])
ax.set_title('C6 Train')
ax.set_xticklabels(['Scab or Multiple', 'Rust or None'])
ax.set(xlabel='Disease')
plt.subplot(6,2,12)
ax = sns.countplot(x='C6_Label', data=train_csv, order=[0, 1])
ax.set_title('C6 Test')
ax.set_xticklabels(['Scab or Multiple', 'Rust or None'])
ax.set(xlabel='Disease')
plt.show()
###Output
_____no_output_____
###Markdown
Model
###Code
LR_START = 0.00001
LR_MAX = 0.0001 * strategy.num_replicas_in_sync
LR_MIN = 0.00001
LR_RAMPUP_EPOCHS = 15
LR_SUSTAIN_EPOCHS = 3
LR_EXP_DECAY = .8
def lrfn(epoch):
if epoch < LR_RAMPUP_EPOCHS:
lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN
return lr
lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=False)
if SHOW_GRAPHS:
rng = [i for i in range(EPOCHS)]
y = [lrfn(x) for x in rng]
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
# We have to disable the steps due to the way the training datasets are set up now
def get_model():
base_model = efn.EfficientNetB7(weights='imagenet', include_top=False,
pooling='avg', input_shape=(IMG_SIZE, IMG_SIZE, 3))
x = base_model.output
output = Dense(1, activation="sigmoid")(x)
return Model(inputs=base_model.input, outputs=output)
def fitModel(label, training_dict, lr_callback=lr_callback, epochs=EPOCHS, steps=STEPS, verbose=VERBOSE):
with strategy.scope():
model = get_model()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(
training_dict[label]['Train'],
#steps_per_epoch = steps,
callbacks = [lr_callback],
#class_weight = training_dict[label]['Weights'] # Class weights still aren't working
epochs = epochs,
verbose = verbose
)
return model
def getPredictions(label, training_dict, steps=STEPS, verbose=VERBOSE):
model = fitModel(label, training_dict)
train_predictions = model.predict(training_dict[label]['Train'], #steps = steps,
use_multiprocessing = True, verbose = verbose)
test_predictions = model.predict(training_dict['Test'], #steps = steps,
use_multiprocessing = True, verbose = verbose)
# Releases tpu memory from model so other models can run
tf.tpu.experimental.initialize_tpu_system(tpu)
return train_predictions, test_predictions
predictions = [getPredictions('C1', training_dict),
getPredictions('C2', training_dict),
getPredictions('C3', training_dict),
getPredictions('C4', training_dict),
getPredictions('C5', training_dict),
getPredictions('C6', training_dict)]
train_predictions, test_predictions = zip(*predictions)
train_predictions = np.hstack(train_predictions)
test_predictions = np.hstack(test_predictions)
inputs = Input(shape=(train_predictions.shape[1],))
dense1 = Dense(200, activation='relu')(inputs)
dropout = Dropout(0.3)(dense1)
dense2 = Dense(50, activation='relu')(dropout)
outputs = Dense(4, activation='softmax')(dense2)
model_final = Model(inputs=inputs, outputs=outputs)
model_final.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model_final.fit(
train_predictions,
train_csv[['healthy', 'multiple_diseases', 'rust', 'scab']].values,
epochs = EPOCHS
)
###Output
Epoch 1/25
57/57 [==============================] - 0s 4ms/step - loss: 0.5367 - accuracy: 0.9198
Epoch 2/25
57/57 [==============================] - 0s 4ms/step - loss: 0.0295 - accuracy: 0.9995
Epoch 3/25
57/57 [==============================] - 0s 4ms/step - loss: 0.0070 - accuracy: 1.0000
Epoch 4/25
57/57 [==============================] - 0s 4ms/step - loss: 0.0038 - accuracy: 1.0000
Epoch 5/25
57/57 [==============================] - 0s 4ms/step - loss: 0.0020 - accuracy: 1.0000
Epoch 6/25
57/57 [==============================] - 0s 5ms/step - loss: 0.0014 - accuracy: 1.0000
Epoch 7/25
57/57 [==============================] - 0s 4ms/step - loss: 0.0013 - accuracy: 1.0000
Epoch 8/25
57/57 [==============================] - 0s 4ms/step - loss: 7.1795e-04 - accuracy: 1.0000
Epoch 9/25
57/57 [==============================] - 0s 4ms/step - loss: 5.5469e-04 - accuracy: 1.0000
Epoch 10/25
57/57 [==============================] - 0s 5ms/step - loss: 5.4226e-04 - accuracy: 1.0000
Epoch 11/25
57/57 [==============================] - 0s 4ms/step - loss: 4.0120e-04 - accuracy: 1.0000
Epoch 12/25
57/57 [==============================] - 0s 4ms/step - loss: 3.8169e-04 - accuracy: 1.0000
Epoch 13/25
57/57 [==============================] - 0s 4ms/step - loss: 2.8036e-04 - accuracy: 1.0000
Epoch 14/25
57/57 [==============================] - 0s 4ms/step - loss: 2.5784e-04 - accuracy: 1.0000
Epoch 15/25
57/57 [==============================] - 0s 4ms/step - loss: 2.1250e-04 - accuracy: 1.0000
Epoch 16/25
57/57 [==============================] - 0s 4ms/step - loss: 1.8089e-04 - accuracy: 1.0000
Epoch 17/25
57/57 [==============================] - 0s 4ms/step - loss: 1.9401e-04 - accuracy: 1.0000
Epoch 18/25
57/57 [==============================] - 0s 4ms/step - loss: 1.4937e-04 - accuracy: 1.0000
Epoch 19/25
57/57 [==============================] - 0s 4ms/step - loss: 1.3970e-04 - accuracy: 1.0000
Epoch 20/25
57/57 [==============================] - 0s 4ms/step - loss: 1.1278e-04 - accuracy: 1.0000
Epoch 21/25
57/57 [==============================] - 0s 4ms/step - loss: 9.8410e-05 - accuracy: 1.0000
Epoch 22/25
57/57 [==============================] - 0s 4ms/step - loss: 9.1379e-05 - accuracy: 1.0000
Epoch 23/25
57/57 [==============================] - 0s 4ms/step - loss: 8.2861e-05 - accuracy: 1.0000
Epoch 24/25
57/57 [==============================] - 0s 4ms/step - loss: 7.4933e-05 - accuracy: 1.0000
Epoch 25/25
57/57 [==============================] - 0s 5ms/step - loss: 7.1600e-05 - accuracy: 1.0000
###Markdown
Predictions
###Code
sub_predictions = model_final.predict(test_predictions)
sub_predictions = pd.DataFrame(data=sub_predictions, columns=['healthy', 'multiple_diseases', 'rust', 'scab'])
sub_predictions = pd.concat([test_csv, sub_predictions], axis=1)
sub_predictions
sub_predictions.to_csv('submission.csv', index=False)
###Output
_____no_output_____ |
Model backlog/Train/65-melanoma-5fold-EfficientNetB0 RAdam.ipynb | ###Markdown
Dependencies
###Code
#@title
!pip install --quiet efficientnet
# !pip install --quiet image-classifiers
#@title
import warnings, json, re, glob, math
from scripts_step_lr_schedulers import *
from melanoma_utility_scripts import *
from sklearn.model_selection import KFold
import tensorflow.keras.layers as L
import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras import optimizers, layers, metrics, losses, Model
import efficientnet.tfkeras as efn
# from classification_models.tfkeras import Classifiers
import tensorflow_addons as tfa
SEED = 0
seed_everything(SEED)
warnings.filterwarnings("ignore")
###Output
_____no_output_____
###Markdown
TPU configuration
###Code
#@title
strategy, tpu = set_up_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
AUTO = tf.data.experimental.AUTOTUNE
###Output
_____no_output_____
###Markdown
Model parameters
###Code
#@title
config = {
"HEIGHT": 256,
"WIDTH": 256,
"CHANNELS": 3,
"BATCH_SIZE": 128,
"EPOCHS": 12,
"LEARNING_RATE": 3e-4,
"ES_PATIENCE": 10,
"N_FOLDS": 5,
"N_USED_FOLDS": 5,
"TTA_STEPS": 25,
"BASE_MODEL": 'EfficientNetB0',
"BASE_MODEL_WEIGHTS": 'noisy-student',
"DATASET_PATH": 'melanoma-256x256'
}
with open(MODEL_BASE_PATH + 'config.json', 'w') as json_file:
json.dump(json.loads(json.dumps(config)), json_file)
config
###Output
_____no_output_____
###Markdown
Load data
###Code
#@title
database_base_path = COLAB_BASE_PATH + 'Data/'
k_fold = pd.read_csv(database_base_path + 'train.csv')
test = pd.read_csv(database_base_path + 'test.csv')
print('Train samples: %d' % len(k_fold))
display(k_fold.head())
print(f'Test samples: {len(test)}')
display(test.head())
GCS_PATH = 'gs://kds-00a03a913a177ffd710f19e39a9e65d51860d02557f57cc1d1d8e589'
TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test*.tfrec')
###Output
Train samples: 33126
###Markdown
Augmentations
###Code
#@title
def data_augment(image, label):
p_spatial = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
p_spatial2 = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
p_rotate = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
p_crop = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
p_pixel = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
### Spatial-level transforms
if p_spatial >= .2: # flips
image['input_image'] = tf.image.random_flip_left_right(image['input_image'])
image['input_image'] = tf.image.random_flip_up_down(image['input_image'])
if p_spatial >= .7:
image['input_image'] = tf.image.transpose(image['input_image'])
if p_rotate >= .8: # rotate 270º
image['input_image'] = tf.image.rot90(image['input_image'], k=3)
elif p_rotate >= .6: # rotate 180º
image['input_image'] = tf.image.rot90(image['input_image'], k=2)
elif p_rotate >= .4: # rotate 90º
image['input_image'] = tf.image.rot90(image['input_image'], k=1)
if p_spatial2 >= .6:
if p_spatial2 >= .9:
image['input_image'] = transform_rotation(image['input_image'], config['HEIGHT'], 180.)
elif p_spatial2 >= .8:
image['input_image'] = transform_zoom(image['input_image'], config['HEIGHT'], 8., 8.)
elif p_spatial2 >= .7:
image['input_image'] = transform_shift(image['input_image'], config['HEIGHT'], 8., 8.)
else:
image['input_image'] = transform_shear(image['input_image'], config['HEIGHT'], 2.)
if p_crop >= .6: # crops
if p_crop >= .8:
image['input_image'] = tf.image.random_crop(image['input_image'], size=[int(config['HEIGHT']*.8), int(config['WIDTH']*.8), config['CHANNELS']])
elif p_crop >= .7:
image['input_image'] = tf.image.random_crop(image['input_image'], size=[int(config['HEIGHT']*.9), int(config['WIDTH']*.9), config['CHANNELS']])
else:
image['input_image'] = tf.image.central_crop(image['input_image'], central_fraction=.8)
image['input_image'] = tf.image.resize(image['input_image'], size=[config['HEIGHT'], config['WIDTH']])
if p_pixel >= .6: # Pixel-level transforms
if p_pixel >= .9:
image['input_image'] = tf.image.random_hue(image['input_image'], 0.01)
elif p_pixel >= .8:
image['input_image'] = tf.image.random_saturation(image['input_image'], 0.7, 1.3)
elif p_pixel >= .7:
image['input_image'] = tf.image.random_contrast(image['input_image'], 0.8, 1.2)
else:
image['input_image'] = tf.image.random_brightness(image['input_image'], 0.1)
return image, label
###Output
_____no_output_____
###Markdown
Auxiliary functions
###Code
#@title
# Datasets utility functions
def read_labeled_tfrecord(example, height=config['HEIGHT'], width=config['WIDTH'], channels=config['CHANNELS']):
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'], height, width, channels)
label = tf.cast(example['target'], tf.float32)
# meta features
data = {}
data['patient_id'] = tf.cast(example['patient_id'], tf.int32)
data['sex'] = tf.cast(example['sex'], tf.int32)
data['age_approx'] = tf.cast(example['age_approx'], tf.int32)
data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32)
return {'input_image': image, 'input_meta': data}, label # returns a dataset of (image, data, label)
def read_labeled_tfrecord_eval(example, height=config['HEIGHT'], width=config['WIDTH'], channels=config['CHANNELS']):
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'], height, width, channels)
label = tf.cast(example['target'], tf.float32)
image_name = example['image_name']
# meta features
data = {}
data['patient_id'] = tf.cast(example['patient_id'], tf.int32)
data['sex'] = tf.cast(example['sex'], tf.int32)
data['age_approx'] = tf.cast(example['age_approx'], tf.int32)
data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32)
return {'input_image': image, 'input_meta': data}, label, image_name # returns a dataset of (image, data, label, image_name)
def load_dataset(filenames, ordered=False, buffer_size=-1):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False # disable order, increase speed
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=buffer_size) # automatically interleaves reads from multiple files
dataset = dataset.with_options(ignore_order) # uses data as soon as it streams in, rather than in its original order
dataset = dataset.map(read_labeled_tfrecord, num_parallel_calls=buffer_size)
return dataset # returns a dataset of (image, data, label)
def load_dataset_eval(filenames, buffer_size=-1):
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=buffer_size) # automatically interleaves reads from multiple files
dataset = dataset.map(read_labeled_tfrecord_eval, num_parallel_calls=buffer_size)
return dataset # returns a dataset of (image, data, label, image_name)
def get_training_dataset(filenames, batch_size, buffer_size=-1):
dataset = load_dataset(filenames, ordered=False, buffer_size=buffer_size)
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
dataset = dataset.repeat() # the training dataset must repeat for several epochs
dataset = dataset.shuffle(2048)
dataset = dataset.batch(batch_size, drop_remainder=True) # slightly faster with fixed tensor sizes
dataset = dataset.prefetch(buffer_size) # prefetch next batch while training (autotune prefetch buffer size)
return dataset
def get_validation_dataset(filenames, ordered=True, repeated=False, batch_size=32, buffer_size=-1):
dataset = load_dataset(filenames, ordered=ordered, buffer_size=buffer_size)
if repeated:
dataset = dataset.repeat()
dataset = dataset.shuffle(2048)
dataset = dataset.batch(batch_size, drop_remainder=repeated)
dataset = dataset.prefetch(buffer_size)
return dataset
def get_eval_dataset(filenames, batch_size=32, buffer_size=-1):
dataset = load_dataset_eval(filenames, buffer_size=buffer_size)
dataset = dataset.batch(batch_size, drop_remainder=False)
dataset = dataset.prefetch(buffer_size)
return dataset
# Test function
def read_unlabeled_tfrecord(example, height=config['HEIGHT'], width=config['WIDTH'], channels=config['CHANNELS']):
example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
image = decode_image(example['image'], height, width, channels)
image_name = example['image_name']
# meta features
data = {}
data['patient_id'] = tf.cast(example['patient_id'], tf.int32)
data['sex'] = tf.cast(example['sex'], tf.int32)
data['age_approx'] = tf.cast(example['age_approx'], tf.int32)
data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32)
return {'input_image': image, 'input_tabular': data}, image_name # returns a dataset of (image, data, image_name)
def load_dataset_test(filenames, buffer_size=-1):
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=buffer_size) # automatically interleaves reads from multiple files
dataset = dataset.map(read_unlabeled_tfrecord, num_parallel_calls=buffer_size)
# returns a dataset of (image, data, image_name) pairs (this reader handles the unlabeled test records)
return dataset
def get_test_dataset(filenames, batch_size=32, buffer_size=-1, tta=False):
dataset = load_dataset_test(filenames, buffer_size=buffer_size)
if tta:
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
dataset = dataset.batch(batch_size, drop_remainder=False)
dataset = dataset.prefetch(buffer_size)
return dataset
# Advanced augmentations
def transform_rotation(image, height, rotation):
# input image - is one image of size [dim,dim,3] not a batch of [b,dim,dim,3]
# output - image randomly rotated
DIM = height
XDIM = DIM%2 #fix for size 331
rotation = rotation * tf.random.normal([1],dtype='float32')
# CONVERT DEGREES TO RADIANS
rotation = math.pi * rotation / 180.
# ROTATION MATRIX
c1 = tf.math.cos(rotation)
s1 = tf.math.sin(rotation)
one = tf.constant([1],dtype='float32')
zero = tf.constant([0],dtype='float32')
rotation_matrix = tf.reshape( tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0),[3,3] )
# LIST DESTINATION PIXEL INDICES
x = tf.repeat( tf.range(DIM//2,-DIM//2,-1), DIM )
y = tf.tile( tf.range(-DIM//2,DIM//2),[DIM] )
z = tf.ones([DIM*DIM],dtype='int32')
idx = tf.stack( [x,y,z] )
# ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS
idx2 = K.dot(rotation_matrix,tf.cast(idx,dtype='float32'))
idx2 = K.cast(idx2,dtype='int32')
idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2)
# FIND ORIGIN PIXEL VALUES
idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] )
d = tf.gather_nd(image, tf.transpose(idx3))
return tf.reshape(d,[DIM,DIM,3])
def transform_shear(image, height, shear):
# input image - is one image of size [dim,dim,3] not a batch of [b,dim,dim,3]
# output - image randomly sheared
DIM = height
XDIM = DIM%2 #fix for size 331
shear = shear * tf.random.normal([1],dtype='float32')
shear = math.pi * shear / 180.
# SHEAR MATRIX
one = tf.constant([1],dtype='float32')
zero = tf.constant([0],dtype='float32')
c2 = tf.math.cos(shear)
s2 = tf.math.sin(shear)
shear_matrix = tf.reshape( tf.concat([one,s2,zero, zero,c2,zero, zero,zero,one],axis=0),[3,3] )
# LIST DESTINATION PIXEL INDICES
x = tf.repeat( tf.range(DIM//2,-DIM//2,-1), DIM )
y = tf.tile( tf.range(-DIM//2,DIM//2),[DIM] )
z = tf.ones([DIM*DIM],dtype='int32')
idx = tf.stack( [x,y,z] )
# ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS
idx2 = K.dot(shear_matrix,tf.cast(idx,dtype='float32'))
idx2 = K.cast(idx2,dtype='int32')
idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2)
# FIND ORIGIN PIXEL VALUES
idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] )
d = tf.gather_nd(image, tf.transpose(idx3))
return tf.reshape(d,[DIM,DIM,3])
def transform_shift(image, height, h_shift, w_shift):
# input image - is one image of size [dim,dim,3] not a batch of [b,dim,dim,3]
# output - image randomly shifted
DIM = height
XDIM = DIM%2 #fix for size 331
height_shift = h_shift * tf.random.normal([1],dtype='float32')
width_shift = w_shift * tf.random.normal([1],dtype='float32')
one = tf.constant([1],dtype='float32')
zero = tf.constant([0],dtype='float32')
# SHIFT MATRIX
shift_matrix = tf.reshape( tf.concat([one,zero,height_shift, zero,one,width_shift, zero,zero,one],axis=0),[3,3] )
# LIST DESTINATION PIXEL INDICES
x = tf.repeat( tf.range(DIM//2,-DIM//2,-1), DIM )
y = tf.tile( tf.range(-DIM//2,DIM//2),[DIM] )
z = tf.ones([DIM*DIM],dtype='int32')
idx = tf.stack( [x,y,z] )
# ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS
idx2 = K.dot(shift_matrix,tf.cast(idx,dtype='float32'))
idx2 = K.cast(idx2,dtype='int32')
idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2)
# FIND ORIGIN PIXEL VALUES
idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] )
d = tf.gather_nd(image, tf.transpose(idx3))
return tf.reshape(d,[DIM,DIM,3])
def transform_zoom(image, height, h_zoom, w_zoom):
# input image - is one image of size [dim,dim,3] not a batch of [b,dim,dim,3]
# output - image randomly zoomed
DIM = height
XDIM = DIM%2 #fix for size 331
height_zoom = 1.0 + tf.random.normal([1],dtype='float32')/h_zoom
width_zoom = 1.0 + tf.random.normal([1],dtype='float32')/w_zoom
one = tf.constant([1],dtype='float32')
zero = tf.constant([0],dtype='float32')
# ZOOM MATRIX
zoom_matrix = tf.reshape( tf.concat([one/height_zoom,zero,zero, zero,one/width_zoom,zero, zero,zero,one],axis=0),[3,3] )
# LIST DESTINATION PIXEL INDICES
x = tf.repeat( tf.range(DIM//2,-DIM//2,-1), DIM )
y = tf.tile( tf.range(-DIM//2,DIM//2),[DIM] )
z = tf.ones([DIM*DIM],dtype='int32')
idx = tf.stack( [x,y,z] )
# ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS
idx2 = K.dot(zoom_matrix,tf.cast(idx,dtype='float32'))
idx2 = K.cast(idx2,dtype='int32')
idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2)
# FIND ORIGIN PIXEL VALUES
idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] )
d = tf.gather_nd(image, tf.transpose(idx3))
return tf.reshape(d,[DIM,DIM,3])
###Output
_____no_output_____
###Markdown
Learning rate scheduler
###Code
#@title
lr_min = 1e-6
# lr_start = 5e-6
lr_max = config['LEARNING_RATE']
steps_per_epoch = 24844 // config['BATCH_SIZE']
total_steps = config['EPOCHS'] * steps_per_epoch
warmup_steps = steps_per_epoch * 5
# hold_max_steps = 0
# step_decay = .8
# step_size = steps_per_epoch * 1
# rng = [i for i in range(0, total_steps, 32)]
# y = [step_schedule_with_warmup(tf.cast(x, tf.float32), step_size=step_size,
# warmup_steps=warmup_steps, hold_max_steps=hold_max_steps,
# lr_start=lr_start, lr_max=lr_max, step_decay=step_decay) for x in rng]
# sns.set(style="whitegrid")
# fig, ax = plt.subplots(figsize=(20, 6))
# plt.plot(rng, y)
# print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
###Output
_____no_output_____
###Markdown
Model
###Code
#@title
# Initial bias
pos = len(k_fold[k_fold['target'] == 1])
neg = len(k_fold[k_fold['target'] == 0])
initial_bias = np.log([pos/neg])
print('Bias')
print(pos)
print(neg)
print(initial_bias)
# class weights
total = len(k_fold)
weight_for_0 = (1 / neg)*(total)/2.0
weight_for_1 = (1 / pos)*(total)/2.0
class_weight = {0: weight_for_0, 1: weight_for_1}
print('Class weight')
print(class_weight)
def model_fn(input_shape):
input_image = L.Input(shape=input_shape, name='input_image')
base_model = efn.EfficientNetB0(weights=config['BASE_MODEL_WEIGHTS'],
include_top=False)
x = base_model(input_image)
x = L.GlobalAveragePooling2D()(x)
output = L.Dense(1, activation='sigmoid', name='output',
bias_initializer=tf.keras.initializers.Constant(initial_bias))(x)
model = Model(inputs=input_image, outputs=output)
return model
###Output
_____no_output_____
###Markdown
Training
###Code
# Evaluation
eval_dataset = get_eval_dataset(TRAINING_FILENAMES, batch_size=config['BATCH_SIZE'], buffer_size=AUTO)
image_names = next(iter(eval_dataset.unbatch().map(lambda data, label, image_name: image_name).batch(count_data_items(TRAINING_FILENAMES)))).numpy().astype('U')
image_data = eval_dataset.map(lambda data, label, image_name: data)
# Resample dataframe
k_fold = k_fold[k_fold['image_name'].isin(image_names)]
# Test
NUM_TEST_IMAGES = len(test)
test_preds = np.zeros((NUM_TEST_IMAGES, 1))
test_preds_last = np.zeros((NUM_TEST_IMAGES, 1))
test_dataset = get_test_dataset(TEST_FILENAMES, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, tta=True)
image_names_test = next(iter(test_dataset.unbatch().map(lambda data, image_name: image_name).batch(NUM_TEST_IMAGES))).numpy().astype('U')
test_image_data = test_dataset.map(lambda data, image_name: data)
history_list = []
k_fold_best = k_fold.copy()
kfold = KFold(config['N_FOLDS'], shuffle=True, random_state=SEED)
for n_fold, (trn_idx, val_idx) in enumerate(kfold.split(TRAINING_FILENAMES)):
if n_fold < config['N_USED_FOLDS']:
n_fold +=1
print('\nFOLD: %d' % (n_fold))
tf.tpu.experimental.initialize_tpu_system(tpu)
K.clear_session()
### Data
train_filenames = np.array(TRAINING_FILENAMES)[trn_idx]
valid_filenames = np.array(TRAINING_FILENAMES)[val_idx]
steps_per_epoch = count_data_items(train_filenames) // config['BATCH_SIZE']
# Train model
model_path = f'model_fold_{n_fold}.h5'
es = EarlyStopping(monitor='val_auc', mode='max', patience=config['ES_PATIENCE'],
restore_best_weights=False, verbose=1)
checkpoint = ModelCheckpoint((MODEL_BASE_PATH + model_path), monitor='val_auc', mode='max',
save_best_only=True, save_weights_only=True)
with strategy.scope():
model = model_fn((config['HEIGHT'], config['WIDTH'], config['CHANNELS']))
optimizer = tfa.optimizers.RectifiedAdam(lr=lr_max,
total_steps=total_steps,
warmup_proportion=(warmup_steps / total_steps),
min_lr=lr_min)
model.compile(optimizer, loss=losses.BinaryCrossentropy(label_smoothing=0.2),
metrics=[metrics.AUC()])
history = model.fit(get_training_dataset(train_filenames, batch_size=config['BATCH_SIZE'], buffer_size=AUTO),
validation_data=get_validation_dataset(valid_filenames, ordered=True, repeated=False,
batch_size=config['BATCH_SIZE'], buffer_size=AUTO),
epochs=config['EPOCHS'],
steps_per_epoch=steps_per_epoch ,
callbacks=[checkpoint, es],
class_weight=class_weight,
verbose=2).history
# save last epoch weights
model.save_weights((MODEL_BASE_PATH + 'last_' + model_path))
history_list.append(history)
# Get validation IDs
valid_dataset = get_eval_dataset(valid_filenames, batch_size=config['BATCH_SIZE'], buffer_size=AUTO)
valid_image_names = next(iter(valid_dataset.unbatch().map(lambda data, label, image_name: image_name).batch(count_data_items(valid_filenames)))).numpy().astype('U')
k_fold[f'fold_{n_fold}'] = k_fold.apply(lambda x: 'validation' if x['image_name'] in valid_image_names else 'train', axis=1)
k_fold_best[f'fold_{n_fold}'] = k_fold_best.apply(lambda x: 'validation' if x['image_name'] in valid_image_names else 'train', axis=1)
##### Last model #####
print('Last model evaluation...')
preds = model.predict(image_data)
name_preds_eval = dict(zip(image_names, preds.reshape(len(preds))))
k_fold[f'pred_fold_{n_fold}'] = k_fold.apply(lambda x: name_preds_eval[x['image_name']], axis=1)
print(f'Last model inference (TTA {config["TTA_STEPS"]} steps)...')
for step in range(config['TTA_STEPS']):
test_preds_last += model.predict(test_image_data)
##### Best model #####
print('Best model evaluation...')
model.load_weights(MODEL_BASE_PATH + model_path)
preds = model.predict(image_data)
name_preds_eval = dict(zip(image_names, preds.reshape(len(preds))))
k_fold_best[f'pred_fold_{n_fold}'] = k_fold_best.apply(lambda x: name_preds_eval[x['image_name']], axis=1)
print(f'Best model inference (TTA {config["TTA_STEPS"]} steps)...')
for step in range(config['TTA_STEPS']):
test_preds += model.predict(test_image_data)
# normalize preds
test_preds /= (config['N_USED_FOLDS'] * config['TTA_STEPS'])
test_preds_last /= (config['N_USED_FOLDS'] * config['TTA_STEPS'])
name_preds = dict(zip(image_names_test, test_preds.reshape(NUM_TEST_IMAGES)))
name_preds_last = dict(zip(image_names_test, test_preds_last.reshape(NUM_TEST_IMAGES)))
test['target'] = test.apply(lambda x: name_preds[x['image_name']], axis=1)
test['target_last'] = test.apply(lambda x: name_preds_last[x['image_name']], axis=1)
###Output
FOLD: 1
WARNING:tensorflow:TPU system grpc://10.59.153.10:8470 has already been initialized. Reinitializing the TPU can cause previously created variables on TPU to be lost.
###Markdown
Model loss graph
###Code
#@title
for n_fold in range(config['N_USED_FOLDS']):
print(f'Fold: {n_fold + 1}')
plot_metrics(history_list[n_fold])
###Output
Fold: 1
###Markdown
Model loss graph aggregated
###Code
#@title
plot_metrics_agg(history_list, config['N_USED_FOLDS'])
###Output
_____no_output_____
###Markdown
Model evaluation (best)
###Code
#@title
display(evaluate_model(k_fold_best, config['N_USED_FOLDS']).style.applymap(color_map))
display(evaluate_model_Subset(k_fold_best, config['N_USED_FOLDS']).style.applymap(color_map))
###Output
_____no_output_____
###Markdown
Model evaluation (last)
###Code
#@title
display(evaluate_model(k_fold, config['N_USED_FOLDS']).style.applymap(color_map))
display(evaluate_model_Subset(k_fold, config['N_USED_FOLDS']).style.applymap(color_map))
###Output
_____no_output_____
###Markdown
Confusion matrix
###Code
#@title
for n_fold in range(config['N_USED_FOLDS']):
n_fold += 1
pred_col = f'pred_fold_{n_fold}'
train_set = k_fold_best[k_fold_best[f'fold_{n_fold}'] == 'train']
valid_set = k_fold_best[k_fold_best[f'fold_{n_fold}'] == 'validation']
print(f'Fold: {n_fold}')
plot_confusion_matrix(train_set['target'], np.round(train_set[pred_col]),
valid_set['target'], np.round(valid_set[pred_col]))
###Output
Fold: 1
###Markdown
Visualize predictions
###Code
#@title
k_fold['pred'] = 0
for n_fold in range(config['N_USED_FOLDS']):
k_fold['pred'] += k_fold[f'pred_fold_{n_fold+1}'] / config['N_FOLDS']
print('Label/prediction distribution')
print(f"Train positive labels: {len(k_fold[k_fold['target'] > .5])}")
print(f"Train positive predictions: {len(k_fold[k_fold['pred'] > .5])}")
print(f"Train positive correct predictions: {len(k_fold[(k_fold['target'] > .5) & (k_fold['pred'] > .5)])}")
print('Top 10 samples')
display(k_fold[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'diagnosis',
'target', 'pred'] + [c for c in k_fold.columns if (c.startswith('pred_fold'))]].head(10))
print('Top 10 positive samples')
display(k_fold[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'diagnosis',
'target', 'pred'] + [c for c in k_fold.columns if (c.startswith('pred_fold'))]].query('target == 1').head(10))
print('Top 10 predicted positive samples')
display(k_fold[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'diagnosis',
'target', 'pred'] + [c for c in k_fold.columns if (c.startswith('pred_fold'))]].query('pred > .5').head(10))
###Output
Label/prediction distribution
Train positive labels: 581
Train positive predictions: 4164
Train positive correct predictions: 573
Top 10 samples
###Markdown
Visualize test predictions
###Code
#@title
print(f"Test predictions {len(test[test['target'] > .5])}|{len(test[test['target'] <= .5])}")
print(f"Test predictions (last) {len(test[test['target_last'] > .5])}|{len(test[test['target_last'] <= .5])}")
print('Top 10 samples')
display(test[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'target', 'target_last']
+ [c for c in test.columns if (c.startswith('pred_fold'))]].head(10))
print('Top 10 positive samples')
display(test[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'target', 'target_last']
+ [c for c in test.columns if (c.startswith('pred_fold'))]].query('target > .5').head(10))
print('Top 10 positive samples (last)')
display(test[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'target', 'target_last']
+ [c for c in test.columns if (c.startswith('pred_fold'))]].query('target_last > .5').head(10))
###Output
Test predictions 1784|9198
Test predictions (last) 1624|9358
Top 10 samples
###Markdown
Test set predictions
###Code
#@title
submission = pd.read_csv(database_base_path + 'sample_submission.csv')
submission['target'] = test['target']
submission['target_last'] = test['target_last']
submission['target_blend'] = (test['target'] * .5) + (test['target_last'] * .5)
display(submission.head(10))
display(submission.describe())
### BEST ###
submission[['image_name', 'target']].to_csv(SUBMISSION_PATH, index=False)
### LAST ###
submission_last = submission[['image_name', 'target_last']]
submission_last.columns = ['image_name', 'target']
submission_last.to_csv(SUBMISSION_LAST_PATH, index=False)
### BLEND ###
submission_blend = submission[['image_name', 'target_blend']]
submission_blend.columns = ['image_name', 'target']
submission_blend.to_csv(SUBMISSION_BLEND_PATH, index=False)
###Output
_____no_output_____ |
AI/numpy_basics.ipynb | ###Markdown
Importing Numpy
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Creating a Numpy Array
###Code
x = np.array([[1,2,3],[4,5,6],[7,8,9]])
print(x, type(x))
###Output
[[1 2 3]
[4 5 6]
[7 8 9]] <class 'numpy.ndarray'>
###Markdown
Various Ways of indexing
###Code
x[1,2]
x[:,1]
x[:,2]
x[:-1, :-1]
x[...]
x[:2,...]
###Output
_____no_output_____
###Markdown
Basic Addition
###Code
x+1
x-2
###Output
_____no_output_____
###Markdown
Basic Data Types in Numpy
###Code
x.dtype
np.array(['ramesh', 'suresh']).dtype
np.array(['ramu', 'rami']).dtype
###Output
_____no_output_____
###Markdown
Symbols for Various DataTypes `i` - integer`b` - boolean`u` - unsigned integer`f` - float`c` - complex float`m` - timedelta`M` - datetime`O` - object`S` - string`U` - unicode string`V` - fixed chunk of memory for other type ( void )`>` means big-endian and `<` means little-endian. The number indicates the size of each element: for most types it is the itemsize in bytes, but for Unicode strings (`U`) it counts characters. The 6 in '<U6' therefore means up to 6 characters per element, and since each Unicode character is stored in 4 bytes, the itemsize is 24 bytes. Creating a DataType Object
###Code
dt = np.dtype('<c8')
np.array([2+3j, 5+6j], dtype=dt)
dt.itemsize
###Output
_____no_output_____
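###Markdown
A quick check of the size codes described above (a small sketch; the exact reprs may vary slightly between NumPy versions):

```python
import numpy as np

names = np.array(['ramesh', 'suresh'])
print(names.dtype)               # <U6: little-endian Unicode, up to 6 characters per element
print(names.dtype.itemsize)      # 24: 6 characters * 4 bytes per character

print(np.dtype('<c8').itemsize)  # 8: a complex64 ('<c8') really does occupy 8 bytes
```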
###Markdown
Structured Data Type It is used to make NumPy arrays whose elements are record-like structures with named fields. Each element must be of fixed size.
###Code
class lattitude:
def __init__(self, lattitude):
assert lattitude<=90, "Lattittude must be less than or equal to 90 degrees."
assert lattitude>=-90, "Lattittude must be greater than or equal to -90 degrees."
self.lat = lattitude
class longitude:
def __init__(self, longitude):
assert longitude<=180, "Longitude must be less than or equal to 180 degrees."
assert longitude>=-180, "Longitude must be greater than or equal to -180 degrees."
self.lon = longitude
dt = np.dtype(lattitude, longitude)
dt.itemsize
y = np.array([(lattitude(0),longitude(0)),
(lattitude(-6.3572375290155), longitude(-63.572375290155))], dtype=dt)
y[1][0].lat
###Output
_____no_output_____
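###Markdown
Note that `np.dtype(lattitude, longitude)` above actually falls back to a generic `object` dtype here rather than a structure of two floats (newer NumPy versions warn about or reject this usage), so the array just stores Python objects. A minimal sketch of a true structured dtype, using hypothetical field names, looks like this:

```python
import numpy as np

coord_dt = np.dtype([('lat', 'f8'), ('lon', 'f8')])  # two 8-byte floats per element
points = np.array([(0.0, 0.0), (-6.357237, -63.572375)], dtype=coord_dt)

print(coord_dt.itemsize)  # 16 bytes per element (8 + 8)
print(points['lat'])      # access one field across the whole array
print(points[1]['lon'])   # or index an element, then a field
```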
###Markdown
10 Methods in Numpy
###Code
#sorting an array
np.sort(-x, axis=1)
#Finding max
print(np.argmax(x), np.max(x), np.argmax(x, axis=1))
#Dot product
np.dot(x[0], x[1])
#Outer Product
np.outer(x[0], x[1])
#Determinant
np.linalg.det(x)
#Covariance Matrix
np.cov(x)
#Solving Ax=B
x = np.array([[3,4,5], [7,4,2], [9,7,1]])
np.linalg.solve(np.transpose(x), x[0])
#Finding which positions have value between 2 and 6
np.logical_and(x>2, x<6)
#Getting EigenValues of a matrix
np.linalg.eig(x)
#upper and lower triangular matrices
np.triu(x), np.tril(x)
#Cholesky Decomposition
np.linalg.cholesky(np.triu(x)*np.tril(x))
###Output
_____no_output_____
###Markdown
Finding Unique Elements
###Code
np.unique(x)
###Output
_____no_output_____ |
notebooks/10-reproducible-environment.ipynb | ###Markdown
Tutorial 1: Reproducible Environments(Continued from `README.md`) Overview* Requirements: The Bare Minimum * Using a Data Science Template: `cookiecutter`* Virtual Environments: `conda` and environment files* Revision Control: git and a git workflow * Installing, Enabling, and using nbdime* The Data Science DAG * make, Makefiles and data flow* Python Modules * Creating an editable module* Testing: doctest, pytest, hypothesis We'll start out by checking that all the requirements are met from the previous exercises (started in `README.md`) Exercise 1: Install the requirements* Anaconda* Cookiecutter* make* git Test your installation
###Code
!conda --version # or `$CONDA_EXE --version` in some environments
!make --version
!git --version
###Output
git version 2.20.1 (Apple Git-117)
###Markdown
Exercise 2: Start your cookiecutter-based projectCreate a project called `Bus Number Tutorial`: Use conda as your virtualenv manager Use python 3.6 or greaterWhen complete, you should have a fully populated project directory, complete with customized README.md.We will be working in this project from now on. Exercise 2b:Explore the `README.md` from your new `bus_number_tutorial` repo(Hint: You can use the `%load` magic, or `!cat` to look at it in your notebook) %load '../README.md'Bus Number Tutorial==============================Increase my bus numberGETTING STARTED---------------* Create and switch to the virtual environment:```cd bus_number_tutorialmake create_environmentconda activate bus_number_tutorialmake requirements```* Explore the notebooks in the `notebooks` directoryProject Organization------------* `LICENSE`* `Makefile` * top-level makefile. Type `make` for a list of valid commands* `README.md` * this file* `data` * Data directory. often symlinked to a filesystem with lots of space * `data/raw` * Raw (immutable) hash-verified downloads * `data/interim` * Extracted and interim data representations * `data/processed` * The final, canonical data sets for modeling.* `docs` * A default Sphinx project; see sphinx-doc.org for details* `models` * Trained and serialized models, model predictions, or model summaries * `models/trained` * Trained models * `models/output` * predictions and transformations from the trained models* `notebooks` * Jupyter notebooks. Naming convention is a number (for ordering), the creator's initials, and a short `-` delimited description, e.g. `1.0-jqp-initial-data-exploration`.* `references` * Data dictionaries, manuals, and all other explanatory materials.* `reports` * Generated analysis as HTML, PDF, LaTeX, etc. * `reports/figures` * Generated graphics and figures to be used in reporting * `reports/tables` * Generated data tables to be used in reporting * `reports/summary` * Generated summary information to be used in reporting* `requirements.txt` * (if using pip+virtualenv) The requirements file for reproducing the analysis environment, e.g. generated with `pip freeze > requirements.txt`* `environment.yml` * (if using conda) The YAML file for reproducing the analysis environment* `setup.py` * Turns contents of `src` into a pip-installable python module (`pip install -e .`) so it can be imported in python code* `src` * Source code for use in this project. * `src/__init__.py` * Makes src a Python module * `src/data` * Scripts to fetch or generate data. In particular: * `src/data/make_dataset.py` * Run with `python -m src.data.make_dataset fetch` or `python -m src.data.make_dataset process` * `src/analysis` * Scripts to turn datasets into output products * `src/models` * Scripts to train models and then use trained models to make predictions. e.g. `predict_model.py`, `train_model.py`* `tox.ini` * tox file with settings for running tox; see tox.testrun.org--------This project was built using cookiecutter-easydata, an experimental fork of [cookiecutter-data-science](https://github.com/drivendata/cookiecutter-data-science) aimed at making your data science workflow reproducible. Exercise 3: Set up your virtual environment and install all dependenciesCreate and activate your `bus_number_tutorial` conda environment using the above make commands. Your `active environment` should be `bus_number_tutorial`
###Code
!conda info
###Output
active environment : bus_number_tutorial
active env location : /anaconda3/envs/bus_number_tutorial
shell level : 2
user config file : /Users/danielhaugstvedt/.condarc
populated config files : /Users/danielhaugstvedt/.condarc
conda version : 4.6.14
conda-build version : 3.15.1
python version : 3.6.8.final.0
base environment : /anaconda3 (writable)
channel URLs : https://conda.anaconda.org/conda-forge/osx-64
https://conda.anaconda.org/conda-forge/noarch
https://repo.anaconda.com/pkgs/main/osx-64
https://repo.anaconda.com/pkgs/main/noarch
https://repo.anaconda.com/pkgs/free/osx-64
https://repo.anaconda.com/pkgs/free/noarch
https://repo.anaconda.com/pkgs/r/osx-64
https://repo.anaconda.com/pkgs/r/noarch
package cache : /anaconda3/pkgs
/Users/danielhaugstvedt/.conda/pkgs
envs directories : /anaconda3/envs
/Users/danielhaugstvedt/.conda/envs
platform : osx-64
user-agent : conda/4.6.14 requests/2.19.1 CPython/3.6.8 Darwin/18.6.0 OSX/10.14.5
UID:GID : 501:20
netrc file : None
offline mode : False
###Markdown
**Note:** If you are using **JupyterHub**, the bash magics `!` and `%%bash` will not work as expected, that is, they will drop you into your root JupyterHub environment, as opposed to the conda kernel that you are running this notebook in, and you will not see `bus_number_tutorial`. To get around this, you will need to run the bash commands in this notebook from a terminal instance with your `bus_number_tutorial` conda environment activated. If done correctly, you should also be able to import from `src`
###Code
# if importing src doesn't work, try `make requirements`
import src
###Output
_____no_output_____
###Markdown
Exercise 4: Pick up this tutorial in your new repo* Run jupyter notebook and open `notebooks/10-reproducible-environment.ipynb`If you're currently running this notebook and the checks from the previous exercises worked, then you're in business!Keep going from here! Revision Control: `git`How do we keep track of our changes? We use **git**.Before we do anything interesting, let's initialize a git repository (repo) here. Exercise 5: Initialize a git repo for `bus_number_tutorial` ```git initgit add .git commit -m "Initial Import"```
###Code
!git status
###Output
On branch master
Your branch is ahead of 'origin/master' by 2 commits.
(use "git push" to publish your local commits)
Changes not staged for commit:
(use "git add <file>..." to update what will be committed)
(use "git checkout -- <file>..." to discard changes in working directory)
[31mmodified: 10-reproducible-environment.ipynb[m
[31mmodified: ../src/utils.py[m
no changes added to commit (use "git add" and/or "git commit -a")
###Markdown
We will get back to using git again soon. Exercise 6: Add a dependencyModify the environment file so that `make requirements` installs some additional packages* install `joblib` using conda* install `nbdime` using pip
###Code
# Check that you now have joblib and nbdime installed
# Don't forget that you need to run `make requirements` once you've changed the `environment.yml` file
import joblib
import nbdime
###Output
_____no_output_____
###Markdown
Exercise 7: Basic git interactionsCheck the changes to your `environment.yml` file into your git repo See what has changed with git:
###Code
!git status
!git diff -u ../environment.yml
###Output
_____no_output_____
###Markdown
To add or reject your changes incrementally:
###Code
#!git add -p
#!git reset -p
###Output
_____no_output_____
###Markdown
Commit the changes
###Code
#!git commit -v
# You should have no differences in your branch now
# Except for those that you've made by running notebooks
!git status
###Output
On branch master
Your branch is ahead of 'origin/master' by 2 commits.
(use "git push" to publish your local commits)
Changes not staged for commit:
(use "git add <file>..." to update what will be committed)
(use "git checkout -- <file>..." to discard changes in working directory)
[31mmodified: 10-reproducible-environment.ipynb[m
[31mmodified: ../src/utils.py[m
no changes added to commit (use "git add" and/or "git commit -a")
###Markdown
The Data Science DAGDAG = Directed Acyclic Graph. That means the process eventually stops. (This is a good thing!) It also means we can use a super old, but incredibly handy tool to implement this workflow: `make`. Make, Makefiles, and the Data FlowWe use a `Makefile` to organize and invoke the various steps in our Data Science pipeline.You have already used this file when you created your virtual environment in the first place:```make create_environment```Here are the steps we will be working through in this tutorial:A [PDF version of the cheat sheet](references/cheat_sheet.pdf) is also available. What's my make target doing?If you are ever curious what commands a `make` command will invoke (including any invoked dependencies), use `make -n`, which lists the commands without executing them:
###Code
%%bash
pwd
%%bash
cd .. && make -n requirements
###Output
python3 test_environment.py
###Markdown
We use a cute **self-documenting makefiles trick** (borrowed from `cookiecutter-datascience`) to make it easy to document the various targets that you add. This documentation is produced when you type a plain `make`:
###Code
%%bash
cd .. && make
###Output
To get started:
>>> make create_environment
>>> conda activate bus_number_tutorial
Project Variables:
PROJECT_NAME = bus_number_tutorial
Available rules:
analysis             Convert predictions / transforms / experiments into output
data
clean                Delete all compiled Python files
clean_interim        Delete all interim (DataSource) files
clean_models         Delete all trained models
clean_predictions    Delete all predictions
clean_processed      Delete all processed datasets
clean_raw            Delete the raw downloads directory
create_environment   Set up virtual environment for this project
data                 convert raw datasets into fully processed datasets
delete_environment   Delete the virtual environment for this project
lint                 Lint using flake8
predict              predict / transform / run experiments
requirements         Install or update Python Dependencies
sources              Fetch, Unpack, and Process raw DataSources
sync_data_from_s3    Download Data from S3
sync_data_to_s3      Upload Data to S3
test                 Run all Unit Tests
test_environment     Test python environment is set-up correctly
train                train / fit / build models
transform_data       Apply Transformations to produce fully processed Datsets
###Markdown
Under the Hood: The Format of a Makefile``` Comment to appear in the auto-generated documentationthing_to_build: space separated list of dependencies command_to_run there is a tab before this command. another_command_to_run every line gets run in a *new shell*```
###Code
%%file Makefile.test
data: raw
@echo "Build Datasets"
train_test_split:
@echo "do train/test split"
train: data transform_data train_test_split
@echo "Train Models"
transform_data:
@echo "do a data transformation"
raw:
@echo "Fetch raw data"
# Note: you can run a specific Makefile with the -f option
!make -f Makefile.test data
###Output
Fetch raw data
Build Datasets
###Markdown
Note: If you see: ```*** missing separator. Stop.``` it's because you have used spaces instead of **tabs** before your commands. Exercise 8: What does this `Makefile.test` print when you run `make train`? Exercise 9: What happens when you add a cycle to a MakefileSet up a makefile with a cyclic dependency and run it It prints:`make: Circular raw <- data dependency dropped.` Using a Makefile like this is an easy way to set up a process flow expressed as a Directed Acyclic Graph (DAG).**Note**: We have only scratched the surface here. The are lots of interesting tricks you can do with make.* http://zmjones.com/make/* http://blog.byronjsmith.com/makefile-shortcuts.html* https://www.gnu.org/software/make/manual/ Back to Revision Control: git workflowsGit isn't really a collaboration tool. It's more a tool for implementing collaboration workflows.What do we mean by workflow? A process built on top of git that incorporates **pull requests** and **branches**. Typically, this is provided by sites like: GitHub, GitLab, BitBucket. Exercise 10: Create a GitHub/GitLab/BitBucket repo and sync your repo to it.
###Code
# your remote repo should now show up
!git remote -v
###Output
origin https://github.com/haugstve/bus_number_tutorial.git (fetch)
origin https://github.com/haugstve/bus_number_tutorial.git (push)
###Markdown
For example (using SSL): origin [email protected]:${GITHUB_USERNAME}/bus_number_tutorial.git (fetch) origin [email protected]:${GITHUB_USERNAME}/bus_number_tutorial.git (push) GitHub workflow cheatsheetSee https://github.com/hackalog/bus_number/wiki/Github-Workflow-Cheat-Sheet Life Rules for using `git`* Always work on a branch: `git checkout -b my_branch_name`. Delete branches once they are merged.* **Never** push to master. Always **work on a branch** and do a pull request.* Seriously, don't do work on master if you are collaborating with **anyone**.* If you pushed it anywhere, or shared it with anyone, don't `git rebase`. In fact, if you're reading this, don't `git rebase`. Save that for when you are comfortable solving git merge nightmares on your own.Here are some common tasks in git/github Starting the day. Where was I? What was I doing?```git branch What branch am I currently on? e.g. {my_branch}git status anything I forgot to commit? If so...git commit ... Commit work in progress``` Didn't I do some work at home last night?```git checkout master leave whatever branch I was ongit fetch origin --prune Check for something newgit merge origin/master If updates available, update!git branch --merged master check for any merged branches that can be safely deletedgit branch -d {name_of_merged_branch} delete any fully merged branches``` Anything fun happening upstream?```git checkout mastergit fetch upstream --prune grab latest changes from upstream repogit merge upstream/master merge them into local copy of my formgit push origin master push latest upstream changes to my forked repogit branch --merged master check for any merged branches that can be safely deletedgit branch -d {name_of_merged_branch} delete any fully merged branches```Now that `master` is up to date, you should merge whatever happened in `master` into your development branch:```git checkout {my_branch}git merge master merges master->{my_branch}git push origin {my_branch} Let Github know about the merge``` Some useful references if `gitflow` isn't second nature to you yet* Introduction to GitHub tutorial: https://lab.github.com/githubtraining/introduction-to-github* Git Handbook: https://guides.github.com/introduction/git-handbook/ Exercise 11:* Create a branch called `add_sklearn`* Add a scikit-learn dependency* Check in these changes using git to your local repo* Push the new branch to GitHub* Create a pull request to merge this branch into master* Merge your PR (delete the branch afterwards)* Sync your local repo with GitHub, including deleting the merged branches Python ModulesBy default, we keep our source code in a module called `src`. (this can be overridden in the cookieccutter)This is enabled via one line in `environment.yml`:```- pip: - -e .```This creates an **editable module**, and looks in the current directory for a file called `setup.py` to indicate the module name and location
###Code
# %load ../setup.py
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.0.1',
description='Increase my bus number',
author='haugstve',
license='MIT',
)
###Output
_____no_output_____
###Markdown
This lets you easily use your code in notebooks and other scripts, and avoids any `sys.path.append` silliness ASIDE: Semantic VersioningSemantic versioning (or *semver*), refers to the convention of versioning with a triple: MAJOR.MINOR.PATCHWith the following convention: when releasing new versions, increment the:* MAJOR version when you make **incompatible API changes**,* MINOR version when you **add functionality** in a backwards-compatible manner, and* PATCH version when you make backwards-compatible **bug fixes**.If you have no other plan, this is a great convention to follow.For an obscene amount of detail on this concept, see https://semver.org/ Exercise 11:* add your favorite utility function to `src/utils`* increment the version number of the editable package (do this in `setup.py`)* run `make requirements` (required if you added dependencies for your utility function)* import your utility function and run it from this notebook
###Code
from src import utils
import pandas as pd
from src import dplyr
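# Note: `save_json` and the dplyr-style DataFrame methods used below (mutate,
# select, the string-query `filter`, arrange) are not plain pandas -- they are
# presumably provided by the author's custom `src.utils` and `src.dplyr` modules,
# so this cell only runs once those helpers exist in your own `src` package.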
utils.save_json('test.json',['a','b','c'])
df = pd.DataFrame({'col1': [1, 2, 3, 4],
'col2': [1, 2, 1, 2],
'col3': ['a', 'b', 'a', 'b']})
df.filter('col2 == 2').shape
import seaborn as sns
iris = sns.load_dataset('iris')
(iris.mutate(sepal_ratio = iris.sepal_length/iris.sepal_width)
.head())
(iris.select('sepal_length','sepal_width')
.head())
(iris.filter('sepal_length > 4')
.head())
(iris.arrange(by = 'sepal_length')
.head())
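# (Relating to the Semantic Versioning aside above.) A minimal illustrative
# sketch of how a MAJOR.MINOR.PATCH bump works; `bump_version` is a hypothetical
# helper for this notebook only, not part of the project's tooling.
def bump_version(version: str, part: str = "patch") -> str:
    """Return `version` with the given part incremented and lower parts reset.

    >>> bump_version("0.0.1", "minor")
    '0.1.0'
    """
    major, minor, patch = (int(x) for x in version.split("."))
    if part == "major":
        return f"{major + 1}.0.0"
    if part == "minor":
        return f"{major}.{minor + 1}.0"
    return f"{major}.{minor}.{patch + 1}"

bump_version("0.0.1")  # a backwards-compatible bug fix -> '0.0.2'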
# A handy magic that allows us to edit modules and have them stay up to date in the notebook. In this case, src.
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Tutorial 1: Reproducible Environments(Continued from `README.md`) Overview* Requirements: The Bare Minimum * Using a Data Science Template: `cookiecutter`* Virtual Environments: `conda` and environment files* Revision Control: git and a git workflow * Installing, Enabling, and using nbdime* The Data Science DAG * make, Makefiles and data flow* Python Modules * Creating an editable module* Testing: doctest, pytest, hypothesis We'll start out by checking that all the requirements are met from the previous exercises (started in `README.md`) Exercise 1: Install the requirements* Anaconda* Cookiecutter* make* git Test your installation
###Code
!conda --version # or `$CONDA_EXE --version` in some environments
!make --version
!git --version
!cookiecutter --version
###Output
_____no_output_____
###Markdown
Exercise 2: Start your cookiecutter-based projectCreate a project called `bus_number_tutorial`: Use conda as your virtualenv manager Use python 3.6 or greaterWhen complete, you should have a fully populated project directory, complete with customized README.md.We will be working in this project from now on. Exercise 2b:Explore the `README.md` from your new `bus_number_tutorial` project(Hint: You can use the `%load` magic, or `!cat` to look at it in your notebook) Exercise 3: Set up your virtual environment and install all dependenciesCreate and activate your `bus_number_tutorial` conda environment using the above make commands. Your `active environment` should be `bus_number_tutorial`
###Code
!conda info
###Output
_____no_output_____
###Markdown
If done correctly, you should also be able to import from `src`
###Code
# if importing src doesn't work, try `make requirements`
import src
###Output
_____no_output_____
###Markdown
Exercise 4: Pick up this tutorial in your new repo* Copy the notebooks from `bus_number` into your new `bus_number_tutorial` repo* Run jupyter notebook and open `notebooks/10-reproducible-environment.ipynb`If you're currently running this notebook and the checks from the previous exercises worked, then you're in business!Keep going from here! Revision Control: `git`How do we keep track of our changes? We use **git**.Before we do anything interesting, let's initialize a git repository (repo) here. Exercise 5: Initialize a git repo for `bus_number_tutorial` ```git initgit add .git commit -m "Initial Import"```
###Code
!git status
###Output
_____no_output_____
###Markdown
We will get back to using git again soon. Exercise 6: Add a dependencyModify the environment file so that `make requirements` installs some additional packages* install `joblib` using conda* install `nbdime` using pip
###Code
# Check that you now have joblib and nbdime installed
# Don't forget that you need to run `make requirements` once you've changed the `environment.yml` file
import joblib
import nbdime
###Output
_____no_output_____
###Markdown
Exercise 7: Basic git interactionsCheck the changes to your `environment.yml` file into your git repo See what has changed with git:
###Code
!git status
!git diff -u ../environment.yml
###Output
_____no_output_____
###Markdown
To add or reject your changes incrementally:
###Code
#!git add -p
#!git reset -p
###Output
_____no_output_____
###Markdown
Commit the changes
###Code
#!git commit -v
# You should have no differences in your branch now
# Except for those that you've made by running notebooks
!git status
###Output
_____no_output_____
###Markdown
The Data Science DAGDAG = Directed Acyclic Graph. That means the process eventually stops. (This is a good thing!) It also means we can use a super old, but incredibly handy tool to implement this workflow: `make`. Make, Makefiles, and the Data FlowWe use a `Makefile` to organize and invoke the various steps in our Data Science pipeline.You have already used this file when you created your virtual environment in the first place:```make create_environment```Here are the steps we will be working through in this tutorial:A [PDF version of the cheat sheet](references/cheat_sheet.pdf) is also available. What's my make target doing?If you are ever curious what commands a `make` command will invoke (including any invoked dependencies), use `make -n`, which lists the commands without executing them:
###Code
%%bash
cd .. && make -n requirements
###Output
_____no_output_____
###Markdown
We use a cute **self-documenting makefiles trick** (borrowed from `cookiecutter-datascience`) to make it easy to document the various targets that you add. This documentation is produced when you type a plain `make`:
###Code
%%bash
cd .. && make
###Output
_____no_output_____
###Markdown
Under the Hood: The Format of a Makefile``` Comment to appear in the auto-generated documentationthing_to_build: space separated list of dependencies command_to_run there is a tab before this command. another_command_to_run every line gets run in a *new shell*``` Exercise 8: What does this makefile print when you run `make train`?
###Code
%%file Makefile.test
data: raw
@echo "Build Datasets"
train_test_split:
@echo "do train/test split"
train: data transform_data train_test_split
@echo "Train Models"
transform_data:
@echo "do a data transformation"
raw:
@echo "Fetch raw data"
###Output
_____no_output_____
###Markdown
Note: If you see: ```*** missing separator. Stop.``` it's because you have used spaces instead of **tabs** before your commands. Exercise 9: What happens when you add a cycle to a MakefileSet up a makefile with a cyclic dependency and run it Using a Makefile like this is an easy way to set up a process flow expressed as a Directed Acyclic Graph (DAG).**Note**: We have only scratched the surface here. The are lots of interesting tricks you can do with make.* http://zmjones.com/make/* http://blog.byronjsmith.com/makefile-shortcuts.html* https://www.gnu.org/software/make/manual/ Back to Revision Control: git workflowsGit isn't really a collaboration tool. It's more a tool for implementing collaboration workflows.What do we mean by workflow? A process built on top of git that incorporates **pull requests** and **branches**. Typically, this is provided by sites like: GitHub, GitLab, BitBucket. Some useful references if `gitflow` isn't second nature to you yet* Introduction to GitHub tutorial: https://lab.github.com/githubtraining/introduction-to-github* Git Handbook: https://guides.github.com/introduction/git-handbook/* GitHub workflow cheatsheet: https://github.com/hackalog/bus_number/wiki/Github-Workflow-Cheat-Sheet Life Rules for using `git`* Always work on a branch: `git checkout -b my_branch_name`. Delete branches once they are merged.* **Never** push to master. Always **work on a branch** and do a pull request.* Seriously, don't do work on master if you are collaborating with **anyone**.* If you pushed it anywhere, or shared it with anyone, don't `git rebase`. In fact, if you're reading this, don't `git rebase`. Save that for when you are comfortable solving git merge nightmares on your own. Exercise 10: Create a GitHub/GitLab/BitBucket repo and sync your repo to it. Exercise 11:* Create a branch called `add_sklearn`* Add a scikit-learn dependency* Check in these changes using git to your local repo* Push the new branch to GitHub* Create a pull request to merge this branch into master* Merge your PR (delete the branch afterwards)* Sync your local repo with GitHub, including deleting the merged branches Python ModulesBy default, we keep our source code in a module called `src`. (this can be overridden in the cookieccutter)This is enabled via one line in `environment.yml`:```- pip: - -e .```This creates an **editable module**, and looks in the current directory for a file called `setup.py` to indicate the module name and location
###Code
# %load ../setup.py
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.0.1',
description='Up Your Bus Number: A Primer for Reproducible Data Science',
author='Tutte Institute for Mathematics and Computing',
license='MIT',
)
###Output
_____no_output_____
###Markdown
This lets you easily use your code in notebooks and other scripts, and avoids any `sys.path.append` silliness ASIDE: Semantic VersioningSemantic versioning (or *semver*), refers to the convention of versioning with a triple: MAJOR.MINOR.PATCHWith the following convention: when releasing new versions, increment the:* MAJOR version when you make **incompatible API changes**,* MINOR version when you **add functionality** in a backwards-compatible manner, and* PATCH version when you make backwards-compatible **bug fixes**.If you have no other plan, this is a great convention to follow.For an obscene amount of detail on this concept, see https://semver.org/ Exercise 11:* add your favorite utility function to `src/utils`* increment the version number of the editable package* run `make requirements` (required if you added dependencies for your utility function)* import your utility function and run it from this notebook Testing: doctest, pytest, coveragePython has built in testing frameworks via:* doctests:https://docs.python.org/3/library/doctest.htmlmodule-doctest* unittest: https://docs.python.org/3/library/unittest.htmlAdditionally, you'll want to make regular use of:* pytest: https://docs.pytest.org/en/latest/* pytest-cov: https://pypi.org/project/pytest-cov/* hypothesis: https://hypothesis.readthedocs.io/en/latestCookiecutter (vanilla flavoured) comes witha setup for the `tox` testing framework built in.* https://tox.readthedocs.io/en/latest/ Exercise 12:Add a `make test` target to your makefile that:* runs doctests* runs pytest unit tests* (extra credit) Displays test coverage results When you run `make test`, you will find tests that will fail in `src/test_example.py`. Fix them in the next exercise.
###Code
!cd .. && make test
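# A minimal sketch of the kind of test a `make test` target could run, assuming
# it invokes something like `pytest --doctest-modules`; the function below is
# illustrative only and is not part of the generated repo.
def double(x):
    """Return twice the input.

    >>> double(2)
    4
    >>> double(0)
    0
    """
    return 2 * x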
###Output
_____no_output_____
###Markdown
***Note:*** `make test` is normally functionality built into `cookiecutter-easydata`. We're building it from scratch here for the sake of practice. Exercise 13:Fix the failing tests
###Code
# Should pass all tests now!
!cd .. && make test
###Output
_____no_output_____
###Markdown
Exercise 14:* Check in all your changes to git* Merge them into your master branch via a PR in GitHub
###Code
!git status
###Output
_____no_output_____
###Markdown
Testing: doctest, pytest, coveragePython has built in testing frameworks via:* doctests:https://docs.python.org/3/library/doctest.htmlmodule-doctest* unittest: https://docs.python.org/3/library/unittest.htmlAdditionally, you'll want to make regular use of:* pytest: https://docs.pytest.org/en/latest/* pytest-cov: https://pypi.org/project/pytest-cov/* hypothesis: https://hypothesis.readthedocs.io/en/latestCookiecutter (vanilla flavoured) comes witha setup for the `tox` testing framework built in.* https://tox.readthedocs.io/en/latest/ Exercise 12:Add a `make test` target to your makefile that:* runs doctests* runs pytest unit tests* (extra credit) Displays test coverage results When you run `make test`, you will find tests that will fail in `src/test_example.py`. Fix them in the next exercise.
###Code
!cd .. && make test
###Output
cd src && pytest --doctest-modules --cov
============================= test session starts ==============================
platform darwin -- Python 3.7.3, pytest-4.6.3, py-1.8.0, pluggy-0.12.0
rootdir: /Users/danielhaugstvedt/Developer/bus_number_tutorial
plugins: nbval-0.9.1, cov-2.7.1
collected 8 items
dplyr.py .                                                               [ 12%]
test_example.py ..                                                       [ 37%]
data/fetch.py ....                                                       [ 87%]
data/utils.py .                                                          [100%]
Coverage.py warning: Couldn't read data from '/Users/danielhaugstvedt/Developer/bus_number_tutorial/src/.coverage.daniels-mbp.oslo.no.ibm.com.77752.109619': CoverageException: Doesn't seem to be a coverage.py data file
Coverage.py warning: Couldn't read data from '/Users/danielhaugstvedt/Developer/bus_number_tutorial/src/.coverage.daniels-mbp.oslo.no.ibm.com.79868.474315': CoverageException: Doesn't seem to be a coverage.py data file
Coverage.py warning: Couldn't read data from '/Users/danielhaugstvedt/Developer/bus_number_tutorial/src/.coverage.daniels-mbp.oslo.no.ibm.com.77761.954994': CoverageException: Doesn't seem to be a coverage.py data file
---------- coverage: platform darwin, python 3.7.3-final-0 -----------
Name Stmts Miss Cover
------------------------------------------------
__init__.py 0 0 100%
analysis/__init__.py 0 0 100%
analysis/analysis.py 105 86 18%
analysis/run_analysis.py 23 9 61%
data/__init__.py 4 0 100%
data/apply_transforms.py 27 12 56%
data/datasets.py 322 272 16%
data/fetch.py 152 117 23%
data/localdata.py 1 0 100%
data/make_dataset.py 15 4 73%
data/transform_data.py 88 72 18%
data/transformers.py 42 29 31%
data/utils.py 85 61 28%
dplyr.py 14 2 86%
features/__init__.py 0 0 100%
features/build_features.py 0 0 100%
logging.py 7 0 100%
models/__init__.py 3 0 100%
models/algorithms.py 5 4 20%
models/model_list.py 74 60 19%
models/predict.py 100 80 20%
models/predict_model.py 22 9 59%
models/train.py 54 39 28%
models/train_models.py 25 11 56%
paths.py 17 0 100%
test_example.py 8 0 100%
utils.py 49 37 24%
visualization/__init__.py 0 0 100%
visualization/visualize.py 0 0 100%
workflow.py 8 0 100%
------------------------------------------------
TOTAL                         1250    904    28%
=========================== 8 passed in 1.34 seconds ===========================
###Markdown
***Note:*** `make test` is normally functionality built into `cookiecutter-easydata`. We're building it from scratch here for the sake of practice. Exercise 13:Fix the failing tests
###Code
# Should pass all tests now!
!cd .. && make test
###Output
cd src && pytest --doctest-modules --cov
============================= test session starts ==============================
platform darwin -- Python 3.7.3, pytest-4.6.3, py-1.8.0, pluggy-0.12.0
rootdir: /Users/danielhaugstvedt/Developer/bus_number_tutorial
plugins: nbval-0.9.1, cov-2.7.1
collected 8 items
dplyr.py .                                                               [ 12%]
test_example.py ..                                                       [ 37%]
data/fetch.py ....                                                       [ 87%]
data/utils.py .                                                          [100%]
Coverage.py warning: Couldn't read data from '/Users/danielhaugstvedt/Developer/bus_number_tutorial/src/.coverage.daniels-mbp.oslo.no.ibm.com.77752.109619': CoverageException: Doesn't seem to be a coverage.py data file
Coverage.py warning: Couldn't read data from '/Users/danielhaugstvedt/Developer/bus_number_tutorial/src/.coverage.daniels-mbp.oslo.no.ibm.com.79868.474315': CoverageException: Doesn't seem to be a coverage.py data file
Coverage.py warning: Couldn't read data from '/Users/danielhaugstvedt/Developer/bus_number_tutorial/src/.coverage.daniels-mbp.oslo.no.ibm.com.77761.954994': CoverageException: Doesn't seem to be a coverage.py data file
---------- coverage: platform darwin, python 3.7.3-final-0 -----------
Name Stmts Miss Cover
------------------------------------------------
__init__.py 0 0 100%
analysis/__init__.py 0 0 100%
analysis/analysis.py 105 86 18%
analysis/run_analysis.py 23 9 61%
data/__init__.py 4 0 100%
data/apply_transforms.py 27 12 56%
data/datasets.py 322 272 16%
data/fetch.py 152 117 23%
data/localdata.py 1 0 100%
data/make_dataset.py 15 4 73%
data/transform_data.py 88 72 18%
data/transformers.py 42 29 31%
data/utils.py 85 61 28%
dplyr.py 14 2 86%
features/__init__.py 0 0 100%
features/build_features.py 0 0 100%
logging.py 7 0 100%
models/__init__.py 3 0 100%
models/algorithms.py 5 4 20%
models/model_list.py 74 60 19%
models/predict.py 100 80 20%
models/predict_model.py 22 9 59%
models/train.py 54 39 28%
models/train_models.py 25 11 56%
paths.py 17 0 100%
test_example.py 8 0 100%
utils.py 49 37 24%
visualization/__init__.py 0 0 100%
visualization/visualize.py 0 0 100%
workflow.py 8 0 100%
------------------------------------------------
TOTAL                         1250    904    28%
=========================== 8 passed in 1.55 seconds ===========================
###Markdown
Exercise 14:* Check in all your changes to git* Merge them into your master branch via a PR in GitHub
###Code
!git status
###Output
On branch master
Your branch is ahead of 'origin/master' by 6 commits.
(use "git push" to publish your local commits)
Changes not staged for commit:
(use "git add <file>..." to update what will be committed)
(use "git checkout -- <file>..." to discard changes in working directory)
[31mmodified: 10-reproducible-environment.ipynb[m
[31mmodified: ../src/test_example.py[m
no changes added to commit (use "git add" and/or "git commit -a")
###Markdown
Tutorial 1: Reproducible Environments(Continued from `README.md`) Overview* Requirements: The Bare Minimum * Using a Data Science Template: `cookiecutter`* Virtual Environments: `conda` and environment files* Revision Control: git and a git workflow * Installing, Enabling, and using nbdime* The Data Science DAG * make, Makefiles and data flow* Python Modules * Creating an editable module* Testing: doctest, pytest, hypothesis We'll start out by checking that all the requirements are met from the previous exercises (started in `README.md`) Exercise 1: Install the requirements* Anaconda* Cookiecutter* make* git Test your installation
###Code
!conda --version # or `$CONDA_EXE --version` in some environments
!make --version
!git --version
###Output
_____no_output_____
###Markdown
Exercise 2: Start your cookiecutter-based projectCreate a project called `Bus Number Tutorial`: Use conda as your virtualenv manager Use python 3.6 or greaterWhen complete, you should have a fully populated project directory, complete with customized README.md.We will be working in this project from now on. Exercise 2b:Explore the `README.md` from your new `bus_number_tutorial` repo(Hint: You can use the `%load` magic, or `!cat` to look at it in your notebook) Exercise 3: Set up your virtual environment and install all dependenciesCreate and activate your `bus_number_tutorial` conda environment using the above make commands. Your `active environment` should be `bus_number_tutorial`
###Code
!conda info
###Output
_____no_output_____
###Markdown
**Note:** If you are using **JupyterHub**, the bash magics `!` and `%%bash` will not work as expected, that is, they will drop you into your root JupyterHub environment, as opposed to the conda kernel that you are running this notebook in, and you will not see `bus_number_tutorial`. To get around this, you will need to run the bash commands in this notebook from a terminal instance with your `bus_number_tutorial` conda environment activated. If done correctly, you should also be able to import from `src`
###Code
# if importing src doesn't work, try `make requirements`
import src
###Output
_____no_output_____
###Markdown
Exercise 4: Pick up this tutorial in your new repo* Run jupyter notebook and open `notebooks/10-reproducible-environment.ipynb`If you're currently running this notebook and the checks from the previous exercises worked, then you're in business!Keep going from here! Revision Control: `git`How do we keep track of our changes? We use **git**.Before we do anything interesting, let's initialize a git repository (repo) here. Exercise 5: Initialize a git repo for `bus_number_tutorial` ```git initgit add .git commit -m "Initial Import"```
###Code
!git status
###Output
_____no_output_____
###Markdown
We will get back to using git again soon. Exercise 6: Add a dependencyModify the environment file so that `make requirements` installs some additional packages* install `joblib` using conda* install `nbdime` using pip
###Code
# Check that you now have joblib and nbdime installed
# Don't forget that you need to run `make requirements` once you've changed the `environment.yml` file
import joblib
import nbdime
###Output
_____no_output_____
###Markdown
Exercise 7: Basic git interactionsCheck the changes to your `environment.yml` file into your git repo See what has changed with git:
###Code
!git status
!git diff -u ../environment.yml
###Output
_____no_output_____
###Markdown
To add or reject your changes incrementally:
###Code
#!git add -p
#!git reset -p
###Output
_____no_output_____
###Markdown
Commit the changes
###Code
#!git commit -v
# You should have no differences in your branch now
# Except for those that you've made by running notebooks
!git status
###Output
_____no_output_____
###Markdown
The Data Science DAGDAG = Directed Acyclic Graph. That means the process eventually stops. (This is a good thing!) It also means we can use a super old, but incredibly handy tool to implement this workflow: `make`. Make, Makefiles, and the Data FlowWe use a `Makefile` to organize and invoke the various steps in our Data Science pipeline.You have already used this file when you created your virtual environment in the first place:```make create_environment```Here are the steps we will be working through in this tutorial:A [PDF version of the cheat sheet](references/cheat_sheet.pdf) is also available. What's my make target doing?If you are ever curious what commands a `make` command will invoke (including any invoked dependencies), use `make -n`, which lists the commands without executing them:
###Code
%%bash
cd .. && make -n requirements
###Output
_____no_output_____
###Markdown
We use a cute **self-documenting makefiles trick** (borrowed from `cookiecutter-datascience`) to make it easy to document the various targets that you add. This documentation is produced when you type a plain `make`:
###Code
%%bash
cd .. && make
###Output
_____no_output_____
###Markdown
Under the Hood: The Format of a Makefile``` Comment to appear in the auto-generated documentationthing_to_build: space separated list of dependencies command_to_run there is a tab before this command. another_command_to_run every line gets run in a *new shell*```
###Code
%%file Makefile.test
data: raw
@echo "Build Datasets"
train_test_split:
@echo "do train/test split"
train: data transform_data train_test_split
@echo "Train Models"
transform_data:
@echo "do a data transformation"
raw:
@echo "Fetch raw data"
# Note: you can run a specific Makefile with the -f option
!make -f Makefile.test data
###Output
_____no_output_____
###Markdown
Note: If you see: ```*** missing separator. Stop.``` it's because you have used spaces instead of **tabs** before your commands. Exercise 8: What does this `Makefile.test` print when you run `make train`? Exercise 9: What happens when you add a cycle to a MakefileSet up a makefile with a cyclic dependency and run it Using a Makefile like this is an easy way to set up a process flow expressed as a Directed Acyclic Graph (DAG).**Note**: We have only scratched the surface here. The are lots of interesting tricks you can do with make.* http://zmjones.com/make/* http://blog.byronjsmith.com/makefile-shortcuts.html* https://www.gnu.org/software/make/manual/ Back to Revision Control: git workflowsGit isn't really a collaboration tool. It's more a tool for implementing collaboration workflows.What do we mean by workflow? A process built on top of git that incorporates **pull requests** and **branches**. Typically, this is provided by sites like: GitHub, GitLab, BitBucket. Exercise 10: Create a GitHub/GitLab/BitBucket repo and sync your repo to it.
###Code
# your remote repo should now show up
!git remote -v
###Output
_____no_output_____
###Markdown
For example (using SSL): origin [email protected]:${GITHUB_USERNAME}/bus_number_tutorial.git (fetch) origin [email protected]:${GITHUB_USERNAME}/bus_number_tutorial.git (push) GitHub workflow cheatsheetSee https://github.com/hackalog/bus_number/wiki/Github-Workflow-Cheat-Sheet Life Rules for using `git`* Always work on a branch: `git checkout -b my_branch_name`. Delete branches once they are merged.* **Never** push to master. Always **work on a branch** and do a pull request.* Seriously, don't do work on master if you are collaborating with **anyone**.* If you pushed it anywhere, or shared it with anyone, don't `git rebase`. In fact, if you're reading this, don't `git rebase`. Save that for when you are comfortable solving git merge nightmares on your own.Here are some common tasks in git/github Starting the day. Where was I? What was I doing?```git branch What branch am I currently on? e.g. {my_branch}git status anything I forgot to commit? If so...git commit ... Commit work in progress``` Didn't I do some work at home last night?```git checkout master leave whatever branch I was ongit fetch origin --prune Check for something newgit merge origin/master If updates available, update!git branch --merged master check for any merged branches that can be safely deletedgit branch -d {name_of_merged_branch} delete any fully merged branches``` Anything fun happening upstream?```git checkout mastergit fetch upstream --prune grab latest changes from upstream repogit merge upstream/master merge them into local copy of my formgit push origin master push latest upstream changes to my forked repogit branch --merged master check for any merged branches that can be safely deletedgit branch -d {name_of_merged_branch} delete any fully merged branches```Now that `master` is up to date, you should merge whatever happened in `master` into your development branch:```git checkout {my_branch}git merge master merges master->{my_branch}git push origin {my_branch} Let Github know about the merge``` Some useful references if `gitflow` isn't second nature to you yet* Introduction to GitHub tutorial: https://lab.github.com/githubtraining/introduction-to-github* Git Handbook: https://guides.github.com/introduction/git-handbook/ Exercise 11:* Create a branch called `add_sklearn`* Add a scikit-learn dependency* Check in these changes using git to your local repo* Push the new branch to GitHub* Create a pull request to merge this branch into master* Merge your PR (delete the branch afterwards)* Sync your local repo with GitHub, including deleting the merged branches Python ModulesBy default, we keep our source code in a module called `src`. (this can be overridden in the cookieccutter)This is enabled via one line in `environment.yml`:```- pip: - -e .```This creates an **editable module**, and looks in the current directory for a file called `setup.py` to indicate the module name and location
###Code
# %load ../setup.py
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.0.1',
description='Up Your Bus Number: A Primer for Reproducible Data Science',
author='Tutte Institute for Mathematics and Computing',
license='MIT',
)
###Output
_____no_output_____
###Markdown
This lets you easily use your code in notebooks and other scripts, and avoids any `sys.path.append` silliness ASIDE: Semantic VersioningSemantic versioning (or *semver*), refers to the convention of versioning with a triple: MAJOR.MINOR.PATCHWith the following convention: when releasing new versions, increment the:* MAJOR version when you make **incompatible API changes**,* MINOR version when you **add functionality** in a backwards-compatible manner, and* PATCH version when you make backwards-compatible **bug fixes**.If you have no other plan, this is a great convention to follow.For an obscene amount of detail on this concept, see https://semver.org/ Exercise 11:* add your favorite utility function to `src/utils`* increment the version number of the editable package (do this in `setup.py`)* run `make requirements` (required if you added dependencies for your utility function)* import your utility function and run it from this notebook
###Code
# A handy magic that allows us to edit modules and have them stay up to date in the notebook. In this case, src.
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Testing: doctest, pytest, coveragePython has built in testing frameworks via:* doctests:https://docs.python.org/3/library/doctest.htmlmodule-doctest* unittest: https://docs.python.org/3/library/unittest.htmlAdditionally, you'll want to make regular use of:* pytest: https://docs.pytest.org/en/latest/* pytest-cov: https://pypi.org/project/pytest-cov/* hypothesis: https://hypothesis.readthedocs.io/en/latestCookiecutter (vanilla flavoured) comes witha setup for the `tox` testing framework built in.* https://tox.readthedocs.io/en/latest/ Exercise 12:Add a `make test` target to your makefile that:* runs doctests* runs pytest unit tests* (extra credit) Displays test coverage results When you run `make test`, you will find tests that will fail in `src/test_example.py`. Fix them in the next exercise.
###Code
!cd .. && make test
###Output
_____no_output_____
###Markdown
***Note:*** `make test` is normally functionality built into `cookiecutter-easydata`. We're building it from scratch here for the sake of practice. Exercise 13:Fix the failing tests
###Code
# Should pass all tests now!
!cd .. && make test
###Output
_____no_output_____
###Markdown
Exercise 14:* Check in all your changes to git* Merge them into your master branch via a PR in GitHub
###Code
!git status
###Output
_____no_output_____ |
Atmosea_homework/time_serirs/compare1.ipynb | ###Markdown
Tinitial
###Code
import os
import numpy as np                 # used below for the area weighting and NaN handling
import xarray as xr                # used to open the model output files
import matplotlib.pyplot as plt    # used in the plotting cells below

rootdir = './compare1/'
list = os.listdir(rootdir)  # list every file and directory in the folder (note: this shadows the built-in `list`)
#ds = xr.open_dataset(rootdir+list[0],decode_times=False)
# NOTE: this cell was left unfinished in the original (DYT returned an undefined
# variable, ASEA had no value, and the cell-weight product was cut off). The
# version below is a hedged sketch of an area-weighted global-mean SST: it
# assumes a uniform latitude grid and cos(latitude) * a**2 area weights --
# swap in the real model grid spacing (DYT) before trusting the numbers.
a = 6.371e6  # Earth radius in metres

def DYT(j, nlat=115):
    """Hypothetical meridional grid spacing (radians) for row j on a uniform grid."""
    return np.pi / nlat

EB = []  # one area-weighted mean SST per input file
for yy in range(0, len(list)):
    try:
        ds = xr.open_dataset(rootdir + list[yy], decode_times=False)
    except Exception:
        continue
    temp = (ds['ts'][0, 0])[:, 1:]   # SST field from the first time/level slice
    vals = temp.values
    num, area = 0.0, 0.0
    for j in range(115):
        lat = -np.pi / 2 + (j + 0.5) * np.pi / 115   # assumed cell-centre latitude
        w = DYT(j) * np.cos(lat) * a**2              # approximate cell area weight
        for i in range(118):
            if np.isfinite(vals[j, i]):
                num += w * vals[j, i]
                area += w
    EB.append(num / area if area > 0 else np.nan)
plt.figure(figsize=(8,6))
ax = plt.subplot()
ax.plot(EB,color='gray')
#as.set_xtick()
ax.set(xlabel='time (Years)', ylabel='Temperature ($^{\circ}C$)',
title='Tinitial Global SST ')
#tick = [0,1980,1982,1984,1986,1988]
#ax.set_xticklabels(tick)
ax.set_ylim(17.0,18.7)
#plt.savefig('GloSST78-90.png')
plt.show()
###Output
_____no_output_____
###Markdown
Start from Fort.22
###Code
rootdir = './compare2/'
list = os.listdir(rootdir)  # list every file and directory in the folder
#ds = xr.open_dataset(rootdir+list[0],decode_times=False)
EB2 = []
for j in range(0,len(list)):
try:
ds2 = xr.open_dataset(rootdir+list[j],decode_times=False)
except:
continue
temp2 = (ds2['ts'][0, 0])[:,1:]
Temp2 = np.nanmean(temp2.values)
EB2.append(Temp2)
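##
# Optional cross-check on the last file read: np.nanmean above weights every
# grid cell equally. A simple cos(latitude)-weighted alternative, assuming
# roughly uniform latitude rows (the same assumption as in the compare1 sketch):
lats = np.linspace(-90, 90, temp2.shape[0])
w = np.cos(np.deg2rad(lats))[:, None] * np.isfinite(temp2.values)
print("equal-weight mean :", np.nanmean(temp2.values))
print("cos(lat)-weighted :", np.nansum(w * temp2.values) / w.sum())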
plt.figure(figsize=(8,6))
ax2 = plt.subplot()
ax2.plot(EB2,color='gray')
#as.set_xtick()
ax2.set(xlabel='time (Years)', ylabel='Temperature ($^{\circ}C$)',
title='Tinitial Global SST ')
#tick = [0,1980,1982,1984,1986,1988]
#ax2.set_xticklabels(tick)
ax2.set_ylim(17.0,18.7)
#plt.savefig('GloSST78-90.png')
plt.show()
EB2
###Output
_____no_output_____ |
_notebooks/2022-02-11-python-stack-traceback-more-info.ipynb | ###Markdown
Python - Getting more information from Tracebacks> A tutorial to get more information from Python exception stack traceback.- toc: true - badges: true- comments: true- categories: [python]- keywords: [python, traceback, exception, frame]- image: images/copied_from_nb/images/2022-02-11-python-stack-traceback-more-info.jpeg  AboutThis notebook demonstrates what the Python Traceback object is, and how can we get more information out of it to better diagnose exception messages. CreditThis blog post is based on an article originally written in `Python Cookbook` published by `O'Reilly Media, Inc.` and released July 2002. In book's chapter 15, there is a section with the title `Getting More Information from Tracebacks` written by `Bryn Keller`. An online version of this article is available at https://www.oreilly.com/library/view/python-cookbook/0596001673/ch14s05.html.The original article uses Python 2.2, but I have adapted it for Python 3.8. Also, I have added some commentary to give more insights on Python Traceback object. Environment Details
###Code
#collapse-hide
from platform import python_version
print("python==" + python_version())
###Output
python==3.8.5
###Markdown
Discussion Consider the following toy example where we are getting some data from an external source (an API call, a DB call, etc.), and we need to find the length of individual items provided in the list. We know that items in the list will be of type `str` so we have used a `len()` function on it.We got an exception when we ran our function on received data, and now we are trying to investigate what caused the error.
###Code
#collapse-hide
# this is intentionally hidden as we don't know about the data received from an external source.
data = ["1", "22", 333, "4444"]
##
# our toy example function.
import sys, traceback
def get_items_len(items: list) -> list:
"""
this function returns the length of items received in a list.
"""
items_len = []
for i in items:
items_len.append(len(i))
return items_len
##
# let's run our function on "data" received from an external source
try:
get_items_len(data)
except Exception as e:
print(traceback.print_exc())
###Output
None
###Markdown
We got an exception while data processing and the `Traceback` message gives us some details. It tells us that we have received some data of type _integer_ instead of _string_, and we are trying to call _len()_ function on it. But we don't know the actual data value that caused the exception, and we don't know the _index_ of the item in the list that caused this error. Depending on the use case, information about the local variables, or input data that caused the error can be crucial in diagnosing the root cause of an error.Fortunately, all this information is already available to us in the Traceback object, but there are no built-in methods that give this information directly. Let us try some of the built-in methods on the Traceback object to see the kind of information we could get from them.
###Code
#collapse-output
# calling traceback module built-in methods
try:
get_items_len(data)
except Exception as e:
print("***** Exception *****")
print(e)
exc_type, exc_value, exc_traceback = sys.exc_info()
print("\n***** print_tb *****")
traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
print("\n***** print_exception *****")
# exc_type below is ignored on 3.5 and later
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
print("\n***** print_exc *****")
traceback.print_exc(limit=2, file=sys.stdout)
print("\n***** format_exc, first and last line *****")
formatted_lines = traceback.format_exc().splitlines()
print(formatted_lines[0])
print(formatted_lines[-1])
print("\n***** format_exception *****")
# exc_type below is ignored on 3.5 and later
print(repr(traceback.format_exception(exc_type, exc_value,
exc_traceback)))
print("\n***** extract_tb *****")
print(repr(traceback.extract_tb(exc_traceback)))
print("\n***** format_tb *****")
print(repr(traceback.format_tb(exc_traceback)))
print("\n***** tb_lineno *****", exc_traceback.tb_lineno)
###Output
***** Exception *****
object of type 'int' has no len()
***** print_tb *****
File "<ipython-input-5-73d5b316a567>", line 4, in <module>
get_items_len(data)
***** print_exception *****
Traceback (most recent call last):
File "<ipython-input-5-73d5b316a567>", line 4, in <module>
get_items_len(data)
File "<ipython-input-3-8421f841ba77>", line 11, in get_items_len
items_len.append(len(i))
TypeError: object of type 'int' has no len()
***** print_exc *****
Traceback (most recent call last):
File "<ipython-input-5-73d5b316a567>", line 4, in <module>
get_items_len(data)
File "<ipython-input-3-8421f841ba77>", line 11, in get_items_len
items_len.append(len(i))
TypeError: object of type 'int' has no len()
***** format_exc, first and last line *****
Traceback (most recent call last):
TypeError: object of type 'int' has no len()
***** format_exception *****
['Traceback (most recent call last):\n', ' File "<ipython-input-5-73d5b316a567>", line 4, in <module>\n get_items_len(data)\n', ' File "<ipython-input-3-8421f841ba77>", line 11, in get_items_len\n items_len.append(len(i))\n', "TypeError: object of type 'int' has no len()\n"]
***** extract_tb *****
[<FrameSummary file <ipython-input-5-73d5b316a567>, line 4 in <module>>, <FrameSummary file <ipython-input-3-8421f841ba77>, line 11 in get_items_len>]
***** format_tb *****
[' File "<ipython-input-5-73d5b316a567>", line 4, in <module>\n get_items_len(data)\n', ' File "<ipython-input-3-8421f841ba77>", line 11, in get_items_len\n items_len.append(len(i))\n']
***** tb_lineno ***** 4
###Markdown
All these methods are useful but we are still short on information about the state of local variables when the system crashed. Before writing our custom function to get the variables state at the time of exception, let us spend some time to understand the working of Traceback object. Traceback Module> https://docs.python.org/3/library/traceback.html This module provides an easy-to-use interface to work with `traceback objects`. It provides multiple functions that we can use to extract the required information from traceback. So far, we have used methods from this module in the above examples. Traceback Objects> https://docs.python.org/3/reference/datamodel.html > On this page search for term "Traceback objects"Traceback objects represent a stack trace of an exception. A traceback object is implicitly created when an exception occurs and may also be explicitly created by initializing an instance of class `types.TracebackType`. _traceback_ object is also an instance of _types.TracebackType_ class. When an exception occurs, a traceback object is initialized for us, and we can obtain it from any of the following two methods. 1. It is available as a third item of the tuple returned by sys.exc_info() "`(type, value, traceback)`"2. It is available as the `__traceback__` object of the caught exception. "`Exception.__traceback__`"A traceback object is a linked list of nodes, where each node is a `Frame object`. Frame objects form their own linked list but in the opposite direction of traceback objects. Together they work like a doubly-linked list, and we can use them to move back and forth in the stack trace history. It is the frame objects that hold all the stack's important information. traceback object has some special attributes* `tb_next` point to the next level in the stack trace (towards the frame where the exception occurred), or `None` if there is no next level* `tb_frame` points to the execution frame of the current level* `tb_lineno` gives the line number where the exception occurred
###Code
##
# method 1: get traceback object using sys.exc_info()
try:
get_items_len(data)
except Exception as e:
print(sys.exc_info()[2])
##
# method 2: get traceback object using Exception.__traceback__
try:
get_items_len(data)
except Exception as e:
print(e.__traceback__ )
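##
# The traceback is a linked list: follow tb_next towards the frame where the
# exception was actually raised, reading tb_lineno and each frame's code object
# along the way (a small sketch; the printed paths depend on where this runs).
try:
    get_items_len(data)
except Exception:
    tb = sys.exc_info()[2]
    while tb is not None:
        frame = tb.tb_frame
        print(f"line {tb.tb_lineno} in {frame.f_code.co_name} ({frame.f_code.co_filename})")
        tb = tb.tb_next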
###Output
<traceback object at 0x7f5c6c5c0180>
###Markdown
If there is no exception in the system, then calling sys.exc_info() will only return `None` values.
###Code
##
# no exception is generated so sys.exc_info() will return None values.
try:
get_items_len(['1','2','3','4'])
except Exception as e:
print(sys.exc_info()[2])
###Output
_____no_output_____
###Markdown
Frame Objects> https://docs.python.org/3/reference/datamodel.html > On this page search for term "Frame objects"Frame objects represent execution frames. It has some special attributes* `f_back` is a reference to the previous stack frame (towards the caller), or None if this is the bottom stack frame* `f_code` is the code object being executed in this frame. We will discuss `Code Objects` in next the section* `f_lineno` is the current line number of the frame — writing to this from within a trace function jumps to the given line (only for the bottom-most frame). A debugger can implement a Jump command (aka Set Next Statement) by writing to f_lineno. This attribute will give you the line number in the code on which exception occurred* `f_locals` is a dictionary used to lookup local variables. From this dictionary we can get all the local variables and their state at the time of exception* `f_globals` is a dictionary for global varaibles Code Objects> https://docs.python.org/3/reference/datamodel.html > On this page search for term "Code Objects"Code objects represent byte-compiled executable Python code or bytecode. Some of its attributes include* `co_name` gives the function name being executed* `co_filename` gives the filename from which the code was compiledThere are many other helpful attributes in this object, and you may read about them from the docs. Visual representation of Traceback, Frame and Code Objects *figure 1: Visual representation of Traceback, Frame and Code Objects* Custom fuction for additional exception infoNow with this additional information on stack trace objects, let us create a function to get variables state at the time of exception.
###Code
#collapse-show
def exc_info_plus():
"""
Provides the usual traceback information, followed by a listing of all the
local variables in each frame.
"""
tb = sys.exc_info()[2]
# iterate forward to the last (most recent) traceback object.
while 1:
if not tb.tb_next:
break
tb = tb.tb_next
stack = []
# get the most recent traceback frame
f = tb.tb_frame
# iterate backwards from recent to oldest traceback frame
while f:
stack.append(f)
f = f.f_back
# stack.reverse() # uncomment to get innermost (most recent) frame at the last
# get exception information and stack trace entries from most recent traceback object
exc_msg = traceback.format_exc()
exc_msg += "\n*** Locals by frame, innermost first ***"
for frame in stack:
exc_msg += f"\nFrame {frame.f_code.co_name} in {frame.f_code.co_filename} at line {frame.f_lineno}"
for key, value in frame.f_locals.items():
exc_msg += f"\n\t {key:20} = "
try:
data = str(value)
# limit variable's output to a certain number. You can adjust it as per your requirement.
# But not to remove it as output from large objects (e.g. Pandas DataFrame) can be troublesome.
output_limit = 50
exc_msg += (data[:output_limit] + "...") if len(data) > output_limit else data
except:
exc_msg += "<ERROR WHILE PRINTING VALUE>"
return exc_msg
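##
# A quick look at the frame and code object attributes described above, using
# the innermost frame of a fresh exception (a sketch; values depend on your session).
try:
    get_items_len(data)
except Exception:
    tb = sys.exc_info()[2]
    while tb.tb_next:          # walk to the innermost traceback node
        tb = tb.tb_next
    frame = tb.tb_frame
    print("function :", frame.f_code.co_name)      # code object: function name
    print("file     :", frame.f_code.co_filename)  # code object: source file
    print("line     :", frame.f_lineno)            # current line in that frame
    print("locals   :", frame.f_locals)            # local variables at the crash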
#collapse-output
# now let us try our custom exception info function and see the output
try:
get_items_len(data)
except Exception as e:
print(exc_info_plus())
###Output
Traceback (most recent call last):
File "<ipython-input-10-01264d9e470a>", line 4, in <module>
get_items_len(data)
File "<ipython-input-3-8421f841ba77>", line 11, in get_items_len
items_len.append(len(i))
TypeError: object of type 'int' has no len()
*** Locals by frame, innermost first ***
Frame get_items_len in <ipython-input-3-8421f841ba77> at line 11
items = ['1', '22', 333, '4444']
items_len = [1, 2]
i = 333
Frame <module> in <ipython-input-10-01264d9e470a> at line 6
__name__ = __main__
__doc__ = Automatically created module for IPython interacti...
__package__ = None
__loader__ = None
__spec__ = None
__builtin__ = <module 'builtins' (built-in)>
__builtins__ = <module 'builtins' (built-in)>
_ih = ['', '#collapse-hide\nfrom platform import python_...
_oh = {}
_dh = ['/data/_notebooks']
In = ['', '#collapse-hide\nfrom platform import python_...
Out = {}
get_ipython = <bound method InteractiveShell.get_ipython of <ipy...
exit = <IPython.core.autocall.ZMQExitAutocall object at 0...
quit = <IPython.core.autocall.ZMQExitAutocall object at 0...
_ =
__ =
___ =
_i = #collapse-show
def exc_info_plus():
"""
Pr...
_ii = ##
# no exception is generated so sys.exc_info() w...
_iii = ##
# method 2: get traceback object using Exceptio...
_i1 = #collapse-hide
from platform import python_version...
python_version = <function python_version at 0x7f5c72dbc430>
_i2 = #collapse-hide
# this is intentionally hidden as w...
data = ['1', '22', 333, '4444']
_i3 = ##
# our toy example function.
import sys, traceba...
sys = <module 'sys' (built-in)>
traceback = <module 'traceback' from '/usr/lib/python3.8/trace...
get_items_len = <function get_items_len at 0x7f5c6c62c790>
_i4 = ##
# let's run our function on "data" received fro...
_i5 = #collapse-output
# calling traceback module built-...
exc_type = <class 'TypeError'>
exc_value = object of type 'int' has no len()
exc_traceback = <traceback object at 0x7f5c6c5cf700>
formatted_lines = ['Traceback (most recent call last):', ' File "<i...
_i6 = ##
# method 1: get traceback object using sys.exc_...
_i7 = ##
# method 2: get traceback object using Exceptio...
_i8 = ##
# no exception is generated so sys.exc_info() w...
_i9 = #collapse-show
def exc_info_plus():
"""
Pr...
exc_info_plus = <function exc_info_plus at 0x7f5c6c62cc10>
_i10 = #collapse-output
#now let us try our custom except...
e = object of type 'int' has no len()
Frame run_code in /usr/local/lib/python3.8/dist-packages/IPython/core/interactiveshell.py at line 3418
self = <ipykernel.zmqshell.ZMQInteractiveShell object at ...
code_obj = <code object <module> at 0x7f5c6c62eea0, file "<ip...
result = <ExecutionResult object at 7f5c6c5c88e0, execution...
async_ = False
__tracebackhide__ = __ipython_bottom__
old_excepthook = <bound method IPKernelApp.excepthook of <ipykernel...
outflag = True
Frame run_ast_nodes in /usr/local/lib/python3.8/dist-packages/IPython/core/interactiveshell.py at line 3338
self = <ipykernel.zmqshell.ZMQInteractiveShell object at ...
nodelist = [<_ast.Try object at 0x7f5c6c5c8850>]
cell_name = <ipython-input-10-01264d9e470a>
interactivity = none
compiler = <IPython.core.compilerop.CachingCompiler object at...
result = <ExecutionResult object at 7f5c6c5c88e0, execution...
to_run_exec = [<_ast.Try object at 0x7f5c6c5c8850>]
to_run_interactive = []
mod = <_ast.Module object at 0x7f5c6c5c8430>
compare = <function InteractiveShell.run_ast_nodes.<locals>....
to_run = [(<_ast.Try object at 0x7f5c6c5c8850>, 'exec')]
node = <_ast.Try object at 0x7f5c6c5c8850>
mode = exec
code = <code object <module> at 0x7f5c6c62eea0, file "<ip...
asy = False
_async = False
Frame run_cell_async in /usr/local/lib/python3.8/dist-packages/IPython/core/interactiveshell.py at line 3146
raw_cell = #collapse-output
#now let us try our custom except...
silent = False
shell_futures = True
transformed_cell = #collapse-output
#now let us try our custom except...
preprocessing_exc_tuple = None
info = <ExecutionInfo object at 7f5c6c5c8be0, raw_cell="#...
error_before_exec = <function InteractiveShell.run_cell_async.<locals>...
cell = #collapse-output
#now let us try our custom except...
compiler = <IPython.core.compilerop.CachingCompiler object at...
_run_async = False
cell_name = <ipython-input-10-01264d9e470a>
code_ast = <_ast.Module object at 0x7f5c6c5c85e0>
interactivity = last_expr
result = <ExecutionResult object at 7f5c6c5c88e0, execution...
self = <ipykernel.zmqshell.ZMQInteractiveShell object at ...
store_history = True
Frame _pseudo_sync_runner in /usr/local/lib/python3.8/dist-packages/IPython/core/async_helpers.py at line 68
coro = <coroutine object InteractiveShell.run_cell_async ...
Frame _run_cell in /usr/local/lib/python3.8/dist-packages/IPython/core/interactiveshell.py at line 2923
self = <ipykernel.zmqshell.ZMQInteractiveShell object at ...
raw_cell = #collapse-output
#now let us try our custom except...
store_history = True
silent = False
shell_futures = True
preprocessing_exc_tuple = None
transformed_cell = #collapse-output
#now let us try our custom except...
coro = <coroutine object InteractiveShell.run_cell_async ...
runner = <function _pseudo_sync_runner at 0x7f5c724ba040>
Frame run_cell in /usr/local/lib/python3.8/dist-packages/IPython/core/interactiveshell.py at line 2877
self = <ipykernel.zmqshell.ZMQInteractiveShell object at ...
raw_cell = #collapse-output
#now let us try our custom except...
store_history = True
silent = False
shell_futures = True
result = None
Frame run_cell in /usr/local/lib/python3.8/dist-packages/ipykernel/zmqshell.py at line 539
self = <ipykernel.zmqshell.ZMQInteractiveShell object at ...
args = ('#collapse-output\n#now let us try our custom exc...
kwargs = {'store_history': True, 'silent': False}
__class__ = <class 'ipykernel.zmqshell.ZMQInteractiveShell'>
Frame do_execute in /usr/local/lib/python3.8/dist-packages/ipykernel/ipkernel.py at line 302
self = <ipykernel.ipkernel.IPythonKernel object at 0x7f5c...
code = #collapse-output
#now let us try our custom except...
silent = False
store_history = True
user_expressions = {}
allow_stdin = True
reply_content = {}
run_cell = <bound method InteractiveShell.run_cell_async of <...
should_run_async = <bound method InteractiveShell.should_run_async of...
shell = <ipykernel.zmqshell.ZMQInteractiveShell object at ...
Frame wrapper in /usr/local/lib/python3.8/dist-packages/tornado/gen.py at line 234
args = (<ipykernel.ipkernel.IPythonKernel object at 0x7f5...
kwargs = {}
future = <Future pending>
ctx_run = <built-in method run of Context object at 0x7f5c6c...
result = <generator object IPythonKernel.do_execute at 0x7f...
func = <function IPythonKernel.do_execute at 0x7f5c6f6978...
Frame execute_request in /usr/local/lib/python3.8/dist-packages/ipykernel/kernelbase.py at line 540
self = <ipykernel.ipkernel.IPythonKernel object at 0x7f5c...
stream = <zmq.eventloop.zmqstream.ZMQStream object at 0x7f5...
ident = [b'e2e3826d25fb4c63876268cdc5a787ad']
parent = {'header': {'msg_id': '218114cb9837444cbd29466d87b...
content = {'code': '#collapse-output\n#now let us try our cu...
code = #collapse-output
#now let us try our custom except...
silent = False
store_history = True
user_expressions = {}
allow_stdin = True
stop_on_error = True
metadata = {'started': datetime.datetime(2022, 2, 14, 9, 30, ...
Frame wrapper in /usr/local/lib/python3.8/dist-packages/tornado/gen.py at line 234
args = (<ipykernel.ipkernel.IPythonKernel object at 0x7f5...
kwargs = {}
future = <Future pending>
ctx_run = <built-in method run of Context object at 0x7f5c6c...
result = <generator object Kernel.execute_request at 0x7f5c...
func = <function Kernel.execute_request at 0x7f5c6f747f70...
Frame dispatch_shell in /usr/local/lib/python3.8/dist-packages/ipykernel/kernelbase.py at line 265
self = <ipykernel.ipkernel.IPythonKernel object at 0x7f5c...
stream = <zmq.eventloop.zmqstream.ZMQStream object at 0x7f5...
msg = {'header': {'msg_id': '218114cb9837444cbd29466d87b...
idents = [b'e2e3826d25fb4c63876268cdc5a787ad']
msg_type = execute_request
handler = <bound method Kernel.execute_request of <ipykernel...
Frame wrapper in /usr/local/lib/python3.8/dist-packages/tornado/gen.py at line 234
args = (<ipykernel.ipkernel.IPythonKernel object at 0x7f5...
kwargs = {}
future = <Future pending>
ctx_run = <built-in method run of Context object at 0x7f5c6f...
result = <generator object Kernel.dispatch_shell at 0x7f5c6...
func = <function Kernel.dispatch_shell at 0x7f5c6f7473a0>
Frame process_one in /usr/local/lib/python3.8/dist-packages/ipykernel/kernelbase.py at line 362
self = <ipykernel.ipkernel.IPythonKernel object at 0x7f5c...
wait = True
priority = 10
t = 13
dispatch = <bound method Kernel.dispatch_shell of <ipykernel....
args = (<zmq.eventloop.zmqstream.ZMQStream object at 0x7f...
Frame run in /usr/local/lib/python3.8/dist-packages/tornado/gen.py at line 775
self = <tornado.gen.Runner object at 0x7f5c6c60f8e0>
future = None
exc_info = None
value = (10, 13, <bound method Kernel.dispatch_shell of <i...
Frame inner in /usr/local/lib/python3.8/dist-packages/tornado/gen.py at line 814
f = None
self = <tornado.gen.Runner object at 0x7f5c6c60f8e0>
Frame _run_callback in /usr/local/lib/python3.8/dist-packages/tornado/ioloop.py at line 741
self = <tornado.platform.asyncio.AsyncIOMainLoop object a...
callback = functools.partial(<function Runner.handle_yield.<l...
Frame <lambda> in /usr/local/lib/python3.8/dist-packages/tornado/ioloop.py at line 688
f = <Future finished result=(10, 13, <bound method...7...
callback = <function Runner.handle_yield.<locals>.inner at 0x...
future = <Future finished result=(10, 13, <bound method...7...
self = <tornado.platform.asyncio.AsyncIOMainLoop object a...
Frame _run in /usr/lib/python3.8/asyncio/events.py at line 81
self = <Handle IOLoop.add_future.<locals>.<lambda>(<Futur...
Frame _run_once in /usr/lib/python3.8/asyncio/base_events.py at line 1859
self = <_UnixSelectorEventLoop running=True closed=False ...
sched_count = 0
handle = <Handle IOLoop.add_future.<locals>.<lambda>(<Futur...
timeout = 0
event_list = []
end_time = 113697.83311910101
ntodo = 2
i = 0
Frame run_forever in /usr/lib/python3.8/asyncio/base_events.py at line 570
self = <_UnixSelectorEventLoop running=True closed=False ...
old_agen_hooks = asyncgen_hooks(firstiter=None, finalizer=None)
Frame start in /usr/local/lib/python3.8/dist-packages/tornado/platform/asyncio.py at line 199
self = <tornado.platform.asyncio.AsyncIOMainLoop object a...
old_loop = <_UnixSelectorEventLoop running=True closed=False ...
Frame start in /usr/local/lib/python3.8/dist-packages/ipykernel/kernelapp.py at line 612
self = <ipykernel.kernelapp.IPKernelApp object at 0x7f5c7...
Frame launch_instance in /usr/local/lib/python3.8/dist-packages/traitlets/config/application.py at line 845
cls = <class 'ipykernel.kernelapp.IPKernelApp'>
argv = None
kwargs = {}
app = <ipykernel.kernelapp.IPKernelApp object at 0x7f5c7...
Frame <module> in /usr/local/lib/python3.8/dist-packages/ipykernel_launcher.py at line 16
__name__ = __main__
__doc__ = Entry point for launching an IPython kernel.
This...
__package__ =
__loader__ = <_frozen_importlib_external.SourceFileLoader objec...
__spec__ = ModuleSpec(name='ipykernel_launcher', loader=<_fro...
__annotations__ = {}
__builtins__ = <module 'builtins' (built-in)>
__file__ = /usr/local/lib/python3.8/dist-packages/ipykernel_l...
__cached__ = /usr/local/lib/python3.8/dist-packages/__pycache__...
sys = <module 'sys' (built-in)>
app = <module 'ipykernel.kernelapp' from '/usr/local/lib...
Frame _run_code in /usr/lib/python3.8/runpy.py at line 87
code = <code object <module> at 0x7f5c7317e030, file "/us...
run_globals = {'__name__': '__main__', '__doc__': 'Entry point f...
init_globals = None
mod_name = __main__
mod_spec = ModuleSpec(name='ipykernel_launcher', loader=<_fro...
pkg_name =
script_name = None
loader = <_frozen_importlib_external.SourceFileLoader objec...
fname = /usr/local/lib/python3.8/dist-packages/ipykernel_l...
cached = /usr/local/lib/python3.8/dist-packages/__pycache__...
Frame _run_module_as_main in /usr/lib/python3.8/runpy.py at line 194
mod_name = ipykernel_launcher
alter_argv = 1
mod_spec = ModuleSpec(name='ipykernel_launcher', loader=<_fro...
code = <code object <module> at 0x7f5c7317e030, file "/us...
main_globals = {'__name__': '__main__', '__doc__': 'Entry point f...
###Markdown
Note the output from the first stack frame in the above stack trace. It is now easy to see the _items_ that our function received. The item at index _i_ on which our function crashed (333) is also available. With our custom function, unexpected errors are logged in a format that makes it much easier to find and fix them. Let's fix our function to handle unexpected integer values.
###Code
##
# let's fix our function to handle unexpected 'int' items by converting them to 'str'
def get_items_len(items: list) -> list:
"""
this function returns the length of items received in a list.
"""
items_len = []
for i in map(str, items):
items_len.append(len(i))
return items_len
# test it again
get_items_len(data)
###Output
_____no_output_____ |
Latest_Notebooks/NN_GGG.ipynb | ###Markdown
Feedforward neural network with regularizationNeural networks are a popular and powerful type of machine learning model, loosely inspired by the way the brain works. There are many types of neural networks; in this notebook I'll implement a basic feedforward network as described in Andrew Ng's course on Coursera. The structure of this neural network is simple: an input layer, one hidden layer and an output layer. Regularization is added to prevent overfitting.
###Code
import numpy as np
import pandas as pd
import random
from scipy.special import expit
import scipy.optimize
from scipy.optimize import minimize
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
###Output
_____no_output_____
###Markdown
I add the same new features as in the other [notebook](http://nbviewer.jupyter.org/github/Erlemar/Erlemar.github.io/blob/master/Notebooks/GGG.ipynb).
###Code
train['hair_soul'] = train['hair_length'] * train['has_soul']
train['hair_bone'] = train['hair_length'] * train['bone_length']
test['hair_soul'] = test['hair_length'] * test['has_soul']
test['hair_bone'] = test['hair_length'] * test['bone_length']
train['hair_soul_bone'] = train['hair_length'] * train['has_soul'] * train['bone_length']
test['hair_soul_bone'] = test['hair_length'] * test['has_soul'] * test['bone_length']
X = np.array(train.drop(['id', 'color', 'type'], axis=1))
X = np.insert(X,0,1,axis=1)
X_test = np.array(test.drop(['id', 'color'], axis=1))
X_test = np.insert(X_test,0,1,axis=1)
Y_train = np.array((pd.get_dummies(train['type'], drop_first=False)).astype(float))
#I'll need this for predictions.
monsters = (pd.get_dummies(train['type'], drop_first=False)).columns
###Output
_____no_output_____
###Markdown
These are the parameters of the neural network. I added an additional column to the variables as a bias term, so the input size is 8. The number of nodes in the hidden layer is arbitrary; I chose 12 after some tests. `params` holds random initial weights with the same total size as the network's weight matrices.
###Code
hidden_size = 12
learning_rate = 1
params = (np.random.random(size=hidden_size * (X.shape[1]) + Y_train.shape[1] * (hidden_size + 1)) - 0.5)
###Output
_____no_output_____
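###Markdown
As a quick sanity check, the length of `params` can be computed by hand: `theta1` needs `hidden_size * input_size` weights and `theta2` needs `n_classes * (hidden_size + 1)` weights. The sketch below assumes the setup of this notebook (7 engineered feature columns plus the bias column, so `X.shape[1] == 8`, and 3 monster classes).
###Code
# Worked example of the parameter count (assumes X.shape[1] == 8 and 3 classes)
n_theta1 = 12 * 8           # hidden_size * input_size = 96
n_theta2 = 3 * (12 + 1)     # n_classes * (hidden_size + 1) = 39
print(n_theta1 + n_theta2)  # 135, the expected length of params
###Output
_____no_output_____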
###Markdown
Forward propagation. The input is multiplied by the weights, then passes through the hidden layer with a sigmoid activation and finally through the output layer, also with a sigmoid activation.
###Code
def forward_propagate(X, theta1, theta2):
z2 = X * theta1.T
a2 = np.insert(expit(z2), 0, 1, axis=1)
a3 = expit(a2 * theta2.T)
return z2, a2, a3
###Output
_____no_output_____
###Markdown
Backpropagation. "Going back" through the network to compute the gradients that minimize the error, with regularization added here.
###Code
def back_propagate(X, y, theta1, theta2, z2, a2, a3):
D1 = np.zeros(theta1.shape)
D2 = np.zeros(theta2.shape)
for t in range(len(X)):
z2t = z2[t,:]
d3t = a3[t,:] - y[t,:]
z2t = np.insert(z2t, 0, values=1)
d2t = np.multiply((theta2.T * d3t.T).T, np.multiply(expit(z2t), (1 - expit(z2t))))
D1 += (d2t[:,1:]).T * X[t,:]
D2 += d3t.T * a2[t,:]
D1 = D1 / len(X)
D2 = D2 / len(X)
D1[:,1:] += (theta1[:,1:] * learning_rate) / len(X)
D2[:,1:] += (theta2[:,1:] * learning_rate) / len(X)
return D1, D2
###Output
_____no_output_____
###Markdown
Cost function. Convert the input and output into matrices and split `params` into the two theta matrices. Then forward propagate and calculate the loss with regularization. After that, backpropagate to obtain the gradients that minimize the cost.
###Code
def cost(params, X, y, learningRate):
X = np.matrix(X)
y = np.matrix(y)
theta1 = np.matrix(np.reshape(params[:hidden_size * (X.shape[1])], (hidden_size, (X.shape[1]))))
theta2 = np.matrix(np.reshape(params[hidden_size * (X.shape[1]):], (Y_train.shape[1], (hidden_size + 1))))
z2, a2, a3 = forward_propagate(X, theta1, theta2)
J = 0
for i in range(len(X)):
first_term = np.multiply(-y[i,:], np.log(a3[i,:]))
second_term = np.multiply((1 - y[i,:]), np.log(1 - a3[i,:]))
J += np.sum(first_term - second_term)
J = (J + (float(learningRate) / 2) * (np.sum(np.power(theta1[:,1:], 2)) + np.sum(np.power(theta2[:,1:], 2)))) / len(X)
#Backpropagation
D1, D2 = back_propagate(X, y, theta1, theta2, z2, a2, a3)
#Unravel the gradient into a single array.
grad = np.concatenate((np.ravel(D1), np.ravel(D2)))
return J, grad
#Simply to see that this works.
J, grad = cost(params, X, Y_train, 1)
J, grad.shape
#Minimizing function.
fmin = minimize(cost, x0=params, args=(X, Y_train, learning_rate), method='TNC', jac=True, options={'maxiter': 600})
#Get the optimized weights and use them to get output.
theta1 = np.matrix(np.reshape(fmin.x[:hidden_size * (X.shape[1])], (hidden_size, (X.shape[1]))))
theta2 = np.matrix(np.reshape(fmin.x[hidden_size * (X.shape[1]):], (Y_train.shape[1], (hidden_size + 1))))
z2, a2, a3 = forward_propagate(X, theta1, theta2)
#Prediction is in the form of probabilities for each class. Get the class with the highest probability.
def pred(a):
for i in range(len(a)):
yield monsters[np.argmax(a[i])]
prediction = list(pred(a3))
#Accuracy on training dataset.
accuracy = sum(prediction == train['type']) / len (train['type'])
print('accuracy = {0}%'.format(accuracy * 100))
#Predict on test set.
z2, a2, a3_test = forward_propagate(X_test, theta1, theta2)
prediction_test = list(pred(a3_test))
submission = pd.DataFrame({'id':test['id'], 'type':prediction_test})
submission.to_csv('GGG_submission.csv', index=False)
###Output
_____no_output_____ |
3_yolov5.ipynb | ###Markdown
###Code
!git clone https://github.com/ultralytics/yolov5 # clone repo
!pip install -qr yolov5/requirements.txt # install dependencies (ignore errors)
%cd yolov5
!pwd
import torch
from IPython.display import Image, clear_output # to display images
from utils.google_utils import gdrive_download # to download models/datasets
!python detect.py --source ../elephant2.png
from google.colab import drive
drive.mount('/content/drive')
!pwd
!python detect.py --source ../giraffe.jpg
!python detect.py --source ../20210608.mp4
###Output
_____no_output_____
###Markdown
###Code
!git clone https://github.com/ultralytics/yolov5 # clone repo
!pip install -qr yolov5/requirements.txt # install dependencies (ignore errors)
%cd yolov5
import torch
from IPython.display import Image, clear_output # to display images
from utils.google_utils import gdrive_download # to download models/datasets
!python detect.py --source ../elephant1.jpg
!python detect.py --source ../giraffe.jpg
!python detect.py --source ../park_bicycle.mp4
###Output
_____no_output_____ |
01_analisis_y_visualizacion/Tarea integradora - parte 1.ipynb | ###Markdown
Diplomatura AACSyA 2018 - FaMAF - UNCAnalysis and Data Visualization Integrative assignment - Part 1In this notebook we present a series of exercises for working with discrete random variables, both numerical and categorical, illustrating the probability concepts explained in class. The first step is to check that all required libraries are correctly installed.
###Code
# %matplotlib notebook
%matplotlib inline
import pandas
import numpy
import seaborn
seaborn.set_style('whitegrid')
seaborn.set_context('talk')
###Output
_____no_output_____
###Markdown
Load the dataset into a pandas DataFrameDataFrames are tables with a lot of operations already implemented. Practically all SQL functionality can be reproduced in pandas with a few lines. In addition, pandas has functions to read and write files in numerous formats.Now we read the .csv file directly
###Code
print(pandas)
dataset = pandas.read_csv('https://cs.famaf.unc.edu.ar/~mteruel/datasets/diplodatos/violencia-institucional-2018-01.csv',
encoding='utf8')
dataset[:3]
###Output
_____no_output_____
###Markdown
The year values do not have a consistent format across all rows, so we will process them before starting. Missing year values are replaced by NaN, and since numpy's integer representation does not support this kind of value, we will convert each year to numpy.float. Other decisions could be made at this point, such as replacing missing values with 0 or removing them from the dataset, depending on the kind of data required.We will also rename the 'año' column, since the character ñ prevents using the pandas syntactic sugar of accessing a column's values with method-call syntax.
###Code
import six
def convert_to_int(row):
try:
return float(row)
except ValueError:
if isinstance(row, six.string_types):
return float(row.split(',')[0])
return row
dataset.loc[:,'year'] = dataset[u'año'].apply(convert_to_int)
# The province names could also be decoded
# dataset.loc[:,'provincia'] = dataset.provincia.apply(lambda x: x.decode("utf-8"))
dataset.shape, dataset.year.dropna().shape
dataset[:0]
###Output
_____no_output_____
###Markdown
--- Exercises Exercise 1* Create a plot showing the distribution of events per year* Build a probability frequency table for the event Y = 'year in which it occurred'$$fdp(Y)$$* Plot those probabilities with the chart type you consider appropriate. Are both plots the same? Which one do you consider more useful?* What explains the shape of the distribution? What trend does it show? Has institutional violence increased in recent years?* Compute the cumulative distribution function of the variable Y and plot it. What is the probability that an act of violence occurred before the last change of government? (a possible sketch follows below)IMPORTANT: What do we do with the missing data?
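For the last question, a minimal sketch could estimate the probability directly from the empirical distribution; it assumes that the "last change of government" refers to December 2015, which is an assumption made here and not stated in the exercise.
###Code
# Sketch only: P(event happened before 2016), assuming the last change of government was in December 2015
years = dataset.year.dropna()
print((years < 2016).mean())
###Output
_____no_output_____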
###Code
#fdp = dataset.groupby(by=['year']).size().sort_values(ascending=False)
#seaborn.distplot(a=fdp, axlabel="year")
fdp = dataset.year.dropna()
f = fdp.value_counts().reset_index(name='freq').rename(columns={'index': 'year'})
f
p = seaborn.barplot(x=f.year, y=f.freq)
p.set_xticklabels(labels=f["year"].value_counts().index.tolist(), rotation=90)
p
fdp.value_counts(normalize=True)
#dataset.groupby(by=['year'], as_index=False).count().value_counts(normalize=True)
#fdp.sort_values(by=['year'], ascending=False)
seaborn.distplot(a=fdp, axlabel="year")
cf = dataset.year.dropna()
cf = cf.value_counts(normalize=True).sort_index().cumsum().reset_index(name='cf').rename(columns={'index': 'year'})
cf
cf_barplot = seaborn.barplot(x=cf.year, y=cf.cf)
cf_barplot.set_xticklabels(labels=f["year"].value_counts().index.tolist(), rotation=90)
cf_barplot
###Output
_____no_output_____ |
nlp/sentiment.ipynb | ###Markdown
Sentiment AnalysisSentiment analysis on airline review data (using data collected from Twitter)
###Code
import pandas as pd
import re, os
pd.set_option('display.max_colwidth', 240)
PATH = '../data/'
FILE = os.path.join(PATH, 'tweets_public.csv')
tweets = pd.read_csv(FILE)
tweets.sample(1)
# select only the text column (plus the sentiment label) for text cleaning
tweets = tweets.loc[:, ['text', 'airline_sentiment']]
tweets.sample(5)
# store the @mention in a separate column
tweets['at'] = tweets.text.str.extract(r'^(@\S+)')
tweets.sample(5)
# remove hashtags, @handles and http links from the text
def remove_handles(text):
    return re.sub(r'@\S+|https://\S+|\#', '', text) # remove @handles, hashtags and URLs
tweets.text = tweets.text.apply(remove_handles)
tweets.sample(10)
###Output
_____no_output_____
###Markdown
Sentiment analysis- textblob: only supports English, so Spanish text is translated to English before use- sentiment_analysis_spanish
###Code
from textblob import TextBlob
# convert es -> en
def get_tweet_sentiment(tweet):
    analysis = TextBlob(tweet)
    language = analysis.detect_language()
    if language == 'en':
        analysis_ready = analysis
    else: # translate non-English text to English
        analysis_ready = analysis.translate(to='en')
    if analysis_ready.sentiment.polarity > 0: # sentiment exposes a polarity score
return 'positive'
elif analysis_ready.sentiment.polarity == 0:
return 'neutral'
else:
return 'negative'
# example
sentence = 'I hate New York.'
blob = TextBlob(sentence)
blob.sentiment # [-1, 1]
# pick one random sentence
sentence = tweets.text.sample(1).tolist()
sentence
get_tweet_sentiment(sentence[0])
###Output
_____no_output_____
###Markdown
sentiment_analysis_spanishScores are expressed in the 0~1 range, from negative to positive.
###Code
from sentiment_analysis_spanish import sentiment_analysis
#test
senti_analizer = sentiment_analysis.SentimentAnalysisSpanish()
print(senti_analizer.sentiment("Me gusta la tombola. Es genial.")) # positive
tweets['polarity'] = tweets.text.apply(senti_analizer.sentiment)
tweets.head(3)
# sorting
tweets.sort_values(by=['polarity'], ascending=False).head(10)
#option 1. label tweets as positive or negative based on the polarity score (2 classes)
tweets['label'] = tweets.polarity.apply(lambda x: 'positive' if x >= 0.5 else 'negative')
tweets.head(5)
#option 2. label tweets according to airline_sentiment (3 classes)
def tag_label(sentiment):
sentiment = sentiment.lower()
if sentiment == 'neutral':
return 0
elif sentiment == 'positive':
return 1
else:
return 2
tweets['label'] = tweets.airline_sentiment.apply(tag_label)
tweets.head(5)
###Output
_____no_output_____
###Markdown
visualization
###Code
import matplotlib.pyplot as plt
import seaborn as sns
tweets.label.value_counts() # the data is mostly negative
fig, axe = plt.subplots(ncols=1)
fig.set_size_inches(10, 3)
sns.countplot(tweets.label)
plt.show()
# save to a file
out_file = os.path.join(PATH, 'tweets_public_polarity.csv')
tweets.to_csv(out_file, sep='\t', encoding='utf-8')
###Output
_____no_output_____ |
04 Exercise Classification MNIST.ipynb | ###Markdown
Exercise: Classification of MNIST (10 points)The goal of this exercise is to create a simple image classification network, to work on improving a model, and to learn how to debug and check the training data. We start with a simple CNN model for digit classification on the MNIST dataset [1]. This dataset contains 60,000 scans of digits for training and 10,000 scans of digits for validation. A sample consists of 28x28 features with values between 0 and 255; note that the features are inverted: real digits are usually dark on a light background, while MNIST digits are light on a dark background.This example is partly based on a tutorial by Jason Brownlee [2]. Please follow the instructions in the notebook.```[1] http://yann.lecun.com/exdb/mnist/[2] https://machinelearningmastery.com/how-to-develop-a-convolutional-neural-network-from-scratch-for-mnist-handwritten-digit-classification/```**NOTE**Document your results by simply adding a markdown cell or a python cell (as a comment) and writing your statements into this cell. For some tasks the result cell is already available.[](https://colab.research.google.com/github/ditomax/mlexercises/blob/master/04%20Exercise%20Classification%20MNIST.ipynb)
###Code
#
# Prepare colab
#
COLAB=False
try:
%tensorflow_version 2.x
print("running on google colab")
COLAB=True
except:
print("not running on google colab")
#
# Turn off errors and warnings (does not work sometimes)
#
from warnings import simplefilter
# ignore all future warnings
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=Warning)
#
# Import some modules
#
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Dropout
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import model_from_json
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.metrics import confusion_matrix
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#
# Diagram size
#
plt.rcParams['figure.figsize'] = [16, 9]
#
# nasty hack for macos
#
os.environ['KMP_DUPLICATE_LIB_OK']='True'
#
# check version of tensorflow
#
print('starting notebook with tensorflow version {}'.format(tf.version.VERSION))
###Output
_____no_output_____
###Markdown
Load and prepare data
###Code
#
# Loading of the data (very simplified) with split into train and test data (fixed split)
#
(x_train, y_train), (x_test, y_test) = mnist.load_data()
#
# Check some data rows
#
x_train[0][10]
#
# Print shapes of data
#
print('training data: X=%s, y=%s' % (x_train.shape, y_train.shape))
print('test data: X=%s, y=%s' % (x_test.shape, y_test.shape))
#
# Display some examples of the data
#
for i in range(15):
plt.subplot(4,4,1 + i)
plt.imshow(x_train[i], cmap=plt.get_cmap('gray'))
plt.show()
#
# Display labels of some data
#
for i in range(15):
print('label {}'.format(y_train[i]))
###Output
_____no_output_____
###Markdown
TaskPlot a histogram of the classes of the training data (1 point). After plotting, give a short assessment of whether this distribution is OK for use in a classification setting. (One possible sketch is shown after the starter cell below.)
###Code
#
# Histogram of class counts (digits)
#
# Task: plot the histogram as array or as plot
#
###Output
_____no_output_____
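###Markdown
One possible sketch for this task (not the official solution) counts the digit classes with numpy and plots them as a bar chart; note that at this point `y_train` still holds integer labels, since the one-hot encoding happens further below.
###Code
#
# Possible sketch: count and plot the digit classes of the training data
#
classes, counts = np.unique(y_train, return_counts=True)
print(dict(zip(classes, counts)))
plt.bar(classes, counts)
plt.xlabel('digit class')
plt.ylabel('number of training samples')
plt.show()
###Output
_____no_output_____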
###Markdown
Prepare data for classification TaskFind out why the unusual shap of the input data is required? Why is (-1,28,28) not sufficien? (1 point) Give a short description here in the comment.Hint: check the tensorflow keras documentation about 2D cnn layer.
###Code
#
# Change shape of data for model
#
x_train = x_train.reshape((x_train.shape[0], 28, 28, 1))
x_test = x_test.reshape((x_test.shape[0], 28, 28, 1))
#
# your anser here
#
#
# Scale pixel values into range of 0 to 1
#
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train = x_train / 255.0
x_test = x_test / 255.0
# check one transformed sample row
x_train[0][10]
#
# One-hot encoding for classes
#
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)
# check the one-hot encoding
y_train
###Output
_____no_output_____
###Markdown
Build the first model TaskComplete the code for a simple convolutional neural network (CNN) with one CNN layer (2 Points).Hint: look for examples in the internet or in the slides.
###Code
model = Sequential()
...
model.add(Dense(10, activation='softmax'))
# compile model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# get a short summary of the model
model.summary()
# train model
history = model.fit(x_train, y_train, batch_size=128, epochs=5 )
###Output
_____no_output_____
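###Markdown
One possible completion of the starter cell above (a sketch only, assuming that a single Conv2D block is enough for this first model; `example_model` is a name introduced here and not part of the exercise):
###Code
# Sketch: a minimal CNN with one convolutional layer, kept separate from the exercise cell above
example_model = Sequential()
example_model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
example_model.add(MaxPooling2D((2, 2)))
example_model.add(Flatten())
example_model.add(Dense(100, activation='relu'))
example_model.add(Dense(10, activation='softmax'))
example_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
example_model.summary()
###Output
_____no_output_____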
###Markdown
First prediction with model TaskDescribe the meaning of the numbers returned from the prediction. (1 point)Write your findings here in the commentsHint: look at the definition of the output layer (last layer) in the model.
###Code
model.predict(x_train[:1])
# compare with expexted result
y_train[:1]
#
# Measure the accuracy
#
_, acc = model.evaluate(x_test, y_test, verbose=0)
print('accuracy {:.5f}'.format(acc))
#
# Estimate the number of false classifications in production use
#
print('with {} samples there are about {:.0f} false classifications to expect.'.format( x_test.shape[0], (x_test.shape[0]*(1-acc))))
###Output
_____no_output_____
###Markdown
Print out training progress
###Code
#
# Plot loss and accuracy
#
def summarize_diagnostics(history,modelname):
plt.subplot(211)
plt.title('Cross Entropy Loss')
plt.plot(history.history['loss'], color='blue', label='train')
plt.subplot(212)
plt.title('Classification Accuracy')
plt.plot(history.history['accuracy'], color='green', label='train')
plt.subplots_adjust(hspace=0.5)
plt.savefig( 'results/' + modelname + '_plot.png')
plt.show()
plt.close()
summarize_diagnostics(history,'03_model1')
###Output
_____no_output_____
###Markdown
Improve the model significantly TaskYour customer requires to have less than 1% of wrong classifications. Start to build a better model with significantly less than 100 wrong classifications in the 10000 test samples. Research the internet for the optimal model setup for MNIST classification and try to replicate this model here. Make sure to document the source where you found the hints for the improvement (links to sources) (2 Points).
###Code
#
# Setup new model
#
def create_model_2():
model = Sequential()
...
model.add(Dense(10, activation='softmax'))
return model
#
# instantiate model
#
model2 = create_model_2()
#
# compile
#
model2.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
#
# train with history
#
history = model2.fit(x_train, y_train, batch_size=128, epochs=15 )
#
# Measure the accuracy
#
_, acc = model2.evaluate(x_test, y_test, verbose=0)
print('Accuracy {:.5f}'.format(acc))
#
# Estimate the number of false classifications in production use
#
print('with {} samples there are about {:.0f} false classifications to expect.'.format( x_test.shape[0], (x_test.shape[0]*(1-acc))))
# Result: (describe where you found the hints for improvement and how much it improved)
model2.summary()
summarize_diagnostics(history,'03_model2')
###Output
_____no_output_____
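###Markdown
As one example of a setup that is often reported to reach around or above 99% accuracy on MNIST, the sketch below stacks two convolutional blocks with dropout. This is a commonly used architecture pattern, not the required solution, and `create_model_2_example` is a name introduced only here.
###Code
# Sketch of a deeper CNN for MNIST (two conv blocks + dropout)
def create_model_2_example():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    return model
###Output
_____no_output_____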
###Markdown
Save the model
###Code
#
# Save a model for later use
#
prefix = 'results/03_'
modelName = prefix + "model.json"
weightName = prefix + "model.h5"
# set to True if the model should be saved
save_model = True
if save_model:
model_json = model2.to_json()
with open( modelName , "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model2.save_weights( weightName )
print("saved model to disk as {} {}".format(modelName,weightName))
else:
# load model (has to be saved before, model is not part of git)
json_file = open(modelName, 'r')
loaded_model_json = json_file.read()
json_file.close()
model2 = model_from_json(loaded_model_json)
# load weights into new model
model2.load_weights(weightName)
print("loaded model from disk")
###Output
_____no_output_____
###Markdown
Find characteristics in the errors of the model TaskThere are still too many false classifications using the model. Evaluate all test data and plot examples of failed classifications to get a better undestanding what goes wring. Plot a confusion matrix to get a better insight. (1 Point).
###Code
y_test_predictions = model2.predict(x_test)
#
# generate confusion matrix
# Task: find a suitable function for generating a confusion matrix as array
#
confusion = ...
print(confusion)
# make a nice plot of the confusion matrix
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
#print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# plot confusion matrix
plot_confusion_matrix(confusion,['0','1','2','3','4','5','6','7','8','9'] )
###Output
_____no_output_____
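###Markdown
A possible sketch for this task (one way among several): compute the confusion matrix from the predicted class indices and collect the indices of misclassified test images so they can be plotted.
###Code
# Sketch: confusion matrix plus a small gallery of misclassified digits
y_pred_classes = np.argmax(y_test_predictions, axis=1)
y_true_classes = np.argmax(y_test, axis=1)
example_confusion = confusion_matrix(y_true_classes, y_pred_classes)
print(example_confusion)
wrong_idx = np.where(y_pred_classes != y_true_classes)[0]
for plot_pos, idx in enumerate(wrong_idx[:9]):
    plt.subplot(3, 3, plot_pos + 1)
    plt.imshow(x_test[idx].reshape(28, 28), cmap=plt.get_cmap('gray'))
    plt.title('true {} / predicted {}'.format(y_true_classes[idx], y_pred_classes[idx]))
    plt.axis('off')
plt.show()
###Output
_____no_output_____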
###Markdown
Improve the training Beside many other options, there are two streight forward ways to improve your model:1. Add more data for those classes which are poorely classified1. Add augmentation for the training dataImplement the augmentation strategy and test if there is an improvement. Augmentation TaskTask: Search the internet for the ImageDataGenerator class of the Keras framework and implement such a generator for the training of the model. Select suitable augmentation which fits to the use-case. Document the resulting accuracy. (2 Points)
###Code
# Augmentation solution
...
# instantiate model
model3 = create_model_2()
# Training
...
#
# Evaluierung
#
_, acc = model3.evaluate(x_test, y_test, verbose=0)
print('accuracy {:.3f} '.format(acc) )
summarize_diagnostics(history,'03_model3')
y_test_predictions = model3.predict(x_test)
# generate confusion matrix
confusion = confusion_matrix(np.argmax(y_test,axis=1), np.argmax(y_test_predictions,axis=1))
# plot confusion matrix
plot_confusion_matrix(confusion,['0','1','2','3','4','5','6','7','8','9'] )
###Output
_____no_output_____ |
3. NLP/AZ/Text Generation/04_nlp_constructing_text_generation_model.ipynb | ###Markdown
Text Generation
###Code
import string
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional
###Output
_____no_output_____
###Markdown
Helper Functions
###Code
def create_lyrics_corpus(dataset, field):
# Remove all other punctuation
dataset[field] = dataset[field].str.replace('[{}]'.format(string.punctuation), '')
# Make it lowercase
dataset[field] = dataset[field].str.lower()
# Make it one long string to split by line
lyrics = dataset[field].str.cat()
corpus = lyrics.split('\n')
# Remove any trailing whitespace
for l in range(len(corpus)):
corpus[l] = corpus[l].rstrip()
# Remove any empty lines
corpus = [l for l in corpus if l != '']
return corpus
def tokenize_corpus(corpus, num_words=-1):
# Fit a Tokenizer on the corpus
if num_words > -1:
tokenizer = Tokenizer(num_words=num_words)
else:
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
return tokenizer
###Output
_____no_output_____
###Markdown
Step 1 : Get the Corpus
###Code
# Read the dataset from csv - just first 10 songs for now
path = tf.keras.utils.get_file('songdata.csv',
'https://drive.google.com/uc?id=1LiJFZd41ofrWoBtW-pMYsfz1w8Ny0Bj8')
print (path)
dataset = pd.read_csv(path, dtype=str)[:10]
corpus = create_lyrics_corpus(dataset, 'text')
###Output
_____no_output_____
###Markdown
Step 2 : Tokenize the Corpus
###Code
# Tokenize the corpus
tokenizer = tokenize_corpus(corpus)
total_words = len(tokenizer.word_index) + 19 # why 19?
#print(tokenizer.word_index)
print(total_words)
dataset.head()
###Output
_____no_output_____
###Markdown
Step 3 : Create n-Gram
###Code
sequences = []
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i+1]
sequences.append(n_gram_sequence)
###Output
_____no_output_____
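###Markdown
A small worked example of the n-gram expansion above (the token ids are made up for this illustration): a tokenized line such as `[81, 82, 142, 197]` is turned into the input sequences `[81, 82]`, `[81, 82, 142]` and `[81, 82, 142, 197]`, so the model always learns to predict the next word from all preceding words of the line.
###Code
# Worked example of the n-gram expansion (made-up token ids)
example_line = [81, 82, 142, 197]
example_sequences = [example_line[:i + 1] for i in range(1, len(example_line))]
print(example_sequences)  # [[81, 82], [81, 82, 142], [81, 82, 142, 197]]
###Output
_____no_output_____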
###Markdown
Step 4 : Pad sequences
###Code
# Pad sequences for equal input length
max_sequence_len = max([len(seq) for seq in sequences])
sequences = np.array(pad_sequences(sequences, maxlen=max_sequence_len, padding='pre'))
###Output
_____no_output_____
###Markdown
Step 5 : X and y - Values
###Code
# Split sequences between the "input" sequence and "output" predicted word
X = sequences[:,:-1]
y_label = sequences[:,-1]
# One-hot encode the labels
y = tf.keras.utils.to_categorical(y_label, num_classes = total_words)
###Output
_____no_output_____
###Markdown
Explore and Trace
###Code
# Check out how some of our data is being stored
# The Tokenizer has just a single index per word
print(tokenizer.word_index['know'])
print(tokenizer.word_index['feeling'])
# Input sequences will have multiple indexes
print(X[5])
print(X[6])
# And the one hot labels will be as long as the full spread of tokenized words
print(y[5])
print(y[6])
###Output
32
97
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 81 82 142 197 29
4]
[ 0 0 0 0 0 0 0 0 0 0 0 0 81 82 142 197 29 4
287]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0.]
###Markdown
Step 6 : Create Model
###Code
model = Sequential()
model.add(Embedding(total_words, 64, input_length=max_sequence_len-1))
model.add(Bidirectional(LSTM(20)))
model.add(Dense(total_words, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
history = model.fit(X, y, epochs=200, verbose=1)
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.show()
plot_graphs(history, 'accuracy')
###Output
_____no_output_____
###Markdown
Step 7 : Generate Text
###Code
seed_text = "im feeling chills"
next_words = 100
for _ in range(next_words):
token_list = tokenizer.texts_to_sequences([seed_text])[0]
token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
predicted = np.argmax(model.predict(token_list), axis=-1)
output_word = ""
for word, index in tokenizer.word_index.items():
if index == predicted:
output_word = word
break
seed_text += " " + output_word
print(seed_text)
###Output
im feeling chills your music and i do what a walk in my stuff tomorrow talking walk am am am am am more more more more more more more slack seems but no blown thought thought morning thought thought morning closed would weave realized its new new found new love found true crazy wonderful plan shoulder hour am am am am more more more more more more slack more more so seems figure quiet here i realized realized realized its blue think do crazy wonderful didnt do may just advice dreamed realized dreamed chiquitita true shoulder new to dont thought would realized do
|
exercises/ch10.ipynb | ###Markdown
Load data
###Code
from sklearn.metrics import precision_score
from sklearn.model_selection import train_test_split
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
n_rows, n_cols = 4, 10
plt.figure(figsize=(n_cols * 1.2, n_rows * 1.2))
for row in range(n_rows):
for col in range(n_cols):
index = n_cols * row + col
plt.subplot(n_rows, n_cols, index + 1)
plt.imshow(x_train[index], cmap="binary", interpolation="nearest")
plt.axis('off')
plt.subplots_adjust(wspace=0.2, hspace=0.2)
plt.show()
# pd.Series(x_train[0].ravel()).value_counts()
np.unique(x_train[0])
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.2, random_state=42)
###Output
_____no_output_____
###Markdown
Build basic model
###Code
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=x_train.shape[1:], name='input'))
for idx in range(3):
model.add(keras.layers.Dense(100, activation='elu', name=f'dense_{idx+1}'))
model.add(keras.layers.Dense(10, activation='softmax', name='output'))
model.compile(loss=keras.losses.sparse_categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=30,
validation_data=(x_valid, y_valid))
pd.DataFrame(history.history).plot()
plt.grid(True)
plt.ylim(0,1)
plt.show()
###Output
_____no_output_____
###Markdown
The model either overfits, or fails to converge because the learning rate is too large or there are not enough epochs.
###Code
model.evaluate(x_test, y_test)
###Output
313/313 [==============================] - 0s 1ms/step - loss: 0.1456 - accuracy: 0.9787
###Markdown
Improve model Things to add- add earlystop & tensorboard callbacks- explore learning rate 1. group lr exponentially 2. plot the loss 3. find the point where loss shoots up
###Code
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
from functools import partial
EluDense = partial(keras.layers.Dense,
activation='elu',
kernel_initializer = 'he_normal')
# model = keras.Sequential()
# model.add(keras.layers.Flatten(input_shape=x_train.shape[1:], name='input'))
# model.add(EluDense(units=300))
# model.add(EluDense(units=100))
# model.add(keras.layers.Dense(10, activation='softmax', name='output'))
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=x_train.shape[1:], name='input'))
for idx in range(3):
model.add(keras.layers.Dense(100, activation='elu', name=f'dense_{idx+1}'))
model.add(keras.layers.Dense(10, activation='softmax', name='output'))
initial_lr = 1e-3
model.compile(loss=keras.losses.sparse_categorical_crossentropy,
optimizer=keras.optimizers.Adam(lr=initial_lr),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Define custom callback to change lr per batch
###Code
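# per-batch factor whose 300th power equals 1e6 (about 1.047),
# i.e. a candidate growth factor for the learning-rate sweep below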
np.exp(np.log(1e6)/300)
K = keras.backend
class UpdateLearningRateCallback(keras.callbacks.Callback):
def __init__(self, factor):
self.factor = factor
self.rates = []
self.losses = []
def on_batch_end(self, batch, logs):
self.rates.append(K.get_value(self.model.optimizer.lr))
self.losses.append(logs['loss']) # why training loss not validation loss?
K.set_value(self.model.optimizer.lr, self.model.optimizer.lr * self.factor)
growth_factor = 1.005
expon_lr_cb = UpdateLearningRateCallback(factor=growth_factor)
###Output
_____no_output_____
###Markdown
Run for one epoch and find a better lr Fit the model for one epoch and check the loss per batch to find a good learning rate
###Code
history = model.fit(x_train, y_train, epochs=1,
validation_data=(x_valid, y_valid),
callbacks=[expon_lr_cb])
x_seq = np.arange(0, len(x_train) / 32 + 1, 10)
lr_seq = initial_lr * np.power(growth_factor, x_seq)
plt.plot(x_seq, lr_seq)
plt.title('learning rate growth')
plt.xlabel('after x batches')
plt.ylabel('learning_rate')
###Output
_____no_output_____
###Markdown
We can plot the loss as a function of learning rate.
###Code
lowest_loss_obtained_at = np.argmin(expon_lr_cb.losses)
plt.plot(expon_lr_cb.rates, expon_lr_cb.losses)
plt.gca().set_xscale('log')
plt.hlines(min(expon_lr_cb.losses), expon_lr_cb.rates[0], expon_lr_cb.rates[-1], color='g', ls='--',
label = f'best lr = {expon_lr_cb.rates[lowest_loss_obtained_at]:.3f}\nlowest loss = {min(expon_lr_cb.losses):.3f}')
plt.legend(loc='upper left')
plt.xlabel('learning rate log scaled')
plt.ylabel('loss')
plt.show()
###Output
_____no_output_____
###Markdown
The loss shoots up around 0.1, so let's try using 0.01 as our learning rate (a value somewhat smaller than the learning rate at which the loss shoots up, usually about 10 times smaller). Fit again with the new lr and extra callbacks
###Code
keras.backend.clear_session()
np.random.seed(27)
tf.random.set_seed(27)
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=x_train.shape[1:], name='input'))
for idx in range(3):
model.add(keras.layers.Dense(100, activation='elu', name=f'dense_{idx+1}'))
model.add(keras.layers.Dense(10, activation='softmax', name='output'))
better_lr = 0.01
model.compile(loss=keras.losses.sparse_categorical_crossentropy,
optimizer=keras.optimizers.Adam(learning_rate=better_lr),
metrics=['accuracy'])
run_idx = 1
log_dir = os.path.join(os.curdir, 'my_logs', 'run_{:03d}'.format(run_idx))
tensorboard_cb = keras.callbacks.TensorBoard(log_dir)
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
checkpoint_cb = keras.callbacks.ModelCheckpoint("my_mnist_model.h5", save_best_only=True)
history = model.fit(x_train, y_train, epochs=100,
validation_data=(x_valid, y_valid),
callbacks=[tensorboard_cb, early_stopping_cb, checkpoint_cb])
###Output
Epoch 1/100
1/1500 [..............................] - ETA: 0s - loss: 0.1026 - accuracy: 0.9688WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0014s vs `on_train_batch_end` time: 0.0044s). Check your callbacks.
1500/1500 [==============================] - 2s 1ms/step - loss: 0.1546 - accuracy: 0.9690 - val_loss: 0.2641 - val_accuracy: 0.9499
Epoch 2/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.1365 - accuracy: 0.9727 - val_loss: 1.2570 - val_accuracy: 0.9488
Epoch 3/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.3136 - accuracy: 0.9601 - val_loss: 0.2198 - val_accuracy: 0.9608
Epoch 4/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.2099 - accuracy: 0.9706 - val_loss: 0.4109 - val_accuracy: 0.9535
Epoch 5/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.1516 - accuracy: 0.9712 - val_loss: 0.2872 - val_accuracy: 0.9506
Epoch 6/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.7057 - accuracy: 0.9645 - val_loss: 0.3100 - val_accuracy: 0.9462
Epoch 7/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.1518 - accuracy: 0.9720 - val_loss: 0.2764 - val_accuracy: 0.9617
Epoch 8/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.1402 - accuracy: 0.9729 - val_loss: 0.3073 - val_accuracy: 0.9653
Epoch 9/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.2232 - accuracy: 0.9666 - val_loss: 0.3729 - val_accuracy: 0.9461
Epoch 10/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.1395 - accuracy: 0.9730 - val_loss: 0.2698 - val_accuracy: 0.9617
Epoch 11/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.5190 - accuracy: 0.9633 - val_loss: 1.3003 - val_accuracy: 0.9638
Epoch 12/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.2878 - accuracy: 0.9709 - val_loss: 0.5149 - val_accuracy: 0.9549
Epoch 13/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.2785 - accuracy: 0.9673 - val_loss: 0.4370 - val_accuracy: 0.9567
Epoch 14/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.1676 - accuracy: 0.9711 - val_loss: 0.4430 - val_accuracy: 0.9563
Epoch 15/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.2471 - accuracy: 0.9688 - val_loss: 0.7346 - val_accuracy: 0.9049
Epoch 16/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.1989 - accuracy: 0.9609 - val_loss: 0.5241 - val_accuracy: 0.9528
Epoch 17/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.2107 - accuracy: 0.9616 - val_loss: 0.5015 - val_accuracy: 0.9363
Epoch 18/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.3431 - accuracy: 0.9582 - val_loss: 0.7952 - val_accuracy: 0.9077
Epoch 19/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.3088 - accuracy: 0.9409 - val_loss: 0.5291 - val_accuracy: 0.9365
Epoch 20/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.1886 - accuracy: 0.9640 - val_loss: 0.6158 - val_accuracy: 0.9384
Epoch 21/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.2749 - accuracy: 0.9602 - val_loss: 0.6111 - val_accuracy: 0.9482
Epoch 22/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.1748 - accuracy: 0.9633 - val_loss: 0.8266 - val_accuracy: 0.9515
Epoch 23/100
1500/1500 [==============================] - 2s 1ms/step - loss: 0.2366 - accuracy: 0.9581 - val_loss: 1.6410 - val_accuracy: 0.9350
###Markdown
**I am quite lost. How can the validation accuracy be so much better than the test accuracy?**
###Code
model = keras.models.load_model("my_mnist_model.h5") # rollback to best model
model.evaluate(x_test, y_test)
###Output
313/313 [==============================] - 0s 728us/step - loss: 0.2382 - accuracy: 0.0947
|
HerosOfPymoli/.ipynb_checkpoints/HeroesOfPymoli_starter-checkpoint.ipynb | ###Markdown
Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
file_to_load = "Resources/purchase_data.csv"
# Read Purchasing File and store into Pandas data frame
purchase_data = pd.read_csv(file_to_load)
###Output
_____no_output_____
###Markdown
Player Count * Display the total number of players
###Code
#assigning the SN column to count unique players
TNP =len(purchase_data["SN"].value_counts())
#make DF
pc = pd.DataFrame({"Total Players":[TNP]})
pc
###Output
_____no_output_____
###Markdown
Purchasing Analysis (Total) * Run basic calculations to obtain number of unique items, average price, etc.* Create a summary data frame to hold the results* Optional: give the displayed data cleaner formatting* Display the summary data frame
###Code
#calc
unique_items = len((purchase_data)["Item ID"].unique())
avg_price = (purchase_data["Price"]).mean()
total_purchases = (purchase_data["Purchase ID"]).count()
total_rev = (purchase_data["Price"]).sum()
#Summary DF
summary_df = pd.DataFrame({"Number of Unique Items": [unique_items],
"Average Price":[avg_price],
"Number of Purchases":[total_purchases],
"Total Revenue":[total_rev]})
#optional add $ and only 2 decimal places MAPPING
summary_df["Average Price"]= summary_df["Average Price"].map("${:.2f}".format)
summary_df["Total Revenue"]= summary_df["Total Revenue"].map("${:.2f}".format)
summary_df
###Output
_____no_output_____
###Markdown
Gender Demographics * Percentage and Count of Male Players* Percentage and Count of Female Players* Percentage and Count of Other / Non-Disclosed
###Code
#DF with Unique Values
unique_df = purchase_data.loc[:, ["Gender", "SN", "Age"]].drop_duplicates()
# Identifying Gender Numbers
GD = unique_df["Gender"].value_counts()
GD
#percentage
gender_percent = GD/TNP *100.
gender_percent
#DF
GD_df= pd.DataFrame({"Total Count": GD,
"Percentage of Players":gender_percent})
#add percent sign and round
GD_df["Percentage of Players"]= GD_df["Percentage of Players"].map("{:.2f}%".format)
GD_df
###Output
_____no_output_____
###Markdown
Purchasing Analysis (Gender) * Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. by gender* Create a summary data frame to hold the results* Optional: give the displayed data cleaner formatting* Display the summary data frame
###Code
#calculations
purchase_count = purchase_data.groupby(["Gender"]).count()["Price"]
avg_purchase_price = purchase_data.groupby(["Gender"]).mean()["Price"]
TPV = purchase_data.groupby(["Gender"]).sum()["Price"]
avg_total = TPV/GD
avg_total
#DF
Gender_df = pd.DataFrame({"Purchase Count": purchase_count,
"Average Purchase Price":avg_purchase_price,
"Total Purchase Value":TPV,
"Avg Total Purchase per Person":avg_total})
#Add $ and two decimal places
Gender_df["Average Purchase Price"]= Gender_df["Average Purchase Price"].map("${:.2f}".format)
Gender_df["Total Purchase Value"]= Gender_df["Total Purchase Value"].map("${:.2f}".format)
Gender_df["Avg Total Purchase per Person"]= Gender_df["Avg Total Purchase per Person"].map("${:.2f}".format)
Gender_df
###Output
_____no_output_____
###Markdown
Age Demographics * Establish bins for ages* Categorize the existing players using the age bins. Hint: use pd.cut()* Calculate the numbers and percentages by age group* Create a summary data frame to hold the results* Optional: round the percentage column to two decimal points* Display Age Demographics Table
###Code
#creating bins
age_bins = [0, 9.9, 14.9,19.9,24.9,29.9,34.9, 39.9, 9999999]
group_name= ["<10", "10-14","15-19","20-24", "25-29", "30-34", "35-39", "40+"]
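# pd.cut below assigns each age to a right-inclusive interval (age_bins[i], age_bins[i+1]] labelled group_name[i]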
unique_df["Age Ranges"]= pd.cut(unique_df["Age"],age_bins,labels = group_name)
ADT= unique_df["Age Ranges"].value_counts()
Demo_percent = ADT/TNP *100
Demo_percent
#DF
demo_df = pd.DataFrame({"Total Count": ADT,
"Percentage of Players":Demo_percent,
})
#sort by index so the age ranges appear in order
demo_df=demo_df.sort_index()
demo_df
#apply format
demo_df["Percentage of Players"]= demo_df["Percentage of Players"].map("{:.2f}%".format)
demo_df
###Output
_____no_output_____
###Markdown
Purchasing Analysis (Age) * Bin the purchase_data data frame by age* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below* Create a summary data frame to hold the results* Optional: give the displayed data cleaner formatting* Display the summary data frame
###Code
#binning
bins = [0, 9.90, 14.90, 19.90, 24.90, 29.90, 34.90, 39.90, 99999]
bin_labels = ["<10", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40+"]
purchase_data["Age Ranges"] = pd.cut(purchase_data["Age"], bins, labels= bin_labels)
APC = purchase_data.groupby(["Age Ranges"])["Price"].count()
AAP = purchase_data.groupby(["Age Ranges"])["Price"].mean()
APV = purchase_data.groupby(["Age Ranges"])["Price"].sum()
APTPP = APV/demo_df["Total Count"]
#df
APA_df = pd.DataFrame({"Purchase Count": APC,
"Average Purchase Price":AAP,
"Total Purchase Value": APV,
"Avg Total Purchase Per Person": APTPP})
#add formatting: round to 2 decimals and add $
APA_df["Average Purchase Price"]= APA_df["Average Purchase Price"].map("${:.2f}".format)
APA_df["Total Purchase Value"]= APA_df["Total Purchase Value"].map("${:.2f}".format)
APA_df["Avg Total Purchase Per Person"]= APA_df["Avg Total Purchase Per Person"].map("${:.2f}".format)
APA_df
###Output
_____no_output_____
###Markdown
Top Spenders * Run basic calculations to obtain the results in the table below* Create a summary data frame to hold the results* Sort the total purchase value column in descending order* Optional: give the displayed data cleaner formatting* Display a preview of the summary data frame
###Code
top_spenders= purchase_data.groupby("SN")
#calc
TPCount = top_spenders["Purchase ID"].count()
AverageSP = top_spenders["Price"].mean()
TotalSP = top_spenders["Price"].sum()
#df
ts_df = pd.DataFrame({"Purchase Count": TPCount,
"Average Purchase Price":AverageSP,
"Total Purchase Value": TotalSP,
})
#sort by total purchase value and keep the top 5 spenders
t5s_df = ts_df.sort_values(["Total Purchase Value"], ascending=False).head(5)
#format prices with $ and 2 decimals
t5s_df["Average Purchase Price"]= t5s_df["Average Purchase Price"].map("${:.2f}".format)
t5s_df["Total Purchase Value"]= t5s_df["Total Purchase Value"].map("${:.2f}".format)
t5s_df
###Output
_____no_output_____
###Markdown
Most Popular Items * Retrieve the Item ID, Item Name, and Item Price columns* Group by Item ID and Item Name. Perform calculations to obtain purchase count, average item price, and total purchase value* Create a summary data frame to hold the results* Sort the purchase count column in descending order* Optional: give the displayed data cleaner formatting* Display a preview of the summary data frame
###Code
most_popular = purchase_data.groupby(["Item ID", "Item Name"])
#calc
pop_pc = most_popular["Price"].count()
pop_pv = most_popular["Price"].sum()
pop_ip = pop_pv / pop_pc
#df
popular_df = pd.DataFrame({"Purchase Count": pop_pc,
"Item Price": pop_ip,
"Total Purchase Value": pop_pv
})
mostpopular_df = popular_df.sort_values(["Purchase Count"],ascending=False).head(5)
#formatting
mostpopular_df["Item Price"]= mostpopular_df["Item Price"].map("${:.2f}".format)
mostpopular_df["Total Purchase Value"]= mostpopular_df["Total Purchase Value"].map("${:.2f}".format)
mostpopular_df
###Output
_____no_output_____
###Markdown
Most Profitable Items * Sort the above table by total purchase value in descending order* Optional: give the displayed data cleaner formatting* Display a preview of the data frame
###Code
mostpopular_df = popular_df.sort_values(["Total Purchase Value"], ascending=False).head()
mostpopular_df
#Formatting
mostpopular_df["Item Price"]= mostpopular_df["Item Price"].map("${:.2f}".format)
mostpopular_df["Total Purchase Value"]= mostpopular_df["Total Purchase Value"].map("${:.2f}".format)
mostpopular_df
###Output
_____no_output_____ |
exercises/Analyzing the community activity for version control systems.ipynb | ###Markdown
Analyzing the community activity for version control systems Exercise Background Technology choices differ. There may be objective reasons for choosing a technology at a specific time, but those reasons often change over time, and a deep attachment to a now outdated technology can block any progress. Objective reasons may thus become subjective ones, which can create a toxic environment when technology updates are discussed. Your task You are a new team member in a software company. The developers there are using a version control system ("VCS" for short) called CVS (Concurrent Versions System). Some want to migrate to a better VCS. They prefer one called SVN (Subversion). You are young but not inexperienced. You have heard about a newer version control system named "Git", so you propose Git as an alternative to the team. They are very sceptical about your suggestion. Find evidence that shows that the software development community is mainly adopting the Git version control system! The dataset There is a dataset from the online software developer community Stack Overflow in `../datasets/stackoverflow_vcs_data_subset.gz` (with a subset of columns) containing the following data:* `CreationDate`: the timestamp of the creation of a Stack Overflow post (= question)* `TagName`: the tag name for a technology (in our case only 4 VCSes: "cvs", "svn", "git" and "mercurial")* `ViewCount`: the number of views of a post Your solution Step 1: Load in the dataset SOLUTION (Click the arrow on the left side if a hint is needed)
###Code
import pandas as pd
vcs_data = pd.read_csv('../datasets/stackoverflow_vcs_data_subset.gz')
vcs_data.head()
###Output
_____no_output_____
###Markdown
Step 2: Explore the dataset by displaying the number of all posts for each VCS SOLUTION (Click the arrow on the left side if a hint is needed)
###Code
vcs_data['TagName'].value_counts()
###Output
_____no_output_____
###Markdown
Step 3: Convert the column with the time stamp SOLUTION (Click the arrow on the left side if a hint is needed)
###Code
vcs_data['CreationDate'] = pd.to_datetime(vcs_data['CreationDate'])
vcs_data.head()
###Output
_____no_output_____
###Markdown
Step 4: Sum up the view counts by the timestamp and the VCSes SOLUTION (Click the arrow on the left side if a hint is needed)
###Code
number_of_posts = vcs_data.groupby(['CreationDate', 'TagName']).sum()
number_of_posts.head()
###Output
_____no_output_____
###Markdown
Step 5: List the number of views by date for each VCS. Hint: You may unstack and fill in the data (and get rid of the hierarchical column) SOLUTION (Click the arrow on the left side if a hint is needed)
###Code
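# unstack pivots the TagName index level into columns; fill_value=0 covers (date, tag) pairs with no posts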
views_per_vcs = number_of_posts.unstack(fill_value=0)['ViewCount']
views_per_vcs.head()
###Output
_____no_output_____
###Markdown
Step 6: Accumulate the number of views for the VCSes for every month over all the years. Hint: First, you have to resample the data and sum it up SOLUTION (Click the arrow on the left side if a hint is needed)
###Code
cumulated_posts = views_per_vcs.resample("1M").sum().cumsum()
cumulated_posts.head()
###Output
_____no_output_____
###Markdown
Step 7: Visualize the number of views over time for all VCSes SOLUTION (Click the arrow on the left side if a hint is needed)
###Code
%matplotlib inline
cumulated_posts.plot(title="accumulated monthly stackoverflow post views");
###Output
_____no_output_____ |
notebooks/.ipynb_checkpoints/8_warp_moving_image-checkpoint.ipynb | ###Markdown
Step 8: Warp the moving image
###Code
import os
import zarr
from precomputed_tif.zarr_stack import ZarrStack
from phathom import io
import phathom.io.conversion as conversion
from phathom.registration import registration
from phathom.utils import pickle_load
working_dir = '/home/jswaney/coregistration'
# Open images
fixed_zarr_path = 'fixed/zarr_stack/1_1_1'
moving_zarr_path = 'moving/zarr_stack/1_1_1'
fixed_img = io.zarr.open(os.path.join(working_dir, fixed_zarr_path),
mode='r')
moving_img = io.zarr.open(os.path.join(working_dir, moving_zarr_path),
mode='r')
fixed_img.shape
# Load the coordinate interpolator
interpolator_path = 'map_interpolator.pkl'
interpolator = pickle_load(os.path.join(working_dir,
interpolator_path))
# Open zarr array for the registered image
nonrigid_zarr_path = 'moving/registered/1_1_1'
nonrigid_img = io.zarr.new_zarr(os.path.join(working_dir,
nonrigid_zarr_path),
fixed_img.shape,
fixed_img.chunks,
fixed_img.dtype)
# Warp the entire moving image
nb_workers = 1
batch_size = None
padding = 4
registration.register(moving_img,
nonrigid_img,
os.path.join(working_dir, interpolator_path),
nb_workers,
batch_size=batch_size,
padding=padding)
# Make the mipmap stack for neuroglancer
n_levels = 7
reg_dest_path = 'moving/registered'
reg_src_path = 'moving/registered/1_1_1'
reg_src = zarr.NestedDirectoryStore(os.path.join(working_dir,
reg_src_path))
reg_dest = os.path.join(working_dir, reg_dest_path)
reg_stack = ZarrStack(reg_src, reg_dest)
# Write info and downsampled mipmaps
reg_stack.write_info_file(n_levels)
for level in range(2, n_levels+1):
reg_stack.write_level_n(level)
# Write out to individual tiffs
zarr_path = 'moving/registered/1_1_1'
output_dir = 'moving/registered_tiffs'
nb_workers = 48
compress = 1
zarr_store = zarr.NestedDirectoryStore(os.path.join(working_dir, zarr_path))
conversion.zarr_to_tifs(zarr_store,
output_dir,
nb_workers=nb_workers,
compress=compress)
###Output
_____no_output_____ |
content-based-RS.ipynb | ###Markdown
Table of Contents- [Step A.1](Step-A.1) * [Substep: EDA](Substep:-EDA)- [Step A.2](Step-A.2)- [Step A.3](Step-A.3)- [Step A.4](Step-A.4)- [Step A.5](Step-A.5)- [Step A.6](Step-A.6)
###Code
# disclaimer: may not work as is in Windows OS
# download the “small” 5-core dataset for the category "Digital Music"
# dataset source: https://nijianmo.github.io/amazon/index.html
!wget --backups=1 http://deepyeti.ucsd.edu/jianmo/amazon/categoryFilesSmall/Digital_Music_5.json.gz -P data/
# disclaimer: may not work as is in Windows OS
# download the metadata for this dataset
# dataset source: https://nijianmo.github.io/amazon/index.html
!wget --backups=1 http://deepyeti.ucsd.edu/jianmo/amazon/metaFiles2/meta_Digital_Music.json.gz -P data/
###Output
--2022-03-19 22:59:04-- http://deepyeti.ucsd.edu/jianmo/amazon/metaFiles2/meta_Digital_Music.json.gz
Resolving deepyeti.ucsd.edu (deepyeti.ucsd.edu)... 169.228.63.50
Connecting to deepyeti.ucsd.edu (deepyeti.ucsd.edu)|169.228.63.50|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 12367273 (12M) [application/octet-stream]
Saving to: ‘data/meta_Digital_Music.json.gz’
meta_Digital_Music. 100%[===================>] 11.79M 38.8KB/s in 4m 37s
2022-03-19 23:03:42 (43.6 KB/s) - ‘data/meta_Digital_Music.json.gz’ saved [12367273/12367273]
###Markdown
Step A.1 The 5-core dataset for the category "Digital Music" is a subset of the [Amazon Review data](https://nijianmo.github.io/amazon/index.html) in which all users and items have at least 5 reviews.The format is one-review-per-line in JSON, with the following attributes:- `reviewerID` - ID of the reviewer, e.g. A2SUAM1J3GNN3B- `asin` - ID of the product, e.g. 0000013714- `reviewerName` - name of the reviewer- `vote` - helpful votes of the review- `style` - a dictionary of the product metadata, e.g., "Format" is "Hardcover"- `reviewText` - text of the review- `overall` - rating of the product- `summary` - summary of the review- `verified`- whether the review has been verified (boolean)- `unixReviewTime` - time of the review (unix time)- `reviewTime` - time of the review (raw)- `image` - images that users post after they have received the productMetadata includes descriptions, price, sales-rank, brand info, and co-purchasing links:- `asin` - ID of the product, e.g. 0000031852- `title` - name of the product- `feature` - bullet-point format features of the product- `description` - description of the product- `price` - price in US dollars (at time of crawl)- `imageURL` - url of the product image- `imageURLHighRes` - url of the high resolution product image- `related` - related products (also bought, also viewed, bought together, buy after viewing)- `salesRank` - sales rank information- `brand` - brand name- `categories` - list of categories the product belongs to- `tech1` - the first technical detail table of the product- `tech2` - the second technical detail table of the product- `similar_item` - similar product table- $\dots$
###Code
def inspect_df(df: pd.DataFrame, n: int = 5) -> pd.DataFrame:
"""Helper method to easily inspect DataFrames."""
print(f"shape: {df.shape}")
return df.head(n)
def parse(filepath: str) -> Iterator[dict]:
file_obj = gzip.open(filepath, "rb")
for line in file_obj:
yield json.loads(line)
def file_to_dataframe(filepath: str) -> pd.DataFrame:
i = 0
df = {}
for d in parse(filepath):
df[i] = d
i += 1
return pd.DataFrame.from_dict(df, orient="index")
review_data = file_to_dataframe("data/Digital_Music_5.json.gz")
inspect_df(review_data)
list(review_data.columns)
review_data.loc[2]
metadata = file_to_dataframe("data/meta_Digital_Music.json.gz")
inspect_df(metadata)
list(metadata.columns)
metadata[metadata["asin"] == review_data.loc[2]["asin"]]
metadata["asin"].value_counts()
metadata.drop_duplicates(subset="asin", keep="first", inplace=True)
# for the content-based RecSys, we need both the review rating & title, description attrs - so an inner join
data = pd.merge(review_data, metadata, how="inner", on="asin")
inspect_df(data)
metadata[metadata["asin"] == "B000091JWJ"]
###Output
_____no_output_____
###Markdown
Substep: EDA
###Code
data["created_at"] = pd.to_datetime(data["unixReviewTime"], unit="s")
data["created_at"] = pd.to_datetime(data["created_at"], format="%Y-%m")
(
ggplot(data, aes(x="overall"))
+ geom_bar(color="black")
+ labs(x="rating", title="distribution of ratings")
)
(
ggplot(data.groupby(pd.Grouper(key="created_at", freq="M")).count().reset_index())
+ geom_line(aes(x="created_at", y="asin"), color="crimson")
+ labs(x="date", y="count", title="#reviews: evolution through the years")
+ scale_x_datetime(breaks=date_breaks("6 months"), labels=date_format("%Y/%m"))
+ theme(axis_text_x=element_text(rotation=45, hjust=1), figure_size=(14, 7))
)
(
ggplot(
data.groupby([pd.Grouper(key="created_at", freq="M"), "overall"])
.count()
.reset_index()
)
+ geom_line(aes(x="created_at", y="asin"))
+ facet_wrap("overall", ncol=1)
+ labs(
x="date", y="count", title="per-rating #reviews: evolution through the years"
)
+ scale_x_datetime(breaks=date_breaks("6 months"), labels=date_format("%Y/%m"))
+ theme(axis_text_x=element_text(rotation=45, hjust=1), figure_size=(10, 7))
)
(
ggplot(data)
+ geom_smooth(aes(x="created_at", y="overall"))
+ labs(x="date", y="rating", title="rating trend through the years")
+ scale_x_datetime(breaks=date_breaks("6 months"), labels=date_format("%Y/%m"))
+ theme(axis_text_x=element_text(rotation=45, hjust=1), figure_size=(14, 7))
)
(
ggplot(data)
+ geom_smooth(
aes(x="created_at", y="overall"), method="mavg", method_args={"window": 10}
)
+ labs(x="date", y="rating", title="moving average of ratings")
+ scale_x_datetime(breaks=date_breaks("6 months"), labels=date_format("%Y/%m"))
+ theme(axis_text_x=element_text(rotation=45, hjust=1), figure_size=(14, 7))
)
###Output
_____no_output_____
###Markdown
Step A.2 Our objective is to construct “item profiles” for the items, based on information available on their metadata.
###Code
content = data.copy()
content.drop_duplicates("asin", inplace=True)
content["title"].map(lambda x: isinstance(x, str)).value_counts()
content["description"].map(lambda x: isinstance(x, list)).value_counts()
content["description"]
content[content["description"].map(len) > 1]["description"]
content["rank"].map(lambda x: isinstance(x, list)).value_counts()
def concatenate_list_field(field: list) -> str:
if not isinstance(field, list):
return field
return " ".join(field)
def extract_rank(field: str) -> int:
found = re.search("[0-9]+(,[0-9]+)+", field)
rank = found.group(0) if found else None
return int(rank.replace(",", "")) if rank else None
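# illustrative example (hypothetical input string, not taken from the dataset):
# extract_rank("12,345 in CDs & Vinyl") -> 12345, because the regex matches the first
# comma-grouped number and the commas are stripped before converting to int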
transformer = FeatureUnion(
[
(
"title_preprocessing",
Pipeline(
[
(
"transform_field",
FunctionTransformer(lambda x: x["title"], validate=False),
),
(
"tfidf",
TfidfVectorizer(
ngram_range=(1, 2),
stop_words=stopwords.words("english"),
strip_accents="unicode",
),
),
]
),
),
(
"description_preprocessing",
Pipeline(
[
(
"transform_field",
FunctionTransformer(
lambda x: x["description"].map(concatenate_list_field),
validate=False,
),
),
(
"tfidf",
TfidfVectorizer(
ngram_range=(1, 2),
stop_words=stopwords.words("english"),
strip_accents="unicode",
),
),
]
),
),
(
"rank_preprocessing",
Pipeline(
[
(
"transform_field",
FunctionTransformer(
lambda x: x["rank"]
.map(concatenate_list_field)
.map(extract_rank)
.fillna(0)
.to_numpy()
.reshape((len(x.index), 1)),
validate=False,
),
),
],
),
),
]
)
transformer.fit(content)
title_vocab = transformer.transformer_list[0][1].steps[1][1].get_feature_names_out()
description_vocab = (
transformer.transformer_list[1][1].steps[1][1].get_feature_names_out()
)
len(title_vocab), len(description_vocab)
transformed_content = transformer.transform(content).toarray()
transformed_content.shape
###Output
_____no_output_____
###Markdown
Step A.3
###Code
# note that the TF-IDF functionality in sklearn.feature_extraction.text can produce
# normalized vectors, in which case cosine_similarity is equivalent to linear_kernel,
# only slower.
cosine_sim_matrix = linear_kernel(transformed_content, transformed_content)
cosine_sim_matrix.shape
cosine_sim_matrix = pd.DataFrame(
cosine_sim_matrix, index=content["asin"], columns=content["asin"]
)
inspect_df(cosine_sim_matrix)
jaccard_sim_matrix = 1 - pairwise_distances(transformed_content, metric="jaccard")
jaccard_sim_matrix.shape
jaccard_sim_matrix = pd.DataFrame(
jaccard_sim_matrix, index=content["asin"], columns=content["asin"]
)
inspect_df(jaccard_sim_matrix)
###Output
shape: (185, 185)
###Markdown
Step A.4
###Code
def top_n_products_per_user(data, n: int = 5):
return (
data.sort_values(["reviewerID", "overall"], ascending=False)
.groupby("reviewerID")
.head(n)
.groupby("reviewerID")["asin"]
.apply(list)
.reset_index()
)
top_5_products_per_user = top_n_products_per_user(content, n=5)
top_5_products_per_user
###Output
_____no_output_____
###Markdown
Step A.5
###Code
def top_n_recommendations_per_user(
data: pd.DataFrame, sim_matrix: pd.DataFrame, n: int = 5
):
top_products_per_user = top_n_products_per_user(data, n)
recommendations = pd.DataFrame(columns=["reviewerID", "asin"])
recommendations["reviewerID"] = top_products_per_user["reviewerID"]
recommendations["asin"] = np.empty((len(recommendations.index), 0)).tolist()
for index, row in top_products_per_user.iterrows():
similar_movies = []
for column in sim_matrix.loc[row["asin"]].T:
similar_movies.extend(
sim_matrix.loc[row["asin"]]
.T[column]
.drop(
review_data.loc[review_data["reviewerID"] == row["reviewerID"]][
"asin"
].values,
errors="ignore",
)
.nlargest(n + 1)
.tail(n)
.index.values
)
recommendations.at[index, "asin"] = similar_movies
return recommendations
top_5_recommendations_per_user = top_n_recommendations_per_user(
data=content, sim_matrix=cosine_sim_matrix, n=5
)
top_5_recommendations_per_user
# also keep the entire ranking for each user, to be used later in the hybrid context
rankings_per_user = top_n_recommendations_per_user(
data=content, sim_matrix=cosine_sim_matrix, n=len(content["asin"].unique())
)
###Output
_____no_output_____
###Markdown
Step A.6
###Code
def compare_recommendations_for_user(id_: str) -> None:
viewable_cols = ["title", "asin"]
content_based = content.loc[
content["asin"].isin(
top_5_recommendations_per_user.loc[
top_5_recommendations_per_user["reviewerID"] == id_
]["asin"].values[0]
)
]
naive = content.loc[
content["asin"].isin(
top_5_products_per_user.loc[top_5_products_per_user["reviewerID"] == id_][
"asin"
].values[0]
)
]
print("CONTENT-BASED\n")
print(content_based[viewable_cols])
print("=" * 60)
print("NAIVE\n")
print(naive[viewable_cols])
compare_recommendations_for_user("A15OXG4V7IK2D9")
compare_recommendations_for_user("A1352I3HWDQCZH")
compare_recommendations_for_user("AYPCUQS6ARWFH")
compare_recommendations_for_user("A17B6IPLJ964N0")
compare_recommendations_for_user("AWJ9J0JAHN6PQ")
compare_recommendations_for_user("AXOO7BPM22BDO")
compare_recommendations_for_user("A15GBKY0IPZJI3")
compare_recommendations_for_user("AWMORCDEUIBFO")
compare_recommendations_for_user("AWG2O9C42XW5G")
compare_recommendations_for_user("A15JTJXQXO22JJ")
top_5_products_per_user.to_pickle("data/naive-RS.pkl")
rankings_per_user.to_pickle("data/content-based-RS.pkl")
###Output
_____no_output_____ |
Python-Data-Science-Handbook/notebooks/05.13-Kernel-Density-Estimation.ipynb | ###Markdown
In-Depth: Kernel Density Estimation 深入:核密度估计 > In the previous section we covered Gaussian mixture models (GMM), which are a kind of hybrid between a clustering estimator and a density estimator.Recall that a density estimator is an algorithm which takes a $D$-dimensional dataset and produces an estimate of the $D$-dimensional probability distribution which that data is drawn from.The GMM algorithm accomplishes this by representing the density as a weighted sum of Gaussian distributions.*Kernel density estimation* (KDE) is in some senses an algorithm which takes the mixture-of-Gaussians idea to its logical extreme: it uses a mixture consisting of one Gaussian component *per point*, resulting in an essentially non-parametric estimator of density.In this section, we will explore the motivation and uses of KDE.在上一节中我们介绍了高斯混合模型(GMM),它是一种介于聚类评估器和密度评估器的混合模型。回忆一下密度评估器的定义,这是一种从$D$维数据集中产生一个$D$维概率分布的算法。GMM算法使用了加权高斯分布和的方式实现了密度评估器。*核密度估计*在某种程度上是一个将高斯混合理念发展到其逻辑层次的算法:其中包含了每个数据点形成的一个高斯成分,最终得到一个基本上无参数的密度评估器。在本节中,我们会讨论核密度分析KDE的原理和应用。> We begin with the standard imports:导入包:
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
###Output
_____no_output_____
###Markdown
Motivating KDE: Histograms 初探KDE:直方图> As already discussed, a density estimator is an algorithm which seeks to model the probability distribution that generated a dataset.For one dimensional data, you are probably already familiar with one simple density estimator: the histogram.A histogram divides the data into discrete bins, counts the number of points that fall in each bin, and then visualizes the results in an intuitive manner.前面已经讨论过,密度评估器是一种找到样本概率分布的模型,然后用来生成数据集的算法。对于一维数据而言,你应该已经熟悉其中一种简单的密度评估器:直方图。直方图将数据分成离散的桶,计算每个桶中数据点的数量,然后将结果可视化成一张非常直观的图表。> For example, let's create some data that is drawn from two normal distributions:下面我们构建一些数据形成两个正态分布:
###Code
def make_data(N, f=0.3, rseed=1):
rand = np.random.RandomState(rseed)
x = rand.randn(N)
x[int(f * N):] += 5
return x
x = make_data(1000)
###Output
_____no_output_____
###Markdown
> We have previously seen that the standard count-based histogram can be created with the ``plt.hist()`` function.By specifying the ``normed`` parameter of the histogram, we end up with a normalized histogram where the height of the bins does not reflect counts, but instead reflects probability density:前面我们已经看到标准的直方图可以使用`plt.hist()`函数绘制。通过设置`density`参数,我们可以将直方图标准化,这时图像的高度不再代表数据点的数量,而是概率密度:译者注:新版Matplotlib已经不再使用normed参数,原文和代码中的参数名称已经修改为density。
###Code
hist = plt.hist(x, bins=30, density=True)
###Output
_____no_output_____
###Markdown
> Notice that for equal binning, this normalization simply changes the scale on the y-axis, leaving the relative heights essentially the same as in a histogram built from counts.This normalization is chosen so that the total area under the histogram is equal to 1, as we can confirm by looking at the output of the histogram function:注意对上图来说,标准化只是修改了y轴的度量,但是每个桶相对高度与使用简单求和构建的直方图是一致的。标准化能够使得直方图的全部面积加起来等于1,可以通过直方图函数的输出结果来进行验证:
###Code
density, bins, patches = hist
widths = bins[1:] - bins[:-1]
(density * widths).sum()
###Output
_____no_output_____
###Markdown
> One of the issues with using a histogram as a density estimator is that the choice of bin size and location can lead to representations that have qualitatively different features.For example, if we look at a version of this data with only 20 points, the choice of how to draw the bins can lead to an entirely different interpretation of the data!Consider this example:使用直方图作为密度评估器的一个问题是桶大小和位置的选择会导致展现出不同的数据特征。例如我们仅仅使用20个数据点的情况下,不同的选择会得到完全不同的数据解释。如下例:
###Code
x = make_data(20)
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(1, 2, figsize=(12, 4),
sharex=True, sharey=True,
subplot_kw={'xlim':(-4, 9),
'ylim':(-0.02, 0.3)})
fig.subplots_adjust(wspace=0.05)
for i, offset in enumerate([0.0, 0.6]):
ax[i].hist(x, bins=bins + offset, density=True)
ax[i].plot(x, np.full_like(x, -0.01), '|k',
markeredgewidth=1)
###Output
_____no_output_____
###Markdown
> On the left, the histogram makes clear that this is a bimodal distribution.On the right, we see a unimodal distribution with a long tail.Without seeing the preceding code, you would probably not guess that these two histograms were built from the same data: with that in mind, how can you trust the intuition that histograms confer?And how might we improve on this?左边的直方图很明显是一个双峰分布。右边的直方图却是一个单峰分布。如果不是看到了前面的代码,我们可能会猜测这两个直方图是从不同的数据集获得的:在这种情况下,如何能信任直方图给我们关于数据分布的直觉呢?该如何改进这点呢?> Stepping back, we can think of a histogram as a stack of blocks, where we stack one block within each bin on top of each point in the dataset.Let's view this directly:再回头深入考虑一下,我们可以将直方图想象成方块组成的堆,将数据集中的每个数据点都作为一个方块放置到其从属的桶的最上方。我们来看看:
###Code
fig, ax = plt.subplots()
bins = np.arange(-3, 8)
ax.plot(x, np.full_like(x, -0.1), '|k',
markeredgewidth=1)
for count, edge in zip(*np.histogram(x, bins)):
for i in range(count):
ax.add_patch(plt.Rectangle((edge, i), 1, 1,
alpha=0.5))
ax.set_xlim(-4, 8)
ax.set_ylim(-0.2, 8);
###Output
_____no_output_____
###Markdown
> The problem with our two binnings stems from the fact that the height of the block stack often reflects not on the actual density of points nearby, but on coincidences of how the bins align with the data points.This mis-alignment between points and their blocks is a potential cause of the poor histogram results seen here.But what if, instead of stacking the blocks aligned with the *bins*, we were to stack the blocks aligned with the *points they represent*?If we do this, the blocks won't be aligned, but we can add their contributions at each location along the x-axis to find the result.Let's try this:刚才看到那两个直方图的问题实质在于,方块组成的堆高度通常反映的不是实际的附近数据点密度,而是取决于桶与数据点对齐的选择方式,这具有一定的偶然性。不合适的选择就是我们前面看到不正确的直方图结果的原因。但是如果我们不是将方块叠放到桶上,而是将方块叠放到它们*所代表的数据点*上会怎么样?这样做的话,这些方块不会对齐,我们可以将每个数据点在x轴的每个位置上的贡献累加起来得到结果。例如:
###Code
x_d = np.linspace(-4, 8, 2000)
density = sum((abs(xi - x_d) < 0.5) for xi in x)
plt.fill_between(x_d, density, alpha=0.5)
plt.plot(x, np.full_like(x, -0.1), '|k', markeredgewidth=1)
plt.axis([-4, 8, -0.2, 8]);
###Output
_____no_output_____
###Markdown
> The result looks a bit messy, but is a much more robust reflection of the actual data characteristics than is the standard histogram.Still, the rough edges are not aesthetically pleasing, nor are they reflective of any true properties of the data.In order to smooth them out, we might decide to replace the blocks at each location with a smooth function, like a Gaussian.Let's use a standard normal curve at each point instead of a block:虽然结果看起来有点乱,但是它能比标准直方图更加健壮地反映数据的特征。然而图中的坚硬边界很不美观,且它们也无法反映数据的真实属性。我们可以考虑使用光滑的函数,如高斯函数,来平滑这个图形。下面我们在每个数据点上使用使用标准正态曲线来取代叠放的方块:
###Code
from scipy.stats import norm
x_d = np.linspace(-4, 8, 1000)
density = sum(norm(xi).pdf(x_d) for xi in x)
plt.fill_between(x_d, density, alpha=0.5)
plt.plot(x, np.full_like(x, -0.1), '|k', markeredgewidth=1)
plt.axis([-4, 8, -0.2, 5]);
###Output
_____no_output_____
###Markdown
> This smoothed-out plot, with a Gaussian distribution contributed at the location of each input point, gives a much more accurate idea of the shape of the data distribution, and one which has much less variance (i.e., changes much less in response to differences in sampling).平滑后的图像,在每个输入点上都是高斯分布,能够提供对于数据分布的更加精确的形状,而且具有更少的差异(因为取样不同产生的差异小了许多)。> These last two plots are examples of kernel density estimation in one dimension: the first uses a so-called "tophat" kernel and the second uses a Gaussian kernel.We'll now look at kernel density estimation in more detail.后面这两张图就是核密度估计在一维数据上的例子:第一幅图使用的是“高帽”核,第二幅图使用的是高斯核。下面我们详细讨论核密度估计。 Kernel Density Estimation in Practice 实践中使用核密度估计> The free parameters of kernel density estimation are the *kernel*, which specifies the shape of the distribution placed at each point, and the *kernel bandwidth*, which controls the size of the kernel at each point.In practice, there are many kernels you might use for a kernel density estimation: in particular, the Scikit-Learn KDE implementation supports one of six kernels, which you can read about in Scikit-Learn's [Density Estimation documentation](http://scikit-learn.org/stable/modules/density.html).核密度估计中的自由参数是*核*,它设定了分布在每个点的形状以及控制着每个点上核的大小(被称为*核带宽*)的参数。实践中有许多可用的核密度估计:具体来说,Scikit-Learn的KDE实现了其中的6种,读者可以在Scikit-Learn在线文档[密度估计](http://scikit-learn.org/stable/modules/density.html)中查看。> While there are several versions of kernel density estimation implemented in Python (notably in the SciPy and StatsModels packages), I prefer to use Scikit-Learn's version because of its efficiency and flexibility.It is implemented in the ``sklearn.neighbors.KernelDensity`` estimator, which handles KDE in multiple dimensions with one of six kernels and one of a couple dozen distance metrics.Because KDE can be fairly computationally intensive, the Scikit-Learn estimator uses a tree-based algorithm under the hood and can trade off computation time for accuracy using the ``atol`` (absolute tolerance) and ``rtol`` (relative tolerance) parameters.The kernel bandwidth, which is a free parameter, can be determined using Scikit-Learn's standard cross validation tools as we will soon see.虽然Python当中有一些核密度估计的实现(主要是在SciPy和StatsModels包中),作者还是建议使用Scikit-Learn的版本,因为它具有高效和灵活的特性。这些评估器被实现在`sklearn.neighbors.KernelDensity`当中,它们能使用6种核类型以及数十种距离度量计算方法在多维数据中实现KDE。因为KDE方法较为计算密集,Scikit-Learn的评估器在底层使用了树形算法,并且能够使用`atol`(绝对容差)和`rtol`(相对容差)来平衡计算时间与精确度。其中的自由参数核带宽可以使用标准的交叉验证工具决定,我们马上就会看到。> Let's first show a simple example of replicating the above plot using the Scikit-Learn ``KernelDensity`` estimator:下面使用Scikit-Learn的`KernelDensity`评估器重复一下上面的图表,作为一个简单的例子:
###Code
from sklearn.neighbors import KernelDensity
# instantiate the KDE model and fit it to the data
kde = KernelDensity(bandwidth=1.0, kernel='gaussian')
kde.fit(x[:, None])
# score_samples returns the log of the probability density
logprob = kde.score_samples(x_d[:, None])
plt.fill_between(x_d, np.exp(logprob), alpha=0.5)
plt.plot(x, np.full_like(x, -0.01), '|k', markeredgewidth=1)
plt.ylim(-0.02, 0.22);
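# optional speed/accuracy trade-off via the atol/rtol parameters mentioned above
# (the tolerance values here are illustrative placeholders, not tuned for this data)
kde_fast = KernelDensity(bandwidth=1.0, kernel='gaussian', atol=1e-4, rtol=1e-4)
kde_fast.fit(x[:, None])
logprob_fast = kde_fast.score_samples(x_d[:, None])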
###Output
_____no_output_____
###Markdown
> The result here is normalized such that the area under the curve is equal to 1.上面的结果已经标准化了,因此曲线下方的面积为1。 Selecting the bandwidth via cross-validation 通过交叉验证选择带宽> The choice of bandwidth within KDE is extremely important to finding a suitable density estimate, and is the knob that controls the bias–variance trade-off in the estimate of density: too narrow a bandwidth leads to a high-variance estimate (i.e., over-fitting), where the presence or absence of a single point makes a large difference. Too wide a bandwidth leads to a high-bias estimate (i.e., under-fitting) where the structure in the data is washed out by the wide kernel.KDE中带宽的选择对于寻找合适的密度估计是至关重要的,同时也是控制偏差的开关,这是密度估计方差的权衡值:太窄的带宽会导致高方差估计(也就是过拟合),也就是一个数据点的存在或缺失会导致巨大的差异。太宽泛的带宽会导致高偏差估计(也就是欠拟合),整个数据的结构被过宽的核给抹平了。> There is a long history in statistics of methods to quickly estimate the best bandwidth based on rather stringent assumptions about the data: if you look up the KDE implementations in the SciPy and StatsModels packages, for example, you will see implementations based on some of these rules.在统计学中,基于数据相当严格的假设来估计最佳带宽有着很长的历史:如果你查看SciPy和StatsModels包中的KDE实现,你可以看到其中一些规则的实现。> In machine learning contexts, we've seen that such hyperparameter tuning often is done empirically via a cross-validation approach.With this in mind, the ``KernelDensity`` estimator in Scikit-Learn is designed such that it can be used directly within the Scikit-Learn's standard grid search tools.Here we will use ``GridSearchCV`` to optimize the bandwidth for the preceding dataset.Because we are looking at such a small dataset, we will use leave-one-out cross-validation, which minimizes the reduction in training set size for each cross-validation trial:在机器学习领域,我们已经知道这样的超参数调整通常可以通过交叉验证方法来实现。因此Scikit-Learn中的`KernelDensity`评估器被设计成可以直接使用Scikit-Learn的标准网格搜索工具。这里我们将使用`GridSearchCV`来对前面的数据集的带宽进行优化。因为这是一个非常小的数据集,我们会使用留出一个的交叉验证方法,这能在每次交叉验证测试中尽量保证训练集的最大样本量:译者注:新版Scikit-Learn已经将GridSearchCV和LeaveOneOut移到了`sklearn.model_selection`包中,并且LeaveOneOut不再需要提供参数。下面的代码做了相应修改。
###Code
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import LeaveOneOut
bandwidths = 10 ** np.linspace(-1, 1, 100)
grid = GridSearchCV(KernelDensity(kernel='gaussian'),
{'bandwidth': bandwidths},
cv=LeaveOneOut())
grid.fit(x[:, None]);
###Output
_____no_output_____
###Markdown
> Now we can find the choice of bandwidth which maximizes the score (which in this case defaults to the log-likelihood):然后就可以得到最大分值的带宽了(本例中分值默认为对数分值):
###Code
grid.best_params_
###Output
_____no_output_____
###Markdown
> The optimal bandwidth happens to be very close to what we used in the example plot earlier, where the bandwidth was 1.0 (i.e., the default width of ``scipy.stats.norm``).带宽的最优值正好非常接近我们在前面例子中使用的1.0(也是`scipy.stats.norm`的默认宽度)。 Example: KDE on a Sphere 例子:球面上的KDE> Perhaps the most common use of KDE is in graphically representing distributions of points.For example, in the Seaborn visualization library (see [Visualization With Seaborn](04.14-Visualization-With-Seaborn.ipynb)), KDE is built in and automatically used to help visualize points in one and two dimensions.KDE最常见的应用可能是数据点分布的图像表示。例如在Seaborn可视化库(参见[使用Seaborn可视化](04.14-Visualization-With-Seaborn.ipynb))中,KDE是在一维和二维空间中的內建的自动化可视化方法。> Here we will look at a slightly more sophisticated use of KDE for visualization of distributions.We will make use of some geographic data that can be loaded with Scikit-Learn: the geographic distributions of recorded observations of two South American mammals, *Bradypus variegatus* (the Brown-throated Sloth) and *Microryzomys minutus* (the Forest Small Rice Rat).这里我们将要讨论一个稍微复杂一些的KDE进行数据分布可视化的例子:观察记录到两种南美哺乳动物的地理分布情况,棕喉树懒和森林小稻鼠。> With Scikit-Learn, we can fetch this data as follows:使用Scikit-Learn如下获取数据:
###Code
from sklearn.datasets import fetch_species_distributions
data = fetch_species_distributions()
# extract the species IDs and the location data
latlon = np.vstack([data.train['dd lat'],
data.train['dd long']]).T
species = np.array([d.decode('ascii').startswith('micro')
for d in data.train['species']], dtype='int')
###Output
_____no_output_____
###Markdown
> With this data loaded, we can use the Basemap toolkit (mentioned previously in [Geographic Data with Basemap](04.13-Geographic-Data-With-Basemap.ipynb)) to plot the observed locations of these two species on the map of South America.当数据载入后,我们可以使用Basemap工具集(之前在[使用Basemap创建地理位置图表](04.13-Geographic-Data-With-Basemap.ipynb)中介绍过)来绘制这两个物种在南美洲地图上观测的位置。译者注:译者所用Scikit-Learn版本有个issue,警告species_distributions模块已经过时,master分支已经修复,但未并入发行版,此处保留了该警告,但不影响后续功能
###Code
from mpl_toolkits.basemap import Basemap
from sklearn.datasets.species_distributions import construct_grids
xgrid, ygrid = construct_grids(data)
# draw the coastlines with basemap
m = Basemap(projection='cyl', resolution='c',
llcrnrlat=ygrid.min(), urcrnrlat=ygrid.max(),
llcrnrlon=xgrid.min(), urcrnrlon=xgrid.max())
m.drawmapboundary(fill_color='#DDEEFF')
m.fillcontinents(color='#FFEEDD')
m.drawcoastlines(color='gray', zorder=2)
m.drawcountries(color='gray', zorder=2)
# plot the observation locations
m.scatter(latlon[:, 1], latlon[:, 0], zorder=3,
c=species, cmap='rainbow', latlon=True);
###Output
/home/wangy/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.datasets.species_distributions module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.datasets. Anything that cannot be imported from sklearn.datasets is now part of the private API.
warnings.warn(message, FutureWarning)
###Markdown
> Unfortunately, this doesn't give a very good idea of the density of the species, because points in the species range may overlap one another.You may not realize it by looking at this plot, but there are over 1,600 points shown here!不过,上图并没有给出这两种动物的分布密度估计,因为这些点的范围互相重叠了。上图中有很多分布点,实际上有超过1600个数据点绘制在图中。> Let's use kernel density estimation to show this distribution in a more interpretable way: as a smooth indication of density on the map.Because the coordinate system here lies on a spherical surface rather than a flat plane, we will use the ``haversine`` distance metric, which will correctly represent distances on a curved surface.让我们使用核密度估计将这个分布展示成更加有含义的图表:在地图上显示平滑的密度分布情况。因为实际上使用的是球面坐标系统而不是平面坐标系,所以距离度量采取了`haversine`,这是一个能正确表达曲面距离的方法。> There is a bit of boilerplate code here (one of the disadvantages of the Basemap toolkit) but the meaning of each code block should be clear:下面的代码有点冗长(Basemap工具集的缺点之一),但是每个代码块的含义还是很清晰的:
###Code
# set up the data grid for the map
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = np.radians(xy[land_mask])
# create two side-by-side plots
fig, ax = plt.subplots(1, 2)
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
cmaps = ['Purples', 'Reds']
for i, axi in enumerate(ax):
axi.set_title(species_names[i])
# draw coastlines and country borders with basemap
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c', ax=axi)
m.drawmapboundary(fill_color='#DDEEFF')
m.drawcoastlines()
m.drawcountries()
# construct a spherical kernel density estimate of the distribution
kde = KernelDensity(bandwidth=0.03, metric='haversine')
kde.fit(np.radians(latlon[species == i]))
# evaluate over land only; -9999 marks ocean
Z = np.full(land_mask.shape[0], -9999.0)
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
axi.contourf(X, Y, Z, levels=levels, cmap=cmaps[i])
###Output
_____no_output_____
###Markdown
> Compared to the simple scatter plot we initially used, this visualization paints a much clearer picture of the geographical distribution of observations of these two species.对比前面我们绘制的简单散点图,上面两个图表很清晰的展示了两种动物的地理位置分布情况。 Example: Not-So-Naive Bayes 例子:非朴素贝叶斯> This example looks at Bayesian generative classification with KDE, and demonstrates how to use the Scikit-Learn architecture to create a custom estimator.下面这个例子我们来看下使用KDE创建贝叶斯生成分类,并且展示如何使用Scikit-Learn创建自定义的评估器。> In [In Depth: Naive Bayes Classification](05.05-Naive-Bayes.ipynb), we took a look at naive Bayesian classification, in which we created a simple generative model for each class, and used these models to build a fast classifier.For Gaussian naive Bayes, the generative model is a simple axis-aligned Gaussian.With a density estimation algorithm like KDE, we can remove the "naive" element and perform the same classification with a more sophisticated generative model for each class.It's still Bayesian classification, but it's no longer naive.在[深入:朴素贝叶斯分类](05.05-Naive-Bayes.ipynb)中,我们学习了朴素贝叶斯分类,里面构建了每个类别的简单生成模型并且使用这些模型来构建一个快速分类器。对于高斯朴素贝叶斯来说,生成模型就是简单的沿着坐标轴的高斯函数。使用密度估计算法如KDE,我们可以去除其中的“朴素”成分,然后对每个类别使用更加复杂的生成模型进行相同的分类工作。这仍然是贝叶斯分类,只是不再朴素。> The general approach for generative classification is this:>1. Split the training data by label.2. For each set, fit a KDE to obtain a generative model of the data. This allows you for any observation $x$ and label $y$ to compute a likelihood $P(x~|~y)$. 3. From the number of examples of each class in the training set, compute the *class prior*, $P(y)$.4. For an unknown point $x$, the posterior probability for each class is $P(y~|~x) \propto P(x~|~y)P(y)$. The class which maximizes this posterior is the label assigned to the point.生成分类的通用方法如下:1. 将训练数据依据标签划分成不同类别。2. 对每个类别,使用KDE拟合数据获得一个生成模型。这允许你对于任何观察$x$和标签$y$计算出似然$P(x~|~y)$。3. 对训练集中的每个类别,从样本数量计算得到*类别先验概率*$P(y)$。4. 对一个未知点$x$,每个类别的后验概率是$P(y~|~x) \propto P(x~|~y)P(y)$。哪个类别具有最大的后验概率值,就将这个点设置为该类别标签。> The algorithm is straightforward and intuitive to understand; the more difficult piece is couching it within the Scikit-Learn framework in order to make use of the grid search and cross-validation architecture.上述算法很直观和易于理解;更困难的部分是将它实现在Scikit-Learn框架当中,这样就能使用网格搜索和交叉验证工具。> This is the code that implements the algorithm within the Scikit-Learn framework; we will step through it following the code block:下面是Scikit-Learn框架中实现这个算法的代码;我们过一遍这些代码片段:
###Code
from sklearn.base import BaseEstimator, ClassifierMixin
class KDEClassifier(BaseEstimator, ClassifierMixin):
"""Bayesian generative classification based on KDE
Parameters
----------
bandwidth : float
the kernel bandwidth within each class
kernel : str
the kernel name, passed to KernelDensity
"""
def __init__(self, bandwidth=1.0, kernel='gaussian'):
self.bandwidth = bandwidth
self.kernel = kernel
def fit(self, X, y):
self.classes_ = np.sort(np.unique(y))
training_sets = [X[y == yi] for yi in self.classes_]
self.models_ = [KernelDensity(bandwidth=self.bandwidth,
kernel=self.kernel).fit(Xi)
for Xi in training_sets]
self.logpriors_ = [np.log(Xi.shape[0] / X.shape[0])
for Xi in training_sets]
return self
def predict_proba(self, X):
logprobs = np.array([model.score_samples(X)
for model in self.models_]).T
result = np.exp(logprobs + self.logpriors_)
return result / result.sum(1, keepdims=True)
def predict(self, X):
return self.classes_[np.argmax(self.predict_proba(X), 1)]
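# tiny numeric illustration of the posterior step described above (purely made-up numbers):
# per-class log-likelihoods from two KDEs plus equal log-priors, exponentiated and normalized
_logprobs = np.array([-12.3, -10.7]) + np.log([0.5, 0.5])
_posterior = np.exp(_logprobs)
_posterior /= _posterior.sum()   # roughly array([0.168, 0.832]) -> class 1 would be predicted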
###Output
_____no_output_____
###Markdown
The anatomy of a custom estimator 自定义评估器代码剖析 > Let's step through this code and discuss the essential features:让我们一步一步的分析上面的代码并讨论其中最关键的特性:```pythonfrom sklearn.base import BaseEstimator, ClassifierMixinclass KDEClassifier(BaseEstimator, ClassifierMixin): """Bayesian generative classification based on KDE Parameters ---------- bandwidth : float the kernel bandwidth within each class kernel : str the kernel name, passed to KernelDensity """```> Each estimator in Scikit-Learn is a class, and it is most convenient for this class to inherit from the ``BaseEstimator`` class as well as the appropriate mixin, which provides standard functionality.For example, among other things, here the ``BaseEstimator`` contains the logic necessary to clone/copy an estimator for use in a cross-validation procedure, and ``ClassifierMixin`` defines a default ``score()`` method used by such routines.We also provide a doc string, which will be captured by IPython's help functionality (see [Help and Documentation in IPython](01.01-Help-And-Documentation.ipynb)).Scikit-Learn中的每个评估器都是一个类(译者注:Python类),对于评估器类来说最方便的就是继承`BaseEstimator`类以及相应的混合器,它们能提供标准的功能。例如这里`BaseEstimator`包含着代码逻辑当需要使用交叉验证过程时能复制评估器的副本,`ClassifierMixin`定义了默认的`score()`方法给分类器继承。下面是文档字符串,可以被IPython的帮助功能捕获到(参见[IPython的帮助和文档](01.01-Help-And-Documentation.ipynb))。 > Next comes the class initialization method:下面是类实例初始化方法:```python def __init__(self, bandwidth=1.0, kernel='gaussian'): self.bandwidth = bandwidth self.kernel = kernel```> This is the actual code that is executed when the object is instantiated with ``KDEClassifier()``.In Scikit-Learn, it is important that *initialization contains no operations* other than assigning the passed values by name to ``self``.This is due to the logic contained in ``BaseEstimator`` required for cloning and modifying estimators for cross-validation, grid search, and other functions.Similarly, all arguments to ``__init__`` should be explicit: i.e. ``*args`` or ``**kwargs`` should be avoided, as they will not be correctly handled within cross-validation routines.这个方法的代码是当对象实例通过`KDEClassifier()`创建完成后初始化执行的部分。在Scikit-Learn中,很重要的一点需要记住,初始化方法除了通过`self`设置对象属性外不能包括其他的操作。这是因为`BaseEstimator`中的代码逻辑在交叉验证、网格搜索和其他功能时需要克隆和修改评估器。类似的,`__init__`方法的参数应该是显式定义的:`*args`或`**kwargs`的定义方式应该避免,同样是因为它们无法被交叉验证过程正确的处理。 > Next comes the ``fit()`` method, where we handle training data:接下来是`fit()`方法,对训练数据进行拟合:```python def fit(self, X, y): self.classes_ = np.sort(np.unique(y)) training_sets = [X[y == yi] for yi in self.classes_] self.models_ = [KernelDensity(bandwidth=self.bandwidth, kernel=self.kernel).fit(Xi) for Xi in training_sets] self.logpriors_ = [np.log(Xi.shape[0] / X.shape[0]) for Xi in training_sets] return self```> Here we find the unique classes in the training data, train a ``KernelDensity`` model for each class, and compute the class priors based on the number of input samples.Finally, ``fit()`` should always return ``self`` so that we can chain commands. 
For example:首先找出训练数据中所有唯一的分类标签,对每个分类独立训练一个`KernelDensity`模型,然后根据输入样本数量计算每个分类的先验概率。最后`fit()`方法应该永远返回`self`令其支持链式操作。例如:```pythonlabel = model.fit(X, y).predict(X)```> Notice that each persistent result of the fit is stored with a trailing underscore (e.g., ``self.logpriors_``).This is a convention used in Scikit-Learn so that you can quickly scan the members of an estimator (using IPython's tab completion) and see exactly which members are fit to training data.注意一下`fit`得到的持久化结果应该保存在后缀下划线名称的属性当中(例如`self.logpriors_`)。这是Scikit-Learn的编码规范方便用户迅速的查看评估器的成员值(使用IPython的制表符补全)并获得已经拟合到训练数据上的成员变量值。 > Finally, we have the logic for predicting labels on new data:最后我们看到的是在新数据上预测标签的逻辑:```python def predict_proba(self, X): logprobs = np.vstack([model.score_samples(X) for model in self.models_]).T result = np.exp(logprobs + self.logpriors_) return result / result.sum(1, keepdims=True) def predict(self, X): return self.classes_[np.argmax(self.predict_proba(X), 1)]```> Because this is a probabilistic classifier, we first implement ``predict_proba()`` which returns an array of class probabilities of shape ``[n_samples, n_classes]``.Entry ``[i, j]`` of this array is the posterior probability that sample ``i`` is a member of class ``j``, computed by multiplying the likelihood by the class prior and normalizing.因为这是一个概率分类器,我们首先实现了`predict_proba()`方法,它返回新数据在每个分类上的后验概率数组,形状是`[n_samples, n_classes]`。数组中的元素`[i, j]`是样本`i`属于分类`j`的后验概率值,通过将似然值与分类先验概率值相乘并标准化后得到。> Finally, the ``predict()`` method uses these probabilities and simply returns the class with the largest probability.最后`predict()`方法使用这些概率并在其中找到最大值,然后返回分类的标签。 Using our custom estimator 使用我们的自定义评估器> Let's try this custom estimator on a problem we have seen before: the classification of hand-written digits.Here we will load the digits, and compute the cross-validation score for a range of candidate bandwidths using the ``GridSearchCV`` meta-estimator (refer back to [Hyperparameters and Model Validation](05.03-Hyperparameters-and-Model-Validation.ipynb)):下面让我们试一下这个自定义评估器,使用前面我们研究过的问题:手写数字分类。我们载入手写数字数据,然后针对一定范围的带宽值使用`GridSearchCV`元评估器计算交叉验证结果(参见[超参数和模型验证](05.03-Hyperparameters-and-Model-Validation.ipynb)):译者注:下面代码做了修改以适应新版本Scikit-Learn。包括GridSearchCV从属的包,参数cv和结果中使用cv_result_字典取分值。
###Code
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
digits = load_digits()
bandwidths = 10 ** np.linspace(0, 2, 100)
grid = GridSearchCV(KDEClassifier(), {'bandwidth': bandwidths}, cv=5)
grid.fit(digits.data, digits.target)
scores = grid.cv_results_['mean_test_score']
###Output
_____no_output_____
###Markdown
> Next we can plot the cross-validation score as a function of bandwidth:接下来我们可以绘制交叉验证分值与带宽之间的函数图像:
###Code
plt.semilogx(bandwidths, scores)
plt.xlabel('bandwidth')
plt.ylabel('accuracy')
plt.title('KDE Model Performance')
print(grid.best_params_)
print('accuracy =', grid.best_score_)
###Output
{'bandwidth': 6.135907273413174}
accuracy = 0.9677298050139276
###Markdown
> We see that this not-so-naive Bayesian classifier reaches a cross-validation accuracy of just over 96%; this is compared to around 80% for the naive Bayesian classification:我们看到这个不那么朴素的贝叶斯分类器达到了交叉验证准确率超过96%;而朴素贝叶斯分类只有大约80%:
###Code
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score
cross_val_score(GaussianNB(), digits.data, digits.target, cv=5).mean()
###Output
_____no_output_____ |
imdb.ipynb | ###Markdown
Define the text preprocessing pipeline for the raw data
###Code
import torch
from torchtext.datasets import IMDB
train_iter = IMDB(split='train')
next(train_iter)
from torchtext.data.utils import get_tokenizer
from collections import Counter
from torchtext.vocab import Vocab
tokenizer = get_tokenizer('basic_english')
counter = Counter()
for (label, line) in train_iter:
counter.update(tokenizer(line))
vocab = Vocab(counter, min_freq=1,
max_size = 25000,
vectors = "glove.6B.100d",
unk_init = torch.Tensor.normal_)
vocab.freqs.most_common(20)
text_pipeline = lambda x: [vocab[token] for token in tokenizer(x)]
label_pipeline = lambda x: 0 if x=='neg' else 1
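# quick sanity check of both pipelines (the review text here is just an illustrative example)
print(text_pipeline('this film was surprisingly good'))  # list of token indices from the vocab
print(label_pipeline('pos'))                             # 1 ('neg' maps to 0, anything else to 1)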
from torch.utils.data import DataLoader
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def collate_batch(batch):
label_list, text_list, offsets = [], [], [0]
for (_label, _text) in batch:
label_list.append(label_pipeline(_label))
processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
text_list.append(processed_text)
offsets.append(processed_text.size(0))
label_list = torch.tensor(label_list, dtype=torch.int64)
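    # offsets[:-1].cumsum() below gives the start index of each review inside the single concatenated token tensor, as expected by nn.EmbeddingBag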
offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
text_list = torch.cat(text_list)
return label_list.to(device), text_list.to(device), offsets.to(device)
train_iter = IMDB(split='train')
dataloader = DataLoader(train_iter, batch_size=8, shuffle=False, collate_fn=collate_batch)
for idx, (label, text, offsets) in enumerate(dataloader):
print(text.shape)
print(label)
###Output
torch.Size([1805])
tensor([0, 0, 0, 0, 0, 0, 0, 0])
torch.Size([2298])
tensor([0, 0, 0, 0, 0, 0, 0, 0])
torch.Size([2387])
tensor([0, 0, 0, 0, 0, 0, 0, 0])
torch.Size([1764])
tensor([0, 0, 0, 0, 0, 0, 0, 0])
torch.Size([1900])
tensor([0, 0, 0, 0, 0, 0, 0, 0])
###Markdown
Define model
###Code
from torch import nn
class TextClassificationModel(nn.Module):
def __init__(self, vocab_size, embed_dim, num_class):
super(TextClassificationModel, self).__init__()
self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=True)
self.fc = nn.Linear(embed_dim, num_class)
self.init_weights()
def init_weights(self):
initrange = 0.5
self.embedding.weight.data.uniform_(-initrange, initrange)
self.fc.weight.data.uniform_(-initrange, initrange)
self.fc.bias.data.zero_()
def forward(self, text, offsets):
embedded = self.embedding(text, offsets)
return self.fc(embedded)
num_class = 2
vocab_size = len(vocab)
emsize = 64
model = TextClassificationModel(vocab_size, emsize, num_class).to(device)
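import torch
# A minimal sanity check (sketch, using made-up token ids): nn.EmbeddingBag takes a
# flat 1-D tensor of token ids plus per-sample offsets, so two texts of lengths 3 and
# 2 are passed as 5 ids with offsets [0, 3]; the model returns one row of logits per text.
_ids = torch.tensor([1, 2, 3, 4, 5], dtype=torch.int64).to(device)
_offsets = torch.tensor([0, 3], dtype=torch.int64).to(device)
assert model(_ids, _offsets).shape == (2, num_class)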
###Output
_____no_output_____
###Markdown
Define training and evaluation functions, dataloaders, and run training
###Code
import time
def train(dataloader):
    model.train()
    total_acc, total_count = 0, 0
    log_interval = 500
    start_time = time.time()
    for idx, (label, text, offsets) in enumerate(dataloader):
        optimizer.zero_grad()
        predicted_label = model(text, offsets)
        loss = criterion(predicted_label, label)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
        optimizer.step()
        total_acc += (predicted_label.argmax(1) == label).sum().item()
        total_count += label.size(0)
        if idx % log_interval == 0 and idx > 0:
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches '
                  '| accuracy {:8.3f}'.format(epoch, idx, len(dataloader),
                                              total_acc/total_count))
            total_acc, total_count = 0, 0
            start_time = time.time()
def evaluate(dataloader):
    model.eval()
    total_acc, total_count = 0, 0
    with torch.no_grad():
        for idx, (label, text, offsets) in enumerate(dataloader):
            predicted_label = model(text, offsets)
            total_acc += (predicted_label.argmax(1) == label).sum().item()
            total_count += label.size(0)
    return total_acc/total_count
from torch.utils.data.dataset import random_split
# Hyperparameters
EPOCHS = 10 # epoch
LR = 5 # learning rate
BATCH_SIZE = 64 # batch size for training
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=LR)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)
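# Note: with step size 1 and gamma=0.1, each scheduler.step() call divides the
# learning rate by 10; in the loop below it is only called when validation
# accuracy stops improving.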
total_accu = None
train_iter, test_iter = IMDB()
train_dataset = list(train_iter)
test_dataset = list(test_iter)
num_train = int(len(train_dataset) * 0.95)
split_train_, split_valid_ = \
random_split(train_dataset, [num_train, len(train_dataset) - num_train])
train_dataloader = DataLoader(split_train_, batch_size=BATCH_SIZE,
shuffle=True, collate_fn=collate_batch)
valid_dataloader = DataLoader(split_valid_, batch_size=BATCH_SIZE,
shuffle=True, collate_fn=collate_batch)
test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE,
shuffle=True, collate_fn=collate_batch)
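# collate_batch (defined earlier in this notebook) is assumed to return, per batch,
# a label tensor, one flat tensor of concatenated token ids, and the offsets that
# mark where each review starts, i.e. exactly the inputs the EmbeddingBag model expects.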
for epoch in range(1, EPOCHS + 1):
epoch_start_time = time.time()
train(train_dataloader)
accu_val = evaluate(valid_dataloader)
if total_accu is not None and total_accu > accu_val:
scheduler.step()
else:
total_accu = accu_val
print('-' * 59)
print('| end of epoch {:3d} | time: {:5.2f}s | '
'valid accuracy {:8.3f} '.format(epoch,
time.time() - epoch_start_time,
accu_val))
print('-' * 59)
###Output
torch.Size([64, 64])
###Markdown
Evaluation
###Code
print('Checking the results of test dataset.')
accu_test = evaluate(test_dataloader)
print('test accuracy {:8.3f}'.format(accu_test))
###Output
Checking the results of test dataset.
test accuracy 0.853
###Markdown
Predict on a single instance
###Code
imdb_label = {0: "negative",
              1: "positive"}
def predict(text, text_pipeline):
with torch.no_grad():
text = torch.tensor(text_pipeline(text))
output = model(text, torch.tensor([0]))
print(output)
return output.argmax(1).item()
model = model.to("cpu")
ex_text_str = "It was a wonderful educational short journey into the deep blue searching for the king,\
the great white shark , me and my family were fully enjoyed watching this beautiful movie and we were \
fascinated by the magnificent clear blue color under water and the great camera work , unfortunately it\
was too short (only 40 minutes), like we say in Arabic ( happy times passes fast) , but it's really worth our time."
print("This is a %s review" %ag_news_label[predict(ex_text_str, text_pipeline)])
###Output
tensor([[-2.7194, 2.3986]])
This is a positive review
###Markdown
###Code
# Building a Model
from tensorflow import keras
from keras import models
from keras import layers
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
# Training the model
from keras.datasets import imdb
from keras import losses
from keras import metrics
from keras import optimizers
import numpy as np
# Loading imdb data into training and testing variables; the num_words argument keeps the top 10000 most frequently occurring words in the training data
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words = 10000)
def vectorize_sequences(sequences, dimension=10000):
    '''Vectorize function to vectorize training and testing data'''
results = np.zeros((len(sequences), dimension)) # Creates a zero matrix of shape (len(sequences),dimensions)
for i, sequence in enumerate(sequences):
results[i, sequence] = 1. # Sets specific indices of results[i] to '1'
return results
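# For instance, vectorize_sequences([[3, 5]]) yields a single 10000-dim multi-hot
# row that is zero everywhere except at indices 3 and 5 (a quick sketch check):
_demo = vectorize_sequences([[3, 5]])
assert _demo.shape == (1, 10000) and _demo[0, 3] == 1.0 and _demo[0, 5] == 1.0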
x_train = vectorize_sequences(train_data) # Vectorize training data
x_test = vectorize_sequences(test_data) # Vectorize testing data
y_train = np.asarray(train_labels).astype('float32') # Convert training labels to a float32 array
y_test = np.asarray(test_labels).astype('float32') # Convert testing labels to a float32 array
x_val = x_train[:10000]
partial_x_train = x_train[10000:]
y_val = y_train[:10000]
partial_y_train = y_train[10000:]
model.compile(optimizer=optimizers.RMSprop(lr=0.001),
loss=losses.binary_crossentropy,
metrics=[metrics.binary_accuracy]) # Compiling model
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val)) # Training model on the reviews held out from the validation split
# Decoding the review in English Sentence
word_index = imdb.get_word_index()
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
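# The i - 3 offset below accounts for imdb.load_data reserving indices 0, 1 and 2
# for the padding, start-of-sequence and unknown-word markers.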
decoded_review = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]])
print(decoded_review.replace('?',''))
# Plotting binary accuracy, validation binary accuracy, loss, validation loss,
import matplotlib.pyplot as plt
acc = history.history['binary_accuracy']
val_acc = history.history['val_binary_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'go', label='Training loss')
plt.plot(epochs, val_loss, 'g', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Testing the trained model
test_loss, test_accuracy = model.evaluate(x_test, y_test)
print("Loss :",test_loss)
print("Accuracy :",test_accuracy)
model.save('imdb_model.h5')
# Predict once for the whole test set, then report the first ten reviews.
word_index = imdb.get_word_index()
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
pred = model.predict(x_test)
for i in range(1, 11):
    print("\nReview : {}".format(i))
    decoded_review = ' '.join([reverse_word_index.get(idx - 3, '?') for idx in test_data[i]])
    print(decoded_review.replace('?', ''))
    if round(pred[i][0]) == 0:
        print("\nReview is NEGATIVE")
    else:
        print("\nReview is POSITIVE")
###Output
Review : 1
this film requires a lot of patience because it focuses on mood and character development the plot is very simple and many of the scenes take place on the same set in frances the sandy dennis character apartment but the film builds to a disturbing climax br br the characters create an atmosphere with sexual tension and psychological it's very interesting that robert altman directed this considering the style and structure of his other films still the trademark altman audio style is evident here and there i think what really makes this film work is the brilliant performance by sandy dennis it's definitely one of her darker characters but she plays it so perfectly and convincingly that it's scary michael burns does a good job as the mute young man regular altman player michael murphy has a small part the moody set fits the content of the story very well in short this movie is a powerful study of loneliness sexual and desperation be patient up the atmosphere and pay attention to the wonderfully written script br br i praise robert altman this is one of his many films that deals with unconventional fascinating subject matter this film is disturbing but it's sincere and it's sure to a strong emotional response from the viewer if you want to see an unusual film some might even say bizarre this is worth the time br br unfortunately it's very difficult to find in video stores you may have to buy it off the internet
Review is POSITIVE
Review : 2
many animation buffs consider the great forgotten genius of one special branch of the art puppet animation which he invented almost single and as it happened almost accidentally as a young man was more interested in than the cinema but his attempt to film two fighting led to an unexpected breakthrough in film making when he realized he could movement by beetle and them one frame at a time this discovery led to the production of amazingly elaborate classic short the revenge which he made in russia in at a time when motion picture animation of all sorts was in its br br the political of the russian revolution caused to move to paris where one of his first productions was a dark political satire known as or the who wanted a king a strain of black comedy can be found in almost all of films but here it is very dark indeed aimed more at grown ups who can appreciate the satirical aspects than children who would most likely find the climax i'm middle aged and found it pretty myself and indeed of the film intended for english speaking viewers of the 1920s were given title cards filled with and in order to help the sharp of the finale br br our tale is set in a swamp the where the citizens are unhappy with their government and have called a special session to see what they can do to improve matters they decide to for a king the crowds are animated in this opening sequence it couldn't have been easy to make so many frog puppets look alive simultaneously while for his part is depicted as a white guy in the clouds who looks like he'd rather be taking a when sends them a tree like god who regards them the decide that this is no improvement and demand a different king irritated sends them a br br delighted with this looking new king who towers above them the welcome him with a of dressed the mayor steps forward to hand him the key to the as cameras record the event to everyone's horror the promptly eats the mayor and then goes on a merry rampage citizens at random a title card reads news of the king's throughout the kingdom when the now terrified once more for help he loses his temper and their community with lightning the moral of our story delivered by a hapless frog just before he is eaten is let well enough alone br br considering the time period when this startling little film was made and considering the fact that it was made by a russian at the height of that country's civil war it would be easy to see this as a about those events may or may not have had turmoil in mind when he made but whatever his choice of material the film stands as a tale of universal could be the soviet union italy germany or japan in the 1930s or any country of any era that lets its guard down and is overwhelmed by it's a fascinating film even a charming one in its macabre way but its message is no joke
Review is POSITIVE
Review : 3
i generally love this type of movie however this time i found myself wanting to kick the screen since i can't do that i will just complain about it this was absolutely idiotic the things that happen with the dead kids are very cool but the alive people are absolute idiots i am a grown man pretty big and i can defend myself well however i would not do half the stuff the little girl does in this movie also the mother in this movie is reckless with her children to the point of neglect i wish i wasn't so angry about her and her actions because i would have otherwise enjoyed the flick what a number she was take my advise and fast forward through everything you see her do until the end also is anyone else getting sick of watching movies that are filmed so dark anymore one can hardly see what is being filmed as an audience we are involved with the actions on the screen so then why the hell can't we have night vision
Review is POSITIVE
Review : 4
like some other people wrote i'm a die hard mario fan and i loved this game br br this game starts slightly boring but trust me it's worth it as soon as you start your hooked the levels are fun and they will hook you your mind turns to i'm not kidding this game is also and is beautifully done br br to keep this spoiler free i have to keep my mouth shut about details but please try this game it'll be worth it br br story 9 9 action 10 1 it's that good 10 attention 10 average 10
Review is POSITIVE
Review : 5
i'm absolutely disgusted this movie isn't being sold all who love this movie should email disney and increase the demand for it they'd eventually have to sell it then i'd buy copies for everybody i know everything and everybody in this movie did a good job and i haven't figured out why disney hasn't put this movie on dvd or on vhs in rental stores at least i haven't seen any copies this is a wicked good movie and should be seen by all the kids in the new generation don't get to see it and i think they should it should at least be put back on the channel this movie doesn't deserve a cheap it deserves the real thing i'm them now this movie will be on dvd
Review is POSITIVE
Review : 6
originally supposed to be just a part of a huge epic the year depicting the revolution of is the story of the of the crew of the in harbor the film opens with the crew meat and the captain the execution of the an takes place during which the revolutionary leader is killed this is taken to the shore to lie in state when the gather on a huge flight of steps the harbor troops appear and march down the steps breaking up the crowd a naval is sent to the but at the moment when the ships come into range their allow the to pass through non historically accurate ending is open ended thus that this was the seed of the later revolution that would bloom in russia the film is broken into five parts men and drama on the an appeal from the dead the steps and meeting the br br was a revolutionary artist but at the genius level not wanting to make a historical drama used visual to give the film a look so that the viewer feels he is on a thrilling and politically revolutionary story this technique is used by the battle of br br unlike relied on or the casting of non professionals who had striking physical appearances the extraordinary faces of the cast are what one remembers from this technique is later used by frank in mr deeds goes to town and meet john but in no one individual is cast as a hero or heroine the story is told through a series of scenes that are combined in a special effect known as montage the editing and selection of short segments to produce a desired effect on the viewer d w griffith also used the montage but no one it so well as br br the artistic filming of the crew sleeping in their is by the swinging of tables suspended from chains in the in contrast the confrontation between the crew and their officers is charged with electricity and the of the masses demonstrate their rage with injustice br br introduced the technique of showing an action and repeating it again but from a slightly different angle to demonstrate intensity the breaking of a plate bearing the words give us this day our daily bread the beginning of the end this technique is used in last year at also when the surgeon is tossed over the side his from the it was these glasses that the officer used to and pass the meat this sequence ties the punishment to the corruption of the era br br the most noted sequence in the film and perhaps in all of film history is the steps the broad of the steps are filled with hundreds of extras rapid and dramatic violence is always suggested and not explicit yet the visual images of the deaths of a few will last in the minds of the viewer forever br br the shots of boots and legs the steps are cleverly with long menacing shadows from a sun at the top of the steps the pace of the sequence is deliberately varied between the soldiers and a few civilians who up courage to beg them to stop a close up of a woman's face frozen in horror after being struck by a sword is the direct of the bank in bonnie in clyde and gives a lasting impression of the horror of the regime br br the death of a young mother leads to a baby down the steps in a sequence that has been copied by hitchcock in foreign by terry gilliam in brazil and brian in the this sequence is shown repeatedly from various angles thus drawing out what probably was only a five second event br br is a film that the revolutionary spirit it for those already committed and it for the it of fire and with the senseless of the regime its greatest impact has been on film students who have borrowed and only slightly improved on techniques invented in 
russia several generations ago
Review is POSITIVE
Review : 7
the richard dog is to joan fontaine dog however when bing crosby arrives in town to sell a record player to the emperor his dog is attacked by dog after a revenge attack where is from town a insists that dog must confront dog so that she can overcome her fears this is arranged and the dogs fall in love so do and the rest of the film passes by with romance and at the end dog gives birth but who is the father br br the dog story is the very weak vehicle that is used to try and create a story between humans its a terrible storyline there are 3 main musical pieces all of which are rubbish bad songs and dreadful choreography its just an extremely boring film bing has too many words in each sentence and delivers them in an almost irritating manner its not funny ever but its meant to be bing and joan have done much better than this
Review is NEGATIVE
Review : 8
hollywood had a long love affair with bogus nights tales but few of these products have stood the test of time the most memorable were the jon hall maria films which have long since become camp this one is filled with dubbed songs and slapstick it's a truly crop of corn and pretty near today it was nominated for its imaginative special effects which are almost in this day and age mainly of trick photography the only outstanding positive feature which survives is its beautiful color and clarity sad to say of the many films made in this genre few of them come up to alexander original thief of almost any other nights film is superior to this one though it's a loser
Review is POSITIVE
Review : 9
this film is where the batman franchise ought to have stopped though i will that the ideas behind batman forever were excellent and could have been easily realised by a competent director as it turned out this was not to be the case br br apparently warner brothers executives were disappointed with how dark this second batman film from tim burton turned out apart from the idiocy of expecting anything else from burton and the conservative of their subsequent decision to turn the franchise into an homage to the sixties tv series i fail to understand how batman returns can be considered at all disappointing br br true it is not quite the equal of the first film though it all the minor of style found in batman a weaker script that the between not just two but three characters invites comparisons to the masterful pairing of keaton and jack nicholson as the joker in the first film yet for all this it remains a dark film true to the way the batman was always meant to be and highly satisfying br br michael keaton returns as the batman and his alter ego bruce wayne with max christopher walken named in honour of the 1920s german silent actor his partner in crime the penguin danny in brilliant makeup reminiscent of laurence richard iii and kyle the michelle pfeiffer whom wayne romances both as himself and as the batman the four principals turn in excellent performances especially walken and while together keaton and pfeiffer explore the darker side of double identities br br there are some intriguing concepts in this film about the only weakness i can really point out is a certain to the script in some places which i think is due mostly to the way this film is a four fight there simply isn't enough time to properly explore what's going on br br nevertheless this is a damn good film i highly recommend watching this in with the first and then for how good the series could have been had it continued under burton and keaton
Review is POSITIVE
Review : 10
inspired by hitchcock's strangers on a train concept of two men murders in exchange for getting rid of the two people messing up their lives throw from the train is an original and very inventive comedy take on the idea it's a credit to danny that he both wrote and starred in this minor comedy gem br br anne is the mother who the film's title and it's understandable why she gets under the skin of danny with her sharp tongue and relentlessly putting him down for any minor billy crystal is the writer who's wife has stolen his book idea and is now being as a great new author even appearing on the oprah show to in he should be enjoying thus gets the idea of murders to rid themselves of these factors br br of course everything and anything can happen when writer carl lets his imagination with ideas for how the plot develops and it's amusing all the way through providing plenty of laughs and chuckles along the way as well as a good deal of suspense br br for of black comedy this one is guaranteed to please
Review is POSITIVE
###Markdown
###Code
!pip install -q keras
###Output
_____no_output_____
###Markdown
###Code
from keras.datasets import imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(
num_words=10000)
word_index = imdb.get_word_index()
reverse_word_index = dict(
[(value, key) for (key, value) in word_index.items()])
decoded_review = ' '.join(
[reverse_word_index.get(i - 3, '?') for i in train_data[0]])
import numpy as np
def vectorize_sequences(sequences, dimension=10000):
results = np.zeros((len(sequences), dimension))
for i, sequence in enumerate(sequences):
results[i, sequence] = 1.
return results
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
from keras import models
from keras import layers
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
x_val = x_train[:10000]
partial_x_train = x_train[10000:]
y_val = y_train[:10000]
partial_y_train = y_train[10000:]
history = model.fit(partial_x_train,
partial_y_train,
epochs=20,
batch_size=512,
validation_data=(x_val, y_val))
history_dict = history.history
history_dict.keys()
import matplotlib.pyplot as plt
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
acc = history.history['acc']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, loss_values, 'bo', label='Training loss')
plt.plot(epochs, val_loss_values, 'b', label = 'Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf()
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
val_acc = history.history['val_acc']
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
results = model.evaluate(x_test, y_test)
results
model.predict(x_test)
y_test
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=4, batch_size=512)
results = model.evaluate(x_test, y_test)
results
model.predict(x_test)
y_test
model.predict(x_test[:10])
y_test[:10]
###Output
_____no_output_____
###Markdown
###Code
from keras.datasets import imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
max([max(sequence) for sequence in train_data])
word_index = imdb.get_word_index()
reverse_word_index = dict(
[(value,key) for (key,value) in word_index.items()])
decoded_review = ' '.join(
[reverse_word_index.get(i-3, '?') for i in train_data[0]]
)
decoded_review
import numpy as np
def vectorize_sequences(sequences, dimension=10000):
results = np.zeros((len(sequences), dimension))
for i, sequence in enumerate(sequences):
results[i,sequence] = 1.
return results
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
x_train.shape
from keras import models
from keras import layers
from keras import optimizers
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
x_val=x_train[:10000]
partial_x_train = x_train[10000:]
y_val = y_train[:10000]
partial_y_train = y_train[10000:]
model.compile(optimizer=optimizers.RMSprop(learning_rate=0.001),
loss='binary_crossentropy',
metrics=['accuracy'])
history = model.fit(
partial_x_train,
partial_y_train,
epochs = 20,
batch_size = 512,
validation_data = (x_val,y_val)
)
history_dict = history.history
history_dict.keys()
import matplotlib.pyplot as plt
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
acc_values = history_dict['accuracy']
val_acc_values = history_dict['val_accuracy']
epochs = range(1, len(acc_values) + 1)
plt.plot(epochs, loss_values, 'bo', label='Training loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
#plt.clf()
plt.plot(epochs, acc_values, 'bo', label='Training acc')
plt.plot(epochs, val_acc_values, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
model.fit(
x_train,
y_train,
epochs = 4,
batch_size = 512
)
results = model.evaluate(x_test,y_test)
results
model.predict(x_test)
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
#model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer=optimizers.RMSprop(learning_rate=0.001),
loss='binary_crossentropy',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
epochs = 4,
batch_size = 512
)
results = model.evaluate(x_test,y_test)
results
###Output
_____no_output_____
###Markdown
Prepare An example as baseline: the [ULMFit](https://nbviewer.jupyter.org/github/fastai/fastai/blob/master/examples/ULMFit.ipynb) tutorial.> Fine-tuning a forward and backward language model to get to 95.4% accuracy on the IMDB movie reviews dataset. This tutorial is done with fastai v1.0.53.> The example was run on a Titan RTX (24 GB of RAM) so you will probably need to adjust the batch size accordingly. If you divide it by 2, don't forget to divide the learning rate by 2 as well in the following cells. You can also reduce the bptt a little to gain a bit of memory.
###Code
# Ensure GPU spec; T4 is for colab and one can change it for another env.
gpu_list = !nvidia-smi -L
if gpu_list[0].startswith('NVIDIA-SMI has failed'):
print('Runtime type should be GPU.')
elif not gpu_list[0].startswith('GPU 0: Tesla T4'):
display(gpu_list)
print('Please reset all runtimes. We need a Tesla T4 to reproduce the experiments!')
else:
display(gpu_list)
###Output
_____no_output_____
###Markdown
Dependency Install
###Code
# Ensure no surprises from conflict packages.
!pip check
%%capture pip_logs
!pip install -U fastai==1.0.55 ipyexperiments jupyter-console==5.2.0 coveralls coverage datascience albumentations
!pip check
###Output
_____no_output_____
###Markdown
Import
###Code
import gc
import math
from pathlib import Path
import random
import numpy as np
import torch
from google.colab import drive
from fastai import basic_data, basic_train, core
from fastai import *
from fastai.callbacks import CSVLogger
from fastai.core import plt
from fastai.text import *
from fastprogress import fastprogress
from ipyexperiments import *
###Output
_____no_output_____
###Markdown
Init
###Code
# Not set earlier because pip may require a restart.
SESSN_START_T, = !date +%Y%m%dT%H%M
%load_ext autoreload
%autoreload 2
# A special treatment for colab to decrease network traffic.
fastprogress.NO_BAR = True
master_bar, progress_bar = fastprogress.force_console_behavior()
basic_train.master_bar, basic_train.progress_bar = master_bar, progress_bar
basic_data.master_bar, basic_data.progress_bar = master_bar, progress_bar
dataclass.master_bar, dataclass.progress_bar = master_bar, progress_bar
text.master_bar, text.progress_bar = master_bar, progress_bar
text.data.master_bar, text.data.progress_bar = master_bar, progress_bar
core.master_bar, core.progress_bar = master_bar, progress_bar
COLAB_CONTENT_DIR_P = Path('/content')
GD_DIR_P = COLAB_CONTENT_DIR_P / 'gdrive'
drive.mount(str(GD_DIR_P), force_remount=True)
BASE_DIR_P = GD_DIR_P / 'My Drive/imdb'
BASE_DIR_P.mkdir(parents=True, exist_ok=True)
DATA_DIR_P = BASE_DIR_P / 'data'
DATA_DIR_P.mkdir(parents=True, exist_ok=True)
MDLS_DIR_P = BASE_DIR_P / 'models'
MDLS_DIR_P.mkdir(parents=True, exist_ok=True)
LOGS_DIR_P = BASE_DIR_P / 'logs'
LOGS_DIR_P.mkdir(parents=True, exist_ok=True)
FASTAI_DATA_DIR_P = Path('/root/.fastai/data')
FASTAI_DATA_DIR_P.mkdir(parents=True, exist_ok=True)
COLAB_DATA_DIR_P = COLAB_CONTENT_DIR_P / 'data'
if not COLAB_DATA_DIR_P.is_symlink():
COLAB_DATA_DIR_P.symlink_to(FASTAI_DATA_DIR_P)
if (COLAB_CONTENT_DIR_P / 'sample_data').exists():
!set -x; rm -rf /content/sample_data/
###Output
_____no_output_____
###Markdown
Assign Shared Hyperparams
###Code
lm_bs = 128
cf_bs = round(lm_bs / 2)
print(f'Our lm_bs: {lm_bs}; cf_bs: {cf_bs}')
bptt = 80 # From the example, but fastai defaults to 70.
moms = (0.8, 0.7)
FW_LM_DBNCH_FILE_S = f'fw_lm_dbnch-b{lm_bs}.pkl'
BW_LM_DBNCH_FILE_S = f'bw_lm_dbnch-b{lm_bs}.pkl'
FW_CF_DBNCH_FILE_S = f'fw_cf_dbnch-b{cf_bs}.pkl'
BW_CF_DBNCH_FILE_S = f'bw_cf_dbnch-b{cf_bs}.pkl'
###Output
_____no_output_____
###Markdown
LM-specific Hyperparams
###Code
# Decrease the lr from the example's 2e-2 proportionally to the orig lm bs 256.
ORIG_LM_BS = 256
ORIG_LM_LR = 2e-2
# lm_lr = ORIG_LM_LR
# lm_lr = lm_bs / ORIG_LM_BS * ORIG_LM_LR
# lm_lr = round(lm_lr, 7)
# print(f'In proportion to our lm_bs, our lm_lr : {lm_lr}')
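# (Kept commented for reference: the proportional-lr heuristic above is superseded
# further down, where lm_lr is taken from lr_find's suggestion instead.)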
lm_drop_mult = 1.0
lm_wd = 0.1 # From the example, except forward classifier uses fastai default 1e-2.
# FW_ENC_NAME = f'fw_enc-b{lm_bs}-lr{lm_lr}'
# BW_ENC_NAME = f'bw_enc-b{lm_bs}-lr{lm_lr}'
###Output
_____no_output_____
###Markdown
CF-specific Hyperparams
###Code
ORIG_CF_BS = round(ORIG_LM_BS / 2)
ORIG_CF_LR = 1e-1
# cf_lr = ORIG_CF_LR
# cf_lr = cf_bs / ORIG_CF_BS * ORIG_CF_LR * 1.2
# cf_lr = round(cf_lr, 7)
# print(f'In proportion to our cf_bs, our cf_lr: {cf_lr}')
cf_drop_mult = lm_drop_mult / 2
cf_wd = 0.1
# FW_CF_NAME = f'fw_cf-b{cf_bs}-lr{cf_lr}'
# BW_CF_NAME = f'bw_cf-b{cf_bs}-lr{cf_lr}'
###Output
_____no_output_____
###Markdown
Args
###Code
# Set num_workers to main process since the training set will be shuffled.
n_dbnch_wrkrs = 0
plt.style.use(['dark_background','seaborn-poster','seaborn-deep'])
plt.rcParams['axes.grid'] = True
plt.rcParams['axes.grid.axis'] = 'x'
plt.rcParams['axes.grid.which'] = 'both'
plt.rcParams['grid.alpha'] = 0.5
plt.rcParams['grid.color'] = 'xkcd:lime green'
plt.rcParams['grid.linestyle'] = ':'
###Output
_____no_output_____
###Markdown
Define Random State Fixer
###Code
# Set a constant seed for every random number generator.
SEED = 42
def reset_all_nondeterministic_states(seed=SEED):
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available(): torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.deterministic = True # About 15% slower but...
torch.backends.cudnn.benchmark = False
###Output
_____no_output_____
###Markdown
LM-specific Helpers
###Code
def build_lm_databunch(data_dir_p, n_workers, bs, bptt):
reset_all_nondeterministic_states()
return (TextList.from_folder(data_dir_p)
#Inputs: all the text files in path
.filter_by_folder(include=['train', 'test', 'unsup'])
#We may have other temp folders that contain text files so we only keep what's in train and test
.split_by_rand_pct(
0.1,
seed=SEED # Set the seed again since in theory one can call np.random before this.
)
#We randomly split and keep 10% (10,000 reviews) for validation
.label_for_lm()
#We want to do a language model so we label accordingly
.databunch(bs=bs, bptt=bptt, num_workers=n_workers))
def init_lm_learner_with_ulmfit(dbnch, drop_mult, base_path=BASE_DIR_P):
reset_all_nondeterministic_states()
lm_learn = language_model_learner(dbnch, AWD_LSTM, drop_mult=drop_mult, path=base_path)
lm_learn = lm_learn.to_fp16(clip=0.1) # 2x faster
return lm_learn
def init_lm_cycles(learner, lr, moms, wd, clbks=[], n_cycles=1):
print(f'init lm lr: {lr}')
reset_all_nondeterministic_states()
learner.fit_one_cycle(n_cycles, lr, moms=moms, wd=wd, callbacks=clbks)
return learner
def tune_lm_cycles(learner, lr, moms, wd, clbks=[], n_cycles=10):
print(f'tune lm lr: {lr}')
reset_all_nondeterministic_states()
learner.unfreeze()
learner.fit_one_cycle(n_cycles, lr, moms=moms, wd=wd, callbacks=clbks)
return learner
###Output
_____no_output_____
###Markdown
CF-specific Helpers
###Code
def build_cf_databunch(data_dir_p, n_workers, bs, vocab):
reset_all_nondeterministic_states()
return (TextList.from_folder(data_dir_p, vocab=vocab)
#grab all the text files in path
.split_by_folder(valid='test')
#split by train and valid folder (that only keeps 'train' and 'test' so no need to filter)
.label_from_folder(classes=['neg', 'pos'])
#label them all with their folders
.databunch(bs=bs, num_workers=n_workers))
def init_cf_learner_with_encoder(dbnch, drop_mult, enc_name, base_path=BASE_DIR_P):
reset_all_nondeterministic_states()
cf_learn = text_classifier_learner(dbnch, AWD_LSTM, drop_mult=drop_mult, path=base_path, pretrained=False)
cf_learn.load_encoder(enc_name)
return cf_learn
def init_cf_cycles(learner, lr, moms, wd, clbks, n_cycles=1):
print(f'init cf lr: {lr}')
reset_all_nondeterministic_states()
learner.fit_one_cycle(n_cycles, lr, moms=moms, wd=wd, callbacks=clbks)
return learner
def tune_cf_cycles(
learner,
lr,
moms,
wd,
clbks_tuple,
n_cycles_tuple=(1,1,2),
freeze_steps=(-2,-3,None),
lr_decays=(2,2,5)
):
reset_all_nondeterministic_states()
for n_cycles, freeze_step, lr_decay, clbks in zip(
n_cycles_tuple, freeze_steps, lr_decays, clbks_tuple):
if freeze_step is not None:
learner.freeze_to(freeze_step)
else:
learner.unfreeze()
lr /= lr_decay
print(f'tune cf lr: {lr}')
learner.fit_one_cycle(n_cycles, slice(lr/(2.6**4),lr), moms=moms, wd=wd, callbacks=clbks)
return learner
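# Note: slice(lr/(2.6**4), lr) in tune_cf_cycles gives ULMFiT-style discriminative
# learning rates (deeper layer groups get smaller rates, spaced by roughly a factor
# of 2.6), while freeze_to(-2) / freeze_to(-3) / unfreeze() implements gradual unfreezing.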
###Output
_____no_output_____
###Markdown
Fit Forward LM Process Data Once
###Code
IMDB_DATA_IN_COLAB_DIR_P = COLAB_DATA_DIR_P / 'imdb'
# Untar into colab disk so no latency to GDrive.
downloaded_imdb_data_dir_p = untar_data(URLs.IMDB, dest=FASTAI_DATA_DIR_P)
assert IMDB_DATA_IN_COLAB_DIR_P.resolve() == downloaded_imdb_data_dir_p
fw_lm_dbnch = build_lm_databunch(IMDB_DATA_IN_COLAB_DIR_P, n_dbnch_wrkrs, lm_bs, bptt)
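# The LM databunch pools the train, test and unsup review folders (about 100k texts
# in the full IMDB dump), keeps 10% for validation, and streams the rest as
# contiguous token sequences of length bptt for language-model training.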
# fw_lm_dbnch.show_batch()
###Output
_____no_output_____
###Markdown
Use Persistent Path
###Code
# Save the databunch to a non-volatile path (e.g.: GDrive).
fw_lm_dbnch.save(DATA_DIR_P / FW_LM_DBNCH_FILE_S)
reset_all_nondeterministic_states()
fw_lm_dbnch = load_data(DATA_DIR_P, FW_LM_DBNCH_FILE_S, bs=lm_bs, bptt=bptt, num_workers=n_dbnch_wrkrs)
# fw_lm_dbnch.path.ls()
# The batch should look the same if the above efforts keep the reproducibility.
# fw_lm_dbnch.show_batch()
###Output
_____no_output_____
###Markdown
Find Learning Rate
###Code
assert fw_lm_dbnch.train_dl.batch_size == lm_bs
lm_epoch_sz = math.ceil(len(fw_lm_dbnch.train_ds) / lm_bs)
lm_epoch_sz
lr_find_scope = IPyExperimentsPytorch(cl_enable=False)
fw_lm_learn = init_lm_learner_with_ulmfit(fw_lm_dbnch, lm_drop_mult)
fw_lm_learn.lr_find(end_lr=1, num_it=math.ceil(lm_epoch_sz/9), wd=lm_wd)
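# lr_find runs a short mock fit while exponentially increasing the learning rate;
# recorder.plot(suggestion=True) below then proposes the rate at the point of
# steepest loss descent, which is captured from stdout and reused as lm_lr.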
%%capture lr_find_log
fw_lm_learn.recorder.plot(suggestion=True)
(found_lr_name,
found_lr_val_str), _ = [line.split(': ')
for line in lr_find_log.stdout.split('\n') if line]
display(lr_find_log.outputs[0])
print(found_lr_name, found_lr_val_str)
lr_find_scope.keep_var_names('found_lr_val_str')
del lr_find_scope; gc.collect()
###Output
_____no_output_____
###Markdown
Init-fit
###Code
# lm_lr = 0.0251
lm_lr = float(found_lr_val_str)
fw_lm_learn = init_lm_learner_with_ulmfit(fw_lm_dbnch, lm_drop_mult)
init_fw_lm_log_p = LOGS_DIR_P / f'{SESSN_START_T}_history-init_fw_lm-b{lm_bs}-lr{lm_lr}' # w/o .csv
init_fw_lm_clbks = [CSVLogger(fw_lm_learn, init_fw_lm_log_p, append=True)]
fw_lm_learn = init_lm_cycles(fw_lm_learn, lm_lr, moms, lm_wd, init_fw_lm_clbks)
# fw_lm_learn.csv_logger.read_logged_file()
fw_lm_learn.save(f'init_fw_lm-b{lm_bs}-lr{lm_lr}')
# (fw_lm_learn.path/fw_lm_learn.model_dir).ls()
###Output
init lm lr: 0.0251
epoch train_loss valid_loss accuracy time
0 4.344767 4.036683 0.291658 18:57
###Markdown
Fine-tune
###Code
# reset_all_nondeterministic_states()
# fw_lm_learn = init_lm_learner_with_ulmfit(fw_lm_dbnch, lm_drop_mult)
# fw_lm_learn = fw_lm_learn.load(f'init_fw_lm-b{lm_bs}-lr{lm_lr}')
tune_lm_lr = round(lm_lr/10, 5)
tune_fw_lm_log_p = LOGS_DIR_P / f'{SESSN_START_T}_history-tune_fw_lm-b{lm_bs}-lr{tune_lm_lr}'
tune_fw_lm_clbks = [CSVLogger(fw_lm_learn, tune_fw_lm_log_p, append=True)]
fw_lm_learn = tune_lm_cycles(fw_lm_learn, tune_lm_lr, moms, lm_wd, tune_fw_lm_clbks)
fw_lm_learn.save(f'tuned_fw_lm-b{lm_bs}-lr{lm_lr}')
FW_ENC_NAME = f'fw_enc-b{lm_bs}-lr{lm_lr}'
fw_lm_learn.save_encoder(FW_ENC_NAME)
# (fw_lm_learn.path/fw_learn_lm.model_dir).ls()
###Output
_____no_output_____
###Markdown
Forward CF
###Code
# reset_all_nondeterministic_states()
# fw_lm_dbnch = load_data(DATA_DIR_P, FW_LM_DBNCH_FILE_S, bs=lm_bs, bptt=bptt, num_workers=n_dbnch_wrkrs)
fw_cf_dbnch = build_cf_databunch(IMDB_DATA_IN_COLAB_DIR_P, n_dbnch_wrkrs, cf_bs, fw_lm_dbnch.vocab)
fw_cf_dbnch.save(DATA_DIR_P / FW_CF_DBNCH_FILE_S)
# fw_cf_dbnch.show_batch()
assert fw_cf_dbnch.train_dl.batch_size == cf_bs
cf_epoch_sz = math.ceil(len(fw_cf_dbnch.train_ds) / fw_cf_dbnch.train_dl.batch_size)
cf_epoch_sz
lr_find_scope = IPyExperimentsPytorch(cl_enable=False)
fw_cf_learn = init_cf_learner_with_encoder(fw_cf_dbnch, cf_drop_mult, FW_ENC_NAME)
fw_cf_learn.lr_find(end_lr=10, num_it=math.ceil(cf_epoch_sz/8), wd=cf_wd)
%%capture lr_find_log
fw_cf_learn.recorder.plot(suggestion=True)
list(map(partial(str.split, sep=': '), filter(None, lr_find_log.stdout.split('\n'))))
display(lr_find_log.outputs[0])
del lr_find_scope; gc.collect()
# reset_all_nondeterministic_states()
# fw_cf_dbnch = load_data(DATA_DIR_P, FW_CF_DBNCH_FILE_S, bs=cf_bs, num_workers=n_dbnch_wrkrs)
fw_cf_learn = init_cf_learner_with_encoder(fw_cf_dbnch, cf_drop_mult, FW_ENC_NAME)
cf_lr = 5.18e-2
init_fw_cf_log_p = LOGS_DIR_P / f'{SESSN_START_T}_history-init_fw_cf-b{cf_bs}-lr{cf_lr}'
init_fw_cf_clbks = [CSVLogger(fw_cf_learn, init_fw_cf_log_p, append=True)]
fw_cf_learn = init_cf_cycles(fw_cf_learn, cf_lr, moms, cf_wd, init_fw_cf_clbks)
tune_fw_cf_clbks_tuple = (
[CSVLogger(
fw_cf_learn,
LOGS_DIR_P / f'{SESSN_START_T}_history-tune_fw_cf-b{cf_bs}-p{period}',
append=True)]
for period in range(1,4)
)
fw_cf_learn = tune_cf_cycles(fw_cf_learn, cf_lr, moms, cf_wd, tune_fw_cf_clbks_tuple)
FW_CF_NAME = f'fw_cf-b{cf_bs}-lr{cf_lr}'  # defined here since the earlier definition is commented out
fw_cf_learn.save(FW_CF_NAME)
# (fw_cf_learn.path/fw_cf_learn.model_dir).ls()
fw_cf_learn.export(MDLS_DIR_P / f'export-fw_cf-b{cf_bs}-lr{cf_lr}')
fw_cf_learn.destroy(); del fw_cf_learn; gc.collect()
###Output
_____no_output_____
###Markdown
Backward LM
###Code
reset_all_nondeterministic_states()
bw_lm_dbnch = load_data(DATA_DIR_P, FW_LM_DBNCH_FILE_S, bs=lm_bs, bptt=bptt, num_workers=n_dbnch_wrkrs, backwards=True)
# bw_lm_dbnch.show_batch()
bw_lm_learn = init_lm_learner_with_ulmfit(bw_lm_dbnch, lm_drop_mult)
init_bw_lm_log_p = LOGS_DIR_P / f'{SESSN_START_T}_history-init_bw_lm-b{lm_bs}'
init_bw_lm_clbks = [CSVLogger(bw_lm_learn, init_bw_lm_log_p, append=True)]
bw_lm_learn = init_lm_cycles(bw_lm_learn, lm_lr, moms, lm_wd, init_bw_lm_clbks)
bw_lm_learn.save(f'init_bw_lm-b{lm_bs}')
# (bw_lm_learn.path/bw_lm_learn.model_dir).ls()
# reset_all_nondeterministic_states()
# bw_lm_learn = bw_lm_learn.load(f'init_bw_lm-b{lm_bs}')
tune_bw_lm_log_p = LOGS_DIR_P / f'{SESSN_START_T}_history-tune_bw_lm-b{lm_bs}'
tune_bw_lm_clbks = [CSVLogger(bw_lm_learn, tune_bw_lm_log_p, append=True)]
bw_lm_learn = tune_lm_cycles(bw_lm_learn, lm_lr/10, moms, lm_wd, tune_bw_lm_clbks)
bw_lm_learn.save(f'tuned_bw_lm-b{lm_bs}')
BW_ENC_NAME = f'bw_enc-b{lm_bs}-lr{lm_lr}'  # defined here since the earlier definition is commented out
bw_lm_learn.save_encoder(BW_ENC_NAME)
# (bw_lm_learn.path/bw_lm_learn.model_dir).ls()
###Output
_____no_output_____
###Markdown
Backward CF
###Code
reset_all_nondeterministic_states()
bw_cf_dbnch = load_data(DATA_DIR_P, FW_CF_DBNCH_FILE_S, bs=cf_bs, num_workers=n_dbnch_wrkrs, backwards=True)
# bw_cf_dbnch.show_batch()
bw_cf_learn = init_cf_learner_with_encoder(bw_cf_dbnch, cf_drop_mult, BW_ENC_NAME)
init_bw_cf_log_p = LOGS_DIR_P / f'{SESSN_START_T}_history-init_bw_cf-b{cf_bs}'
init_bw_cf_clbks = [CSVLogger(bw_cf_learn, init_bw_cf_log_p, append=True)]
bw_cf_learn = init_cf_cycles(bw_cf_learn, cf_lr, moms, cf_wd, init_bw_cf_clbks)
tune_bw_cf_clbks_tuple = (
[CSVLogger(
bw_cf_learn,
LOGS_DIR_P / f'{SESSN_START_T}_history-tune_bw_cf-b{cf_bs}-p{period}',
append=True)]
for period in range(1,4)
)
bw_cf_learn = tune_cf_cycles(bw_cf_learn, cf_lr, moms, cf_wd, tune_bw_cf_clbks_tuple)
BW_CF_NAME = f'bw_cf-b{cf_bs}-lr{cf_lr}'  # defined here since the earlier definition is commented out
bw_cf_learn.save(BW_CF_NAME)
# (bw_cf_learn.path/bw_cf_learn.model_dir).ls()
###Output
_____no_output_____
###Markdown
Ensemble
###Code
# fw_cf_learn was destroyed above to free GPU memory, so rebuild it and reload the
# saved forward classifier before ensembling.
fw_cf_learn = init_cf_learner_with_encoder(fw_cf_dbnch, cf_drop_mult, FW_ENC_NAME)
fw_cf_learn = fw_cf_learn.load(FW_CF_NAME)
pred_fw, lbl_fw = fw_cf_learn.get_preds(ordered=True)
pred_bw, lbl_bw = bw_cf_learn.get_preds(ordered=True)
avg_pred = (pred_fw + pred_bw) / 2
accuracy(avg_pred, lbl_fw)
###Output
_____no_output_____
###Markdown
**Correctness verified with Python 3.6:**+ pandas 0.23.4+ numpy 1.15.4+ matplotlib 3.0.2+ sklearn 0.20.2 IMDB reviews
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
There are 25000 imdb user reviews with binary labels computed from the ratings: 0 for a rating below 5 and 1 for a rating of 7 or higher. Full data: https://www.kaggle.com/c/word2vec-nlp-tutorial/data Let's load the sample:
###Code
imdb = pd.read_csv('labeledTrainData.tsv', delimiter='\t')
imdb.shape
imdb.head()
###Output
_____no_output_____
###Markdown
The classes are balanced:
###Code
imdb.sentiment.value_counts()
###Output
_____no_output_____
###Markdown
Let's split the sample into training and test parts:
###Code
from sklearn.model_selection import train_test_split
texts_train, texts_test, y_train, y_test = train_test_split(imdb.review.values, imdb.sentiment.values)
###Output
_____no_output_____
###Markdown
Vectorize the review texts:
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
vect = TfidfVectorizer(sublinear_tf=True, use_idf=True)
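# sublinear_tf=True replaces the raw term count tf by 1 + log(tf), damping the
# influence of words that repeat many times within a single review.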
X_train = vect.fit_transform(texts_train)
X_test = vect.transform(texts_test)
###Output
_____no_output_____
###Markdown
Logistic regression Let's fit a logistic regression on the vectorized data and compute the AUC:
###Code
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
clf = LogisticRegression()
clf.fit(X_train, y_train)
print(metrics.accuracy_score(y_test, clf.predict(X_test)))
print(metrics.roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1]))
###Output
0.89408
0.9586951202562883
###Markdown
We ended up with a very large number of features:
###Code
X_train.shape
###Output
_____no_output_____
###Markdown
Let's try selecting features with the lasso:
###Code
clf = LogisticRegression(C=0.15, penalty='l1')
clf.fit(X_train, y_train)
print(np.sum(np.abs(clf.coef_) > 1e-4))
print(metrics.accuracy_score(y_test, clf.predict(X_test)))
print(metrics.roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1]))
###Output
_____no_output_____
###Markdown
Another way to select features is randomized logistic regression:
###Code
from sklearn.linear_model import RandomizedLogisticRegression
rlg = RandomizedLogisticRegression(C=0.13)
rlg.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Let's see how many features get selected:
###Code
np.sum(rlg.scores_ > 0)
###Output
_____no_output_____
###Markdown
Fit a logistic regression on the selected features:
###Code
X_train_lasso = X_train[:, rlg.scores_ > 0]
X_test_lasso = X_test[:, rlg.scores_ > 0]
clf = LogisticRegression(C=1)
clf.fit(X_train_lasso, y_train)
print(metrics.accuracy_score(y_test, clf.predict(X_test_lasso)))
print(metrics.roc_auc_score(y_test, clf.predict_proba(X_test_lasso)[:, 1]))
###Output
_____no_output_____
###Markdown
Principal component analysis Let's build 100 synthetic features using principal component analysis:
###Code
from sklearn.decomposition import TruncatedSVD
tsvd = TruncatedSVD(n_components=100)
X_train_pca = tsvd.fit_transform(X_train)
X_test_pca = tsvd.transform(X_test)
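# TruncatedSVD is used instead of plain PCA because it works directly on the
# sparse tf-idf matrix without densifying or centering it.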
###Output
_____no_output_____
###Markdown
Train a logistic regression on them:
###Code
clf = LogisticRegression()
clf.fit(X_train_pca, y_train)
print(metrics.accuracy_score(y_test, clf.predict(X_test_pca)))
print(metrics.roc_auc_score(y_test, clf.predict_proba(X_test_pca)[:, 1]))
###Output
0.8616
0.9370129466434256
###Markdown
With the 100 features obtained this way, the quality is only slightly worse than with all 66702! Let's try training a random forest on them:
###Code
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train_pca, y_train)
print(metrics.accuracy_score(y_test, clf.predict(X_test_pca)))
print(metrics.roc_auc_score(y_test, clf.predict_proba(X_test_pca)[:, 1]))
###Output
0.83264
0.9096250402895872
###Markdown
###Code
import tensorflow as tf
from tensorflow.keras import datasets, layers, models, preprocessing
import tensorflow_datasets as tfds
max_len = 200
n_words = 10000
dim_embedding = 256
EPOCHS = 20
BATCH_SIZE = 500
def load_data():
# Load data.
(X_train, y_train), (X_test, y_test) = datasets.imdb.load_data(num_words=n_words)
# Pad sequences with max_len.
X_train = preprocessing.sequence.pad_sequences(X_train, maxlen=max_len)
X_test = preprocessing.sequence.pad_sequences(X_test, maxlen=max_len)
return (X_train, y_train), (X_test, y_test)
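# pad_sequences pads (and truncates) every review to exactly max_len=200 integers,
# by default on the left, so the Embedding layer always sees a fixed (batch, 200) input.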
def build_model():
model = models.Sequential()
    # Input: Embedding layer.
# The model will take as input an integer matrix of size (batch,
# input_length).
# The model will output dimension (input_length, dim_embedding).
# The largest integer in the input should be no larger
# than n_words (vocabulary size).
model.add(layers.Embedding(n_words,
dim_embedding,
input_length=max_len))
model.add(layers.Dropout(0.3))
    # GlobalMaxPooling1D takes the maximum over the sequence dimension for each
    # of the dim_embedding feature channels.
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
return model
(X_train, y_train), (X_test, y_test) = load_data()
model = build_model()
model.summary()
model.compile(optimizer = "adam",
loss = "binary_crossentropy",
metrics = ["accuracy"])
score = model.fit(X_train, y_train,
epochs = EPOCHS,
batch_size = BATCH_SIZE,
validation_data = (X_test, y_test))
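# Note: the test split is reused as validation data purely for monitoring during
# training; the model never trains on it, but the evaluate() call below is then
# not a fully held-out estimate, since the test set guided epoch-by-epoch feedback.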
score = model.evaluate(X_test, y_test, batch_size=BATCH_SIZE)
print("\nTest score:", score[0])
print('Test accuracy:', score[1])
###Output
_____no_output_____
###Markdown
IMDB sentiment classification. Dataset of 25,000 movie reviews from IMDB, labeled by sentiment (positive/negative). Reviews have been preprocessed, and each review is encoded as a sequence of word indexes (integers). For convenience, words are indexed by overall frequency in the dataset, so that for instance the integer "3" encodes the 3rd most frequent word in the data. This allows for quick filtering operations such as: "only consider the top 10,000 most common words, but eliminate the top 20 most common words".
###Code
# Basic packages.
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
# Keras specific packages.
from keras import Input
from keras import Model
from keras import regularizers
from keras import optimizers
from keras.layers import Dense, Activation, Flatten, GRU
from keras.layers import Dropout
from keras.layers import Conv1D, MaxPooling1D
from keras.layers import Embedding
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.datasets import imdb
MAX_NUM_WORDS = 10000
MAX_SEQUENCE_LENGTH = 1000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.25
TEXT_DATA_DIR = "dataset/20_newsgroup"
GLOVE_DIR = "dataset/glove"
EPOCHS = 10
BATCH_SIZE = 129
###Output
_____no_output_____
###Markdown
1. Load the dataset.
###Code
# Load the data.
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=MAX_NUM_WORDS)
# Get the word to index dict.
word_to_index = imdb.get_word_index()
# Get the index to word dict.
index_to_word = dict(
[(value, key) for (key, value) in word_to_index.items()])
# Display
print("Length dictionnary = {}".format(len(word_to_index)))
max_row = []
for i in range(x_train.shape[0]):
max_row.append(len(x_train[i]))
print(max(max_row))
###Output
Length dictionary = 88584
2494
###Markdown
2. Preparing the pretrained embedding layer.
###Code
embeddings_index = {}
f = open(os.path.join(GLOVE_DIR, "glove.6B.{}d.txt".format(EMBEDDING_DIM)))
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype="float32")
embeddings_index[word] = coefs
f.close()
print("Found %s word vectors." % len(embeddings_index))
embedding_matrix = np.zeros((len(word_to_index) + 1, EMBEDDING_DIM))
for word, i in word_to_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
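# Caveat: imdb.load_data shifts every word index by 3 (indices 0-2 are reserved
# markers), while embedding_matrix is built on the raw word_to_index values, so the
# embedding rows and the padded sequences below are offset by 3 relative to each
# other (left as in the original notebook).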
embedding_layer = Embedding(len(word_to_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
###Output
_____no_output_____
###Markdown
3. Handle the dataset.Here we gather the features of words $X \in \mathbb{R}^{m \times n}$ where $m$ is the total number of samples and $n$ is the features length. For the current example $n$ is equal to $10000$.
###Code
# Pad the training and test features.
x_tr = pad_sequences(x_train, maxlen=MAX_SEQUENCE_LENGTH)
x_te = pad_sequences(x_test, maxlen=MAX_SEQUENCE_LENGTH)
# Display the size.
print("Size x_tr = {}".format(x_tr.shape))
print("Size x_te = {}".format(x_te.shape))
# Handle the training and test labels.
y_tr = y_train.reshape(-1, 1)
y_te = y_test.reshape(-1, 1)
# Display the shapes.
print("y_train ", y_tr.shape)
print("y_test ", y_te.shape)
###Output
y_train (25000, 1)
y_test (25000, 1)
###Markdown
3. Build the model.
###Code
# Set the input.
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype="int32")
# Set the embedding layer.
embedded_sequences = embedding_layer(sequence_input)
# Conv layer 1.
"""
x = Conv1D(64, 5, kernel_regularizer=regularizers.l2(0.001))(embedded_sequences)
x = Activation("relu")(x)
x = MaxPooling1D(5)(x)
X = Dropout(0.5)(x)
# Conv Layer 2.
x = Conv1D(64, 5, kernel_regularizer=regularizers.l2(0.001))(x)
x = Activation("relu")(x)
x = MaxPooling1D(5)(x)
X = Dropout(0.5)(x)
# Conv Layer 3.
x = Conv1D(64, 5, kernel_regularizer=regularizers.l2(0.001))(x)
x = Activation("relu")(x)
x = MaxPooling1D(35)(x)
X = Dropout(0.5)(x)
# Output layer.
x = Flatten()(x)
x = Dense(128)(x)
x = Activation("relu")(x)
X = Dropout(0.5)(x)
"""
#x = Flatten()(x)
#x = Dense(128)(x)
#x = Activation("relu")(x)
#X = Dropout(0.5)(x)
x = GRU(128, return_sequences=False)(embedded_sequences)
# Softmax layer.
preds = Dense(1, activation="sigmoid")(x)
# Build the model.
model = Model(sequence_input, preds)
# Set the optimizer.
optim = optimizers.Adam(lr=0.001)
# Compile the model.
model.compile(loss="binary_crossentropy", optimizer=optim, metrics=["acc"])
# Set the fitting parameters.
fit_params = {
"epochs": EPOCHS,
"batch_size": BATCH_SIZE,
"validation_split": VALIDATION_SPLIT,
"shuffle": True
}
# Print the model.
model.summary()
# Fit the model.
history = model.fit(x_tr, y_tr, **fit_params)
# Visualise the training results.
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.plot(history.history["loss"], color="b", label="tr")
plt.plot(history.history["val_loss"], color="r", label="te")
plt.ylabel("loss")
plt.xlabel("epochs")
plt.grid()
plt.legend()
plt.subplot(122)
plt.plot(history.history["acc"], color="b", label="tr")
plt.plot(history.history["val_acc"], color="r", label="te")
plt.ylabel("acc")
plt.xlabel("epochs")
plt.grid()
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
5. Evaluation
###Code
# Get the predictions for the test dataset.
y_pred = model.predict(x_te)
# Threshold the predicted probabilities at 0.5 to obtain binary class labels.
y_pred = 1.0 * (y_pred > 0.5)
# Display the classification report.
print(classification_report(y_te, y_pred))
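# Optionally (sketch), a single overall accuracy number could be computed as well:
# from sklearn.metrics import accuracy_score
# print("Accuracy:", accuracy_score(y_te, y_pred))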
###Output
precision recall f1-score support
0 0.79 0.69 0.74 12500
1 0.72 0.82 0.77 12500
avg / total 0.76 0.75 0.75 25000
|
notebooks/fields/Fraction field.ipynb | ###Markdown
Fraction field
###Code
from konfoo import Index, Byteorder, Fraction
###Output
_____no_output_____
###Markdown
Item Item type of the `field` class.
###Code
Fraction.item_type
###Output
_____no_output_____
###Markdown
Checks if the `field` class is a `bit` field.
###Code
Fraction.is_bit()
###Output
_____no_output_____
###Markdown
Checks if the `field` class is a `boolean` field.
###Code
Fraction.is_bool()
###Output
_____no_output_____
###Markdown
Checks if the `field` class is a `decimal` number field.
###Code
Fraction.is_decimal()
###Output
_____no_output_____
###Markdown
Checks if the `field` class is a `floating point` number field.
###Code
Fraction.is_float()
###Output
_____no_output_____
###Markdown
Checks if the `field` class is a `pointer` field.
###Code
Fraction.is_pointer()
###Output
_____no_output_____
###Markdown
Checks if the `field` class is a `stream` field.
###Code
Fraction.is_stream()
###Output
_____no_output_____
###Markdown
Checks if the `field` class is a `string` field.
###Code
Fraction.is_string()
###Output
_____no_output_____
###Markdown
Field
###Code
fraction = Fraction(bits_integer=2, bit_size=16, align_to=None, signed=False, byte_order='auto')
fraction = Fraction(2, 16)
###Output
_____no_output_____
###Markdown
Field view
###Code
fraction
str(fraction)
repr(fraction)
###Output
_____no_output_____
###Markdown
Field name
###Code
fraction.name
###Output
_____no_output_____
###Markdown
Field index
###Code
fraction.index
###Output
_____no_output_____
###Markdown
Byte `index` of the `field` within the `byte stream`.
###Code
fraction.index.byte
###Output
_____no_output_____
###Markdown
Bit offset relative to the byte `index` of the `field` within the `byte stream`.
###Code
fraction.index.bit
###Output
_____no_output_____
###Markdown
Absolute address of the `field` within the `data source`.
###Code
fraction.index.address
###Output
_____no_output_____
###Markdown
Base address of the `byte stream` within the `data source`.
###Code
fraction.index.base_address
###Output
_____no_output_____
###Markdown
Indexes the `field` and returns the `index` after the `field`.
###Code
fraction.index_field(index=Index())
###Output
_____no_output_____
###Markdown
Field alignment
###Code
fraction.alignment
###Output
_____no_output_____
###Markdown
Byte size of the `field group` which the `field` is *aligned* to.
###Code
fraction.alignment.byte_size
###Output
_____no_output_____
###Markdown
Bit offset of the `field` within its *aligned* `field group`.
###Code
fraction.alignment.bit_offset
###Output
_____no_output_____
###Markdown
Field size
###Code
fraction.bit_size
###Output
_____no_output_____
###Markdown
Field byte order
###Code
fraction.byte_order
fraction.byte_order.value
fraction.byte_order.name
fraction.byte_order = 'auto'
fraction.byte_order = Byteorder.auto
###Output
_____no_output_____
###Markdown
Field value Checks if the decimal `field` is signed or unsigned.
###Code
fraction.signed
###Output
_____no_output_____
###Markdown
Maximal decimal `field` value.
###Code
fraction.max()
###Output
_____no_output_____
###Markdown
Minimal decimal `field` value.
###Code
fraction.min()
###Output
_____no_output_____
###Markdown
Returns the fraction `field` value as a floating point number.
###Code
fraction.value
###Output
_____no_output_____
###Markdown
Returns the decimal `field` value *aligned* to its `field group` as a number of bytes.
###Code
bytes(fraction)
bytes(fraction).hex()
###Output
_____no_output_____
###Markdown
Returns the decimal `field` value as an integer number.
###Code
int(fraction)
###Output
_____no_output_____
###Markdown
Returns the decimal `field` value as a floating point number.
###Code
float(fraction)
###Output
_____no_output_____
###Markdown
Returns the decimal `field` value as a lowercase hexadecimal string prefixed with `0x`.
###Code
hex(fraction)
###Output
_____no_output_____
###Markdown
Returns the decimal `field` value as a binary string prefixed with `0b`.
###Code
bin(fraction)
###Output
_____no_output_____
###Markdown
Returns the decimal `field` value as an octal string prefixed with `0o`.
###Code
oct(fraction)
###Output
_____no_output_____
###Markdown
Returns the decimal `field` value as a boolean value.
###Code
bool(fraction)
###Output
_____no_output_____
###Markdown
Returns the decimal `field` value as a signed integer number.
###Code
fraction.as_signed()
###Output
_____no_output_____
###Markdown
Returns the decimal `field` value as an unsigned integer number.
###Code
fraction.as_unsigned()
###Output
_____no_output_____
###Markdown
Field metadata Returns the ``metadata`` of the ``field`` as an ordered dictionary.
###Code
fraction.describe()
###Output
_____no_output_____
###Markdown
Deserialize
###Code
fraction.deserialize(bytes.fromhex('0100'), byte_order='little')
fraction.value
bytes(fraction)
bytes(fraction).hex()
int(fraction)
float(fraction)
hex(fraction)
bin(fraction)
oct(fraction)
bool(fraction)
###Output
_____no_output_____
###Markdown
Serialize
###Code
buffer = bytearray()
fraction.value = 1
fraction.value = 1.0
fraction.value = 0x1
fraction.value = 0b1
fraction.value = 0o1
fraction.value = True
fraction.value = 1.0
fraction.serialize(buffer, byte_order='little')
buffer.hex()
bytes(fraction).hex()
###Output
_____no_output_____ |
DA0101EN/model-evaluation-and-refinement.ipynb | ###Markdown
Model Evaluation and Refinement. Estimated time needed: **30** minutes. Objectives: after completing this lab you will be able to evaluate and refine prediction models. Table of contents: Model Evaluation; Over-fitting, Under-fitting and Model Selection; Ridge Regression; Grid Search. The dataset is hosted on IBM Cloud Object Storage.
###Code
import pandas as pd
import numpy as np
# Import clean data
path = 'https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DA0101EN-SkillsNetwork/labs/Data%20files/module_5_auto.csv'
df = pd.read_csv(path)
df.to_csv('module_5_auto.csv')
###Output
_____no_output_____
###Markdown
First, let's only use the numeric data
###Code
df=df._get_numeric_data()
df.head()
###Output
_____no_output_____
###Markdown
Libraries for plotting
###Code
%%capture
! pip install ipywidgets
from ipywidgets import interact, interactive, fixed, interact_manual
###Output
_____no_output_____
###Markdown
Functions for plotting
###Code
def DistributionPlot(RedFunction, BlueFunction, RedName, BlueName, Title):
width = 12
height = 10
plt.figure(figsize=(width, height))
ax1 = sns.distplot(RedFunction, hist=False, color="r", label=RedName)
ax2 = sns.distplot(BlueFunction, hist=False, color="b", label=BlueName, ax=ax1)
plt.title(Title)
plt.xlabel('Price (in dollars)')
plt.ylabel('Proportion of Cars')
plt.show()
plt.close()
def PollyPlot(xtrain, xtest, y_train, y_test, lr,poly_transform):
width = 12
height = 10
plt.figure(figsize=(width, height))
#training data
#testing data
# lr: linear regression object
#poly_transform: polynomial transformation object
xmax=max([xtrain.values.max(), xtest.values.max()])
xmin=min([xtrain.values.min(), xtest.values.min()])
x=np.arange(xmin, xmax, 0.1)
plt.plot(xtrain, y_train, 'ro', label='Training Data')
plt.plot(xtest, y_test, 'go', label='Test Data')
plt.plot(x, lr.predict(poly_transform.fit_transform(x.reshape(-1, 1))), label='Predicted Function')
plt.ylim([-10000, 60000])
plt.ylabel('Price')
plt.legend()
###Output
_____no_output_____
###Markdown
Part 1: Training and Testing. An important step in testing your model is to split your data into training and testing data. We will place the target data price in a separate dataframe y:
###Code
y_data = df['price']
###Output
_____no_output_____
###Markdown
drop price data in x data
###Code
x_data=df.drop('price',axis=1)
###Output
_____no_output_____
###Markdown
Now we randomly split our data into training and testing data using the function train_test_split.
###Code
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.10, random_state=1)
print("number of test samples :", x_test.shape[0])
print("number of training samples:",x_train.shape[0])
###Output
number of test samples : 21
number of training samples: 180
###Markdown
The test_size parameter sets the proportion of data that is split into the testing set. In the above, the testing set is set to 10% of the total dataset. Question 1):Use the function "train_test_split" to split up the data set such that 40% of the data samples will be utilized for testing, set the parameter "random_state" equal to zero. The output of the function should be the following: "x_train_1" , "x_test_1", "y_train_1" and "y_test_1".
###Code
# Write your code below and press Shift+Enter to execute
x_train1, x_test1, y_train1, y_test1 = train_test_split(x_data, y_data, test_size=0.4, random_state=0)
print("number of test samples :", x_test1.shape[0])
print("number of training samples:",x_train1.shape[0])
###Output
number of test samples : 81
number of training samples: 120
###Markdown
Click here for the solution```pythonx_train1, x_test1, y_train1, y_test1 = train_test_split(x_data, y_data, test_size=0.4, random_state=0) print("number of test samples :", x_test1.shape[0])print("number of training samples:",x_train1.shape[0])``` Let's import LinearRegression from the module linear_model.
###Code
from sklearn.linear_model import LinearRegression
###Output
_____no_output_____
###Markdown
We create a Linear Regression object:
###Code
lre=LinearRegression()
###Output
_____no_output_____
###Markdown
we fit the model using the feature horsepower
###Code
lre.fit(x_train[['horsepower']], y_train)
###Output
_____no_output_____
###Markdown
Let's Calculate the R^2 on the test data:
###Code
lre.score(x_test[['horsepower']], y_test)
###Output
_____no_output_____
###Markdown
We can see that the R^2 is much smaller on the test data. For comparison, here is the R^2 on the training data:
###Code
lre.score(x_train[['horsepower']], y_train)
###Output
_____no_output_____
###Markdown
Question 2): Find the R^2 on the test data using 40% of the data for testing
###Code
# Write your code below and press Shift+Enter to execute
x_train1, x_test1, y_train1, y_test1 = train_test_split(x_data, y_data, test_size=0.4, random_state=0)
lre.fit(x_train1[['horsepower']],y_train1)
lre.score(x_test1[['horsepower']],y_test1)
###Output
_____no_output_____
###Markdown
Click here for the solution```pythonx_train1, x_test1, y_train1, y_test1 = train_test_split(x_data, y_data, test_size=0.4, random_state=0)lre.fit(x_train1[['horsepower']],y_train1)lre.score(x_test1[['horsepower']],y_test1)``` Sometimes you do not have sufficient testing data; as a result, you may want to perform Cross-validation. Let's go over several methods that you can use for Cross-validation. Cross-validation Score Lets import model_selection from the module cross_val_score.
###Code
from sklearn.model_selection import cross_val_score
###Output
_____no_output_____
###Markdown
We input the object, the feature ('horsepower' in this case), and the target data (y_data). The parameter 'cv' determines the number of folds; in this case 4.
###Code
Rcross = cross_val_score(lre, x_data[['horsepower']], y_data, cv=4)
###Output
_____no_output_____
###Markdown
The default scoring is R^2; each element in the array has the average R^2 value in the fold:
###Code
Rcross
###Output
_____no_output_____
###Markdown
We can calculate the average and standard deviation of our estimate:
###Code
print("The mean of the folds are", Rcross.mean(), "and the standard deviation is" , Rcross.std())
###Output
The mean of the folds are 0.522009915042119 and the standard deviation is 0.291183944475603
###Markdown
We can use the negative mean squared error as a score by setting the 'scoring' parameter to 'neg_mean_squared_error'.
###Code
-1 * cross_val_score(lre,x_data[['horsepower']], y_data,cv=4,scoring='neg_mean_squared_error')
###Output
_____no_output_____
###Markdown
Question 3): Calculate the average R^2 using two folds, find the average R^2 for the second fold utilizing the horsepower as a feature :
###Code
# Write your code below and press Shift+Enter to execute
Rc=cross_val_score(lre,x_data[['horsepower']], y_data,cv=2)
Rc.mean()
###Output
_____no_output_____
###Markdown
Click here for the solution```pythonRc=cross_val_score(lre,x_data[['horsepower']], y_data,cv=2)Rc.mean()``` You can also use the function 'cross_val_predict' to predict the output. The function splits up the data into the specified number of folds, using one fold for testing and the other folds are used for training. First import the function:
###Code
from sklearn.model_selection import cross_val_predict
###Output
_____no_output_____
###Markdown
We input the object, the feature ('horsepower' in this case), and the target data (y_data). The parameter 'cv' determines the number of folds; in this case 4. We can produce an output:
###Code
yhat = cross_val_predict(lre,x_data[['horsepower']], y_data,cv=4)
yhat[0:5]
###Output
_____no_output_____
###Markdown
Part 2: Overfitting, Underfitting and Model Selection. It turns out that the test data, sometimes referred to as out-of-sample data, is a much better measure of how well your model performs in the real world. One reason for this is overfitting; let's go over some examples. These differences are more apparent in Multiple Linear Regression and Polynomial Regression, so we will explore overfitting in that context. Let's create a Multiple Linear Regression object and train the model using 'horsepower', 'curb-weight', 'engine-size' and 'highway-mpg' as features.
###Code
lr = LinearRegression()
lr.fit(x_train[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']], y_train)
###Output
_____no_output_____
###Markdown
Prediction using training data:
###Code
yhat_train = lr.predict(x_train[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
yhat_train[0:5]
###Output
_____no_output_____
###Markdown
Prediction using test data:
###Code
yhat_test = lr.predict(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
yhat_test[0:5]
###Output
_____no_output_____
###Markdown
Let's perform some model evaluation using our training and testing data separately. First we import the seaborn and matplotlib libraries for plotting.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
###Output
_____no_output_____
###Markdown
Let's examine the distribution of the predicted values of the training data.
###Code
Title = 'Distribution Plot of Predicted Value Using Training Data vs Training Data Distribution'
DistributionPlot(y_train, yhat_train, "Actual Values (Train)", "Predicted Values (Train)", Title)
###Output
_____no_output_____
###Markdown
Figure 1: Plot of predicted values using the training data compared to the training data. So far the model seems to be doing well in learning from the training dataset. But what happens when the model encounters new data from the testing dataset? When the model generates new values from the test data, we see the distribution of the predicted values is much different from the actual target values.
###Code
Title='Distribution Plot of Predicted Value Using Test Data vs Data Distribution of Test Data'
DistributionPlot(y_test,yhat_test,"Actual Values (Test)","Predicted Values (Test)",Title)
###Output
_____no_output_____
###Markdown
Figure 2: Plot of predicted values using the test data compared to the test data. Comparing Figure 1 and Figure 2, it is evident that the predicted distribution in Figure 1 fits the actual values much better than in Figure 2. The difference in Figure 2 is most apparent in the range from 5,000 to 15,000, where the distribution shapes differ the most. Let's see if polynomial regression also exhibits a drop in prediction accuracy when analysing the test dataset.
###Code
from sklearn.preprocessing import PolynomialFeatures
###Output
_____no_output_____
###Markdown
Overfitting. Overfitting occurs when the model fits the noise, not the underlying process. Therefore, when testing your model on the test set, it does not perform as well because it has modelled the noise rather than the underlying process that generated the relationship. Let's create a degree 5 polynomial model, using 55 percent of the data for training and the rest for testing:
###Code
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.45, random_state=0)
###Output
_____no_output_____
###Markdown
We will perform a degree 5 polynomial transformation on the feature 'horsepower'.
###Code
pr = PolynomialFeatures(degree=5)
x_train_pr = pr.fit_transform(x_train[['horsepower']])
x_test_pr = pr.fit_transform(x_test[['horsepower']])
pr
###Output
_____no_output_____
###Markdown
Now let's create a linear regression model "poly" and train it.
###Code
poly = LinearRegression()
poly.fit(x_train_pr, y_train)
###Output
_____no_output_____
###Markdown
We can see the output of our model using the method "predict", then assign the values to "yhat".
###Code
yhat = poly.predict(x_test_pr)
yhat[0:5]
###Output
_____no_output_____
###Markdown
Let's take the first four predicted values and compare them to the actual targets.
###Code
print("Predicted values:", yhat[0:4])
print("True values:", y_test[0:4].values)
###Output
Predicted values: [ 6728.65561887 7307.98782321 12213.78770965 18893.24804015]
True values: [ 6295. 10698. 13860. 13499.]
###Markdown
We will use the function "PollyPlot" that we defined at the beginning of the lab to display the training data, testing data, and the predicted function.
###Code
PollyPlot(x_train[['horsepower']], x_test[['horsepower']], y_train, y_test, poly,pr)
###Output
_____no_output_____
###Markdown
Figure 4: A polynomial regression model; red dots represent training data, green dots represent test data, and the blue line represents the model prediction. We see that the estimated function appears to track the data, but around 200 horsepower the function begins to diverge from the data points. R^2 of the training data:
###Code
poly.score(x_train_pr, y_train)
###Output
_____no_output_____
###Markdown
R^2 of the test data:
###Code
poly.score(x_test_pr, y_test)
###Output
_____no_output_____
###Markdown
We see the R^2 for the training data is 0.5567 while the R^2 on the test data was -29.87. The lower the R^2, the worse the model; a negative R^2 is a sign of overfitting. Let's see how the R^2 on the test data changes for different order polynomials and plot the results:
###Code
Rsqu_test = []
order = [1, 2, 3, 4]
for n in order:
pr = PolynomialFeatures(degree=n)
x_train_pr = pr.fit_transform(x_train[['horsepower']])
x_test_pr = pr.fit_transform(x_test[['horsepower']])
lr.fit(x_train_pr, y_train)
Rsqu_test.append(lr.score(x_test_pr, y_test))
plt.plot(order, Rsqu_test)
plt.xlabel('order')
plt.ylabel('R^2')
plt.title('R^2 Using Test Data')
plt.text(3, 0.75, 'Maximum R^2 ')
###Output
_____no_output_____
###Markdown
We see the R^2 gradually increases until an order three polynomial is used. Then the R^2 dramatically decreases at four. The following function will be used in the next section; please run the cell.
###Code
def f(order, test_data):
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=test_data, random_state=0)
pr = PolynomialFeatures(degree=order)
x_train_pr = pr.fit_transform(x_train[['horsepower']])
x_test_pr = pr.fit_transform(x_test[['horsepower']])
poly = LinearRegression()
poly.fit(x_train_pr,y_train)
PollyPlot(x_train[['horsepower']], x_test[['horsepower']], y_train,y_test, poly, pr)
###Output
_____no_output_____
###Markdown
The following interface allows you to experiment with different polynomial orders and different amounts of data.
###Code
interact(f, order=(0, 6, 1), test_data=(0.05, 0.95, 0.05))
###Output
_____no_output_____
###Markdown
Question 4a):We can perform polynomial transformations with more than one feature. Create a "PolynomialFeatures" object "pr1" of degree two?
###Code
# Write your code below and press Shift+Enter to execute
pr1=PolynomialFeatures(degree=2)
###Output
_____no_output_____
###Markdown
Click here for the solution```pythonpr1=PolynomialFeatures(degree=2)``` Question 4b): Transform the training and testing samples for the features 'horsepower', 'curb-weight', 'engine-size' and 'highway-mpg'. Hint: use the method "fit_transform" ?
###Code
# Write your code below and press Shift+Enter to execute
x_train_pr1=pr1.fit_transform(x_train[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
x_test_pr1=pr1.fit_transform(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
###Output
_____no_output_____
###Markdown
Click here for the solution```pythonx_train_pr1=pr1.fit_transform(x_train[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])x_test_pr1=pr1.fit_transform(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])``` <!-- The answer is below:x_train_pr1=pr.fit_transform(x_train[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])x_test_pr1=pr.fit_transform(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])--> Question 4c): How many dimensions does the new feature have? Hint: use the attribute "shape"
###Code
# Write your code below and press Shift+Enter to execute
x_train_pr1.shape
###Output
_____no_output_____
###Markdown
Click here for the solution```pythonx_train_pr1.shape there are now 15 features``` Question 4d): Create a linear regression model "poly1" and train the object using the method "fit" using the polynomial features?
###Code
# Write your code below and press Shift+Enter to execute
poly1=LinearRegression().fit(x_train_pr1,y_train)
###Output
_____no_output_____
###Markdown
Click here for the solution```pythonpoly1=LinearRegression().fit(x_train_pr1,y_train)``` Question 4e): Use the method "predict" to predict an output on the polynomial features, then use the function "DistributionPlot" to display the distribution of the predicted output vs the test data?
###Code
# Write your code below and press Shift+Enter to execute
yhat_test1=poly1.predict(x_test_pr1)
Title='Distribution Plot of Predicted Value Using Test Data vs Data Distribution of Test Data'
DistributionPlot(y_test, yhat_test1, "Actual Values (Test)", "Predicted Values (Test)", Title)
###Output
_____no_output_____
###Markdown
Click here for the solution```pythonyhat_test1=poly1.predict(x_test_pr1)Title='Distribution Plot of Predicted Value Using Test Data vs Data Distribution of Test Data'DistributionPlot(y_test, yhat_test1, "Actual Values (Test)", "Predicted Values (Test)", Title)``` Question 4f): Using the distribution plot above, explain in words about the two regions were the predicted prices are less accurate than the actual prices
###Code
# Write your code below and press Shift+Enter to execute
###Output
_____no_output_____
###Markdown
Click here for the solution```pythonThe predicted values are higher than the actual values for cars in the $10,000 price range; conversely, the predicted prices are lower than the actual prices in the $30,000 to $40,000 range. As such, the model is not as accurate in these ranges.``` Part 3: Ridge regression In this section, we will review Ridge Regression and see how the parameter Alpha changes the model. Just a note: here our test data will be used as validation data. Let's perform a degree two polynomial transformation on our data.
###Code
pr=PolynomialFeatures(degree=2)
x_train_pr=pr.fit_transform(x_train[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg','normalized-losses','symboling']])
x_test_pr=pr.fit_transform(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg','normalized-losses','symboling']])
###Output
_____no_output_____
###Markdown
Let's import Ridge from the module linear_model.
###Code
from sklearn.linear_model import Ridge
###Output
_____no_output_____
###Markdown
Let's create a Ridge regression object, setting the regularization parameter to 0.1
###Code
RigeModel=Ridge(alpha=0.1)
###Output
_____no_output_____
###Markdown
Like regular regression, you can fit the model using the method fit.
###Code
RigeModel.fit(x_train_pr, y_train)
###Output
/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:125: LinAlgWarning: Ill-conditioned matrix (rcond=1.02972e-16): result may not be accurate.
overwrite_a=True).T
###Markdown
Similarly, you can obtain a prediction:
###Code
yhat = RigeModel.predict(x_test_pr)
###Output
_____no_output_____
###Markdown
Let's compare the first four predicted samples to our test set
###Code
print('predicted:', yhat[0:4])
print('test set :', y_test[0:4].values)
###Output
predicted: [ 6567.83081933 9597.97151399 20836.22326843 19347.69543463]
test set : [ 6295. 10698. 13860. 13499.]
###Markdown
We want to select the value of Alpha that minimizes the error on the validation data; for example, we can sweep over candidate values with a for loop and compare the resulting R^2 scores.
###Code
Rsqu_test = []
Rsqu_train = []
dummy1 = []
Alpha = 10 * np.array(range(0,1000))
for alpha in Alpha:
RigeModel = Ridge(alpha=alpha)
RigeModel.fit(x_train_pr, y_train)
Rsqu_test.append(RigeModel.score(x_test_pr, y_test))
Rsqu_train.append(RigeModel.score(x_train_pr, y_train))
###Output
_____no_output_____
###Markdown
We can plot out the value of R^2 for different Alphas
###Code
width = 12
height = 10
plt.figure(figsize=(width, height))
plt.plot(Alpha,Rsqu_test, label='validation data ')
plt.plot(Alpha,Rsqu_train, 'r', label='training Data ')
plt.xlabel('alpha')
plt.ylabel('R^2')
plt.legend()
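# Sketch: the best alpha from the sweep above is the one with the highest validation R^2.
best_alpha = Alpha[np.argmax(Rsqu_test)]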
###Output
_____no_output_____
###Markdown
**Figure 6**: The blue line represents the R^2 on the validation data and the red line the R^2 on the training data; the x-axis represents the different values of Alpha. The red curve is evaluated on the same data the model was built on, so as Alpha increases the training R^2 decreases and the model performs progressively worse on the training data. The blue curve is evaluated on the validation data: as Alpha increases, the validation R^2 increases and then converges. Question 5): Perform Ridge regression and calculate the R^2 using the polynomial features; use the training data to train the model and the test data to test the model. The parameter alpha should be set to 10.
###Code
# Write your code below and press Shift+Enter to execute
RigeModel = Ridge(alpha=10)
RigeModel.fit(x_train_pr, y_train)
RigeModel.score(x_test_pr, y_test)
###Output
_____no_output_____
###Markdown
Click here for the solution```pythonRigeModel = Ridge(alpha=10) RigeModel.fit(x_train_pr, y_train)RigeModel.score(x_test_pr, y_test)``` Part 4: Grid Search The term Alfa is a hyperparameter, sklearn has the class GridSearchCV to make the process of finding the best hyperparameter simpler. Let's import GridSearchCV from the module model_selection.
###Code
from sklearn.model_selection import GridSearchCV
###Output
_____no_output_____
###Markdown
We create a dictionary of parameter values:
###Code
parameters1= [{'alpha': [0.001,0.1,1, 10, 100, 1000, 10000, 100000, 100000]}]
parameters1
###Output
_____no_output_____
###Markdown
Create a Ridge regression object:
###Code
RR=Ridge()
RR
###Output
_____no_output_____
###Markdown
Create a ridge grid search object
###Code
Grid1 = GridSearchCV(RR, parameters1,cv=4)
###Output
_____no_output_____
###Markdown
Fit the model
###Code
Grid1.fit(x_data[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']], y_data)
###Output
/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/model_selection/_search.py:841: DeprecationWarning: The default of the `iid` parameter will change from True to False in version 0.22 and will be removed in 0.24. This will change numeric results when test-set sizes are unequal.
DeprecationWarning)
###Markdown
The object finds the best parameter values on the validation data. We can obtain the estimator with the best parameters and assign it to the variable BestRR as follows:
###Code
BestRR=Grid1.best_estimator_
BestRR
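# The full cross-validated results are also available if needed (sketch):
# for params, mean in zip(Grid1.cv_results_['params'], Grid1.cv_results_['mean_test_score']):
#     print(params, mean)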
###Output
_____no_output_____
###Markdown
We now test our model on the test data
###Code
BestRR.score(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']], y_test)
###Output
_____no_output_____
###Markdown
Question 6): Perform a grid search for the alpha parameter and the normalization parameter, then find the best values of the parameters
###Code
# Write your code below and press Shift+Enter to execute
parameters2= [{'alpha': [0.001,0.1,1, 10, 100, 1000,10000,100000,100000],'normalize':[True,False]} ]
Grid2 = GridSearchCV(Ridge(), parameters2,cv=4)
Grid2.fit(x_data[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']],y_data)
Grid2.best_estimator_
###Output
/home/jupyterlab/conda/envs/python/lib/python3.6/site-packages/sklearn/model_selection/_search.py:841: DeprecationWarning: The default of the `iid` parameter will change from True to False in version 0.22 and will be removed in 0.24. This will change numeric results when test-set sizes are unequal.
DeprecationWarning)
|
comp-cientifica-II-2019-2/.ipynb_checkpoints/Prova1_GilMiranda-checkpoint.ipynb | ###Markdown
Exam I - Scientific Computing II> Student: Gil Miranda> DRE: 118037119> E-mail: [email protected]; [email protected] Set-up of dependencies and libraries
###Code
import numpy as np
import matplotlib.pyplot as plt
##### Vectorized 4th Order Runge Kutta
### Input: F -> Differential equation;
### y0 -> list or scalar for initial condition;
### ts -> list of points on time to evaluate the equation;
### p -> list or scalar for parameters for F, default is set to 0 if F has no extra parameters;
### Output: ys -> numpy array with all solutions for each step t, ys is a Matrix
##### Gil Miranda - last revision 26/10/2019
def rk_4(F, y0, ts, p = 0):
ys = [y0]
t = ts[0]
h = ts[1] - ts[0]
for tnext in ts:
k1 = h*F(t, ys[-1], p)
k2 = h*F(t + h/2, ys[-1] + k1/2, p)
k3 = h*F(t + h/2, ys[-1] + k2/2, p)
        k4 = h*F(t + h, ys[-1] + k3, p)
ynext = ys[-1] + (k1/6+k2/3+k3/3+k4/6)
ys.append(ynext)
t = tnext
return ys[:-1]
###Output
_____no_output_____
###Markdown
--- Creating the 4th and 3rd order Adams-Bashforth methods. The Adams-Bashforth method can be written as$$u_{k} = u_{k-1} + h\left(f_{k-1} + \frac{1}{2}\nabla f_{k-1}+ \frac{5}{12}\nabla^2 f_{k-1}+ \frac{9}{24}\nabla^3 f_{k-1} + \dots \right)$$and we can approximate the backward differences as$$\begin{align}\nabla f_{k-1} &= f_{k-1} - f_{k-2}\\\nabla^2 f_{k-1} &= f_{k-1} - 2f_{k-2} + f_{k-3}\\\nabla^3 f_{k-1} &= f_{k-1} - 3f_{k-2} + 3f_{k-3} - f_{k-4}\\\end{align}$$So we have$$u_{k} = u_{k-1} + h\left(f_{k-1} + \frac{1}{2}(f_{k-1} - f_{k-2})+ \frac{5}{12}(f_{k-1} - 2f_{k-2} + f_{k-3}) + \frac{9}{24}(f_{k-1} - 3f_{k-2} + 3f_{k-3} - f_{k-4})\right)$$Collecting terms (keeping up to $\nabla^2$ for the 3rd order method and up to $\nabla^3$ for the 4th order method), we end up with$$u_{k} = u_{k-1} + h\left(\frac{23}{12}f_{k-1} - \frac{16}{12}f_{k-2} + \frac{5}{12}f_{k-3}\right)$$for the 3rd order, and$$u_{k} = u_{k-1} + h\left(\frac{55}{24}f_{k-1} - \frac{59}{24}f_{k-2} + \frac{37}{24}f_{k-3} - \frac{9}{24}f_{k-4}\right)$$for the 4th order method. To start the nth order AB method we need the previous points $f_{k-1},\dots, f_{k-n}$; these first $n$ points can be computed with Runge-Kutta.
###Code
##### Adams-Bashforth 4th & 3rd order
### Input: F -> Differential equation;
### t0 -> initial point in time;
### tf -> final point in time;
### y0 -> initial condition;
### h -> step size;
### order -> order of the method;
### Output: ys -> list with all solutions for each step t;
### ts -> list with all points in time;
##### Gil Miranda - last revision 29/10/2019
def adams_bash(F, t0, tf, y0, h, order = 3):
if order == 4:
ws = [55/24, -59/24, 37/24, -9/24] ## setting the weights for 4th order
else:
order = 3
ws = [23/12, -16/12, 5/12] ## setting the weights for 3rd order
### initializing the list of points in time
ts = [t0]
for i in range(order-1):
ts.append(ts[-1] + h)
    ### solving for the first n points with runge-kutta 4th order, so we can initialize the AB method
first_ys = rk_4(F, y0, ts)
n = len(first_ys)
    ys = first_ys[:] ## list of solutions, initialized with rk4
    ### Adams-Bashforth
while(ts[-1] <= tf):
fs = [F(ts[-i], ys[-i]) for i in range(1,n+1)] ## list with f_k-1,...,f_k-n
ynew = ys[-1]
for (wi,fi) in zip(ws,fs):
ynew += h*(wi*fi)
ys.append(ynew)
ts.append(ts[-1] + h)
return ys, ts
###Output
_____no_output_____
###Markdown
The Differential Equation. Here is the function that returns the right-hand side of the differential equation$$\frac{\mathrm{d}u}{\mathrm{d}t} = 10e^{-\frac{(t-2)^2}{2(0.075)^2}} - 0.6u$$
###Code
def eq_1(t,u, p=0):
a = -(t-2)**2/(2*(0.075)**2)
b = 10*np.e**a
c = 0.6*u
return b-c
###Output
_____no_output_____
###Markdown
Solving the Differential Equation with Adams-Bashforth 4th and 3rd Order
###Code
y0 = 0.5 # Initial Condition
hs = [0.01, 0.04, 0.06, 0.08] ## step size
hs_name = [hs[0], hs[0], hs[1], hs[1], hs[2], hs[2], hs[3], hs[3]] ## step size names for plotting
ys_3 = []
ts_3 = []
ys_4 = []
ts_4 = []
for h in hs:
y_3, t_3 = adams_bash(eq_1, 0, 4, y0, h) ## solving order 3 for each step size
y_4, t_4 = adams_bash(eq_1, 0, 4, y0, h, order = 4) ## solving order 4 for each step size
ys_3.append(y_3)
ts_3.append(t_3)
ys_4.append(y_4)
ts_4.append(t_4)
## plotting time
fig, ((ax1,ax2), (ax3,ax4), (ax5, ax6), (ax7, ax8)) = plt.subplots(nrows=4, ncols=2, figsize=(14,18))
fig.suptitle('Numerical Solution with Adams-Bashforth', weight='bold')
names = ['3rd', '4th', '3rd', '4th', '3rd', '4th', '3rd', '4th']
axs = [ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8]
for i,j,h in zip(axs,names,hs_name):
i.set_title(j + ' Order AB Method - step = $' + str(h) + '$')
for a in axs:
a.grid(alpha = 0.5)
ax1.plot(ts_3[0],ys_3[0], color = 'red')
ax2.plot(ts_4[0],ys_3[0], color = 'red')
ax3.plot(ts_3[1],ys_3[1], color = 'green')
ax4.plot(ts_4[1],ys_4[1], color = 'green')
ax5.plot(ts_3[2],ys_3[2], color = 'blue')
ax6.plot(ts_4[2],ys_4[2], color = 'blue')
ax7.plot(ts_3[3],ys_3[3], color = 'black')
ax8.plot(ts_4[3],ys_4[3], color = 'black')
plt.show()
###Output
_____no_output_____
###Markdown
Visualizing error between 3rd and 4th orders
###Code
## calculating the local error
err = []
for j in range(3):
    e = [abs(ys_4[j][i] - ys_3[j][i]) for i in range(len(ys_4[j]))]
err.append(e)
## plotting time
fig, ((ax1), (ax2), (ax3)) = plt.subplots(nrows=1, ncols=3, figsize=(20,5))
fig.suptitle('Local error between 3rd and 4th order methods')
axs = [ax1, ax2, ax3]
for i,h in zip(axs,hs):
i.set_title('step = $' + str(h) + '$')
for a in axs:
a.grid(alpha = 0.5)
ax1.plot(ts_3[0],err[0], color = 'red')
ax2.plot(ts_4[1],err[1], color = 'green')
ax3.plot(ts_3[2],err[2], color = 'blue')
plt.show()
###Output
_____no_output_____
###Markdown
--- Adaptive Adams-Bashforth Method
###Code
##### Adaptive Adams-Bashforth 4th & 3rd order
### Input: F -> Differential equation;
### t0 -> initial point in time;
### tf -> final point in time;
### y0 -> initial condition;
### h -> step size;
### tol -> error tolerance;
### Output: ys -> list with all solutions for each step t;
### ts -> list with all points in time;
##### Gil Miranda - last revision 30/10/2019
def adams_bash_adp(F, t0, tf, y0, h, tol = 1e-3):
ws_4 = [55/24, -59/24, 37/24, -9/24] ## setting the weights for 4th order
ws_3 = [23/12, -16/12, 5/12] ## setting the weights for 3rd order
### initializing the list of points in time
ts = [t0]
for i in range(4):
ts.append(ts[-1] + h)
    ### solving for the first n points with runge-kutta 4th order, so we can initialize the AB method
first_ys = rk_4(F, y0, ts)
    ys = first_ys[:] ## list of solutions, initialized with rk4
    ### Adaptive Adams-Bashforth
while(ts[-1] <= tf):
fs = [F(ts[-i], ys[-i]) for i in range(1,5)] ## list with f_k-1,...,f_k-n
## Solving with 3rd order
ynew = ys[-1]
for (wi,i) in zip(ws_3, range(3)):
ynew += h*(wi* fs[i])
## Solving with 4th order (using 3rd order points)
ynew_til = ys[-1]
for (wi, i) in zip(ws_4, range(4)):
ynew_til += h*(wi * fs[i])
## Local error
err = abs(ynew_til - ynew)
## if err < tol we accept the solution
if err <= tol:
ys.append(ynew)
tnext = ts[-1] + h
ts.append(tnext)
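            ## step accepted: grow h with the standard controller
            ## (the 1/4 exponent corresponds to the 4th order local error estimate)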
h = h*(tol/err)**(1/4)
else:
## else, we get a new step
h = h/2
return ys, ts
###Output
_____no_output_____
###Markdown
Visualizing Solutions
###Code
t0 = 0
tf = 4
h = 0.1
y0 = 0.5
ys_1, ts_1 = adams_bash_adp(eq_1, t0, tf, y0, h, tol = 1e-3)
ys_2, ts_2 = adams_bash_adp(eq_1, t0, tf, y0, h, tol = 1e-4)
## plotting time
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(14,5))
fig.suptitle('Numerical Solution with Adaptive Adams-Bashforth, initial step size = $' + str(h) + '$', weight='bold')
# names = ['3rd', '4th', '3rd', '4th', '3rd', '4th']
axs = [ax1, ax2]
for a in axs:
a.grid(alpha = 0.5)
ax1.plot(ts_1,ys_1, color = 'black')
ax1.plot(ts_1,ys_1, 'o', color = 'red')
ax1.set_title('tol = $10^{-3}$')
ax2.plot(ts_2,ys_2, color = 'black')
ax2.plot(ts_2,ys_2, 'o', color = 'blue')
ax2.set_title('tol = $10^{-4}$')
plt.show()
###Output
_____no_output_____
###Markdown
--- Bonus Method: Predictor-Corrector Adams-Bashforth-Moulton 4th order Method. Predictor equation (4th order AB)$$u_{k} = u_{k-1} + h\left(\frac{55}{24}f_{k-1} - \frac{59}{24}f_{k-2} + \frac{37}{24}f_{k-3} - \frac{9}{24}f_{k-4}\right)$$Corrector equation (4th order AM)$$u_{k} = u_{k-1} + h\left(\frac{9}{24}f_{k} + \frac{19}{24}f_{k-1} - \frac{5}{24}f_{k-2} + \frac{1}{24}f_{k-3}\right)$$We initialize AB with RK4, use the AB prediction to evaluate $f_k$ for the AM corrector, and then compute the difference between the predictor and corrector values; if $err > tol$, where tol is the given tolerance, the corrector is applied again.
###Code
##### Adams-Bashforth-Moulton 4th order
### Input: F -> Differential equation;
### t0 -> initial point in time;
### tf -> final point in time;
### y0 -> initial condition;
### h -> step size;
### tol -> error tolerance;
### Output: ys -> list with all solutions for each step t;
### ts -> list with all points in time;
##### Gil Miranda - last revision 30/10/2019
def adams_bash_mou(F, t0, tf, y0, h, tol = 1e-12):
ws_ab = [55/24, -59/24, 37/24, -9/24] ## setting the weights for adams bashforth
ws_am = [9/24, 19/24, -5/24, 1/24] ## setting the weights for adams moulton
### initializing the list of points in time
ts = [t0]
for i in range(3):
ts.append(ts[-1] + h)
    ### solving for the first n points with runge-kutta 4th order, so we can initialize the AB method
first_ys = rk_4(F, y0, ts)
n = len(first_ys)
    ys = first_ys[:] ## list of solutions, initialized with rk4
while(ts[-1] <= tf):
## Calculating the AB solution
fs_ab = [F(ts[-i], ys[-i]) for i in range(1,n+1)] ## list with f_k-1,...,f_k-n
y_ab = ys[-1]
for (wi,fi) in zip(ws_ab,fs_ab):
y_ab += h*(wi*fi)
## Generating next point in time
tnext = ts[-1] + h
## Generating the f(ti,yi) point to use with AM
f1_am = F(tnext, y_ab)
fs_am = [f1_am] + fs_ab[:-1]
## Calculating the AM solution
y_am = ys[-1]
for (wi,fi) in zip(ws_am,fs_am):
y_am += h*(wi*fi)
## error
err = abs(y_am - y_ab)
y_old = y_am
y_new = y_am
while(err > tol):
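            ## fixed-point corrector iteration: re-apply the implicit AM update until
            ## successive corrections agree to within tol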
## Calculating another solution with AM using last AM solution
f1_new = F(tnext, y_old)
fs_new = [f1_new] + fs_ab[:-1]
y_new = ys[-1]
for (wi,fi) in zip(ws_am,fs_new):
y_new += h*(wi*fi)
err = abs(y_new - y_old)
y_old = y_new
ys.append(y_new)
ts.append(tnext)
return ys, ts
y0 = 0.5 # Initial Condition
hs = [0.01, 0.04, 0.06, 0.08] ## step size
hs_name = [hs[0], hs[1], hs[2], hs[3]] ## step size names for plotting
ys_5 = []
ts_5 = []
for h in hs:
y_5, t_5 = adams_bash_mou(eq_1, 0, 4, y0, h) ## solving for each step with predictor-corrector
ys_5.append(y_5)
ts_5.append(t_5)
## plotting time
fig, ((ax1,ax2), (ax3,ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(14,10))
fig.suptitle('Numerical Solution with Adams-Bashforth-Moulton', weight='bold')
axs = [ax1, ax2, ax3, ax4]
for i,h in zip(axs,hs_name):
i.set_title('h = $' + str(h) + '$')
for a in axs:
a.grid(alpha = 0.5)
ax1.plot(ts_5[0],ys_5[0], color = 'red')
ax2.plot(ts_5[1],ys_5[1], color = 'blue')
ax3.plot(ts_5[2],ys_5[2], color = 'green')
ax4.plot(ts_5[3],ys_5[3], color = 'orange')
plt.show()
###Output
_____no_output_____
###Markdown
Unlike the 3rd and 4th order AB methods, the ABM method gives a decent solution even with step size $h = 0.08$. Plotting the solutions for each step size with AB4, adaptive AB, and ABM:
###Code
## plotting time
fig, ((ax1,ax2), (ax3,ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(14,10))
fig.suptitle('Comparison of solutions for all methods', weight='bold')
axs = [ax1, ax2, ax3, ax4]
for i,h in zip(axs,hs_name):
i.set_title('h = $' + str(h) + '$')
for a in axs:
a.grid(alpha = 0.5)
ax1.plot(ts_5[0],ys_5[0], color = 'red', label = 'ABM')
ax1.plot(ts_4[0], ys_4[0], label = 'AB 4')
ax1.plot(ts_2, ys_2, label = 'AB Adp')
ax2.plot(ts_5[1],ys_5[1], color = 'blue', label = 'ABM')
ax2.plot(ts_4[1], ys_4[1], color = 'red', label = 'AB 4')
ax3.plot(ts_4[2], ys_4[2], color = 'orange', label = 'AB 4')
ax3.plot(ts_5[2],ys_5[2], color = 'green', label = 'ABM')
ax4.plot(ts_5[3],ys_5[3], color = 'orange', label = 'ABM')
ax4.plot(ts_4[3], ys_4[3], color = 'green', label = 'AB 4')
ax1.legend()
ax2.legend()
ax3.legend()
ax4.legend()
plt.show()
###Output
_____no_output_____ |
notebooks/00.1.3-KershawDW1.ipynb | ###Markdown
Capstone 2: Data Wrangling Update 11/6: Doing a smaller version first, specifically focused on Clayton Kershaw in 2015. In a given at bat, what pitch is coming next? Overview: How can we use data from 2015-2018 to predict/assess what type of pitch should be thrown in a given at bat? This can be looked at from both the pitching team’s perspective (what is the ideal pitch for a given situation), and from the batting team (what pitch should I expect, assuming that the pitcher will choose the optimal pitch). Using the ab_id, we can link the data in the pitches csv to the data in the atbats csv to look at the outcomes of at bats along with the exact type and order of pitches thrown. We can add to this by using the batter_id and pitcher_id to gather specific data for a given hitter or pitcher by linking with the player_names csv. The data will be acquired from https://www.kaggle.com/pschale/mlb-pitch-data-20152018?select=pitches.csv which was scraped from http://gd2.mlb.com/components/game/mlb/.
###Code
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import pandas_profiling
from library.sb_utils import save_file
%matplotlib inline
from pathlib import Path
import pandas_profiling
import requests
from pandas_profiling.utils.cache import cache_file
from pandas_profiling import ProfileReport
import datetime as dt
from datetime import datetime
###Output
_____no_output_____
###Markdown
Because I found myself repeatedly using this technique, I decided to define the following function to show missing values
###Code
#missing values function
def missing(df):
'''Takes dataframe as argument,
returns count and % of missing values for each column'''
import pandas as pd
miss = pd.concat([df.isnull().sum(), 100 * df.isnull().mean()], axis = 1)
miss.columns = ['count','%']
return miss.sort_values(by = ['count'], ascending = False)
###Output
_____no_output_____
###Markdown
Import the data First we will do a small file (player_names), and then do the rest
###Code
#the CSV data files are in the data/raw directory
#player names
names = pd.read_csv('../data/raw/player_names.csv')
names.info()
names.head()
###Output
_____no_output_____
###Markdown
Import Games and At-Bats Data
###Code
#the CSV data files are in the data/raw directory
#at bat info
atbats = pd.read_csv('../data/raw/atbats.csv')
#games info
games = pd.read_csv('../data/raw/games.csv')
atbats.info()
games.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 9718 entries, 0 to 9717
Data columns (total 17 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 attendance 9718 non-null int64
1 away_final_score 9718 non-null int64
2 away_team 9718 non-null object
3 date 9718 non-null object
4 elapsed_time 9718 non-null int64
5 g_id 9718 non-null int64
6 home_final_score 9718 non-null int64
7 home_team 9718 non-null object
8 start_time 9718 non-null object
9 umpire_1B 9718 non-null object
10 umpire_2B 9715 non-null object
11 umpire_3B 9718 non-null object
12 umpire_HP 9718 non-null object
13 venue_name 9718 non-null object
14 weather 9718 non-null object
15 wind 9718 non-null object
16 delay 9718 non-null int64
dtypes: int64(6), object(11)
memory usage: 1.3+ MB
###Markdown
Import Pitch Data (800 mb)
###Code
#the CSV data files are in the data/raw directory
pitches = pd.read_csv('../data/raw/pitches.csv')
pitches.info()
pitches.head()
###Output
_____no_output_____
###Markdown
Check missing values for at bats, players, and pitches (the manual pd.concat version of this check has been wrapped into the missing() helper defined above).
###Code
missing(pitches)
#missing values for each column in atbats
missingAB = pd.concat([atbats.isnull().sum(), 100 * atbats.isnull().mean()], axis=1)
missingAB.columns=['count', '%']
missingAB.sort_values(by=['count'], ascending = False)
missing(names)
#missing values for games
missing(games)
###Output
_____no_output_____
###Markdown
There are no missing values in the AB and Names dataframes Because there are no columns with more than 0.5% of values missing (i.e. no large chunks missing) in the pitches and games dataframes, I am just going to remove rows with missing values. I think I want to concat the df's first (link the abs, pitches, and games) before removing though.
###Code
#maybe do groupby ab id for pitches first.
pitches.ab_id.unique().shape
atbats.ab_id.unique().shape
###Output
_____no_output_____
###Markdown
It appears that there are 159 at bats missing from the pitches df. I'll start by trying to group by ab_id on the pitches df and then decide how I want to join the dfs.
###Code
pitches.columns; atbats.columns
#at bats with names
names.head()
atbats.head()
###Output
_____no_output_____
###Markdown
Kershaw subset
###Code
names[names['first_name']=='Clayton']
kAB = atbats[atbats.pitcher_id == 477132]
kPitch = pitches[pitches['ab_id'].isin(kAB['ab_id'])]
#Kershaw = KershawAB.merge(KershawPitches, how = 'right', left_on = 'ab_id', right_on = 'ab_id' )
kPitchSubset = kPitch.iloc[:, 27:]
kPitchSubset.columns
#atbatNames = names.merge(atbats, how = 'right', left_on = 'id', right_on = 'pitcher_id' )
ktemp = kAB.merge(games[['g_id','date']], on = 'g_id', how = 'left')
ktemp.columns
ktemp['date'] = pd.to_datetime(ktemp['date'], infer_datetime_format = True)
k15 = ktemp[ktemp.date < datetime.fromisoformat('2016-01-01')]
k15.tail()
###Output
_____no_output_____
###Markdown
Now add pitch data
###Code
Kershaw2015 = k15.merge(kPitchSubset, on = 'ab_id' , how = 'left')
Kershaw2015.head()
Kershaw2015.info()
pitch_types = {'FF': 1, 'SL': 2, 'CU': 3, 'FT': 4, 'CH': 5}
Kershaw2015['pcodes'] = Kershaw2015.pitch_type.replace(pitch_types)
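# errors='coerce' below turns any pitch type not covered by the mapping into NaN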
Kershaw2015.pcodes = pd.to_numeric(Kershaw2015.pcodes, errors = 'coerce')
Kershaw2015.columns
# save the data to a new csv file
datapath = '../data/interim'
save_file(Kershaw2015, 'Kershaw2015.csv', datapath)
###Output
Writing file. "../data/interim\Kershaw2015.csv"
###Markdown
GAP
###Code
Kershaw2015.shape
###Output
_____no_output_____
###Markdown
Optional: generate a pandas-profiling report for the 2015 Kershaw data, e.g. CKreport = Kershaw2015.profile_report(sort=None, html={'style':{'full_width': True}}) followed by CKreport.to_file("CKprofile15.html"). Start with Clayton Kershaw, id 477132.
###Code
Kershaw2015.pitch_type.value_counts().plot(kind='bar')
plt.xlabel('pitch type')
plt.ylabel('Count')
plt.title('Distribution of pitches (Clayton Kershaw 2015)')
#plt.plot()
###Output
_____no_output_____
###Markdown
Get pitches in different counts
###Code
#pitch thrown in each count for righties and lefties.
possible_counts = [(x,y) for x in range(4) for y in range(3)]
print(possible_counts)
###Output
[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2), (3, 0), (3, 1), (3, 2)]
|
Books and Courses/PyTorch/.ipynb_checkpoints/Untitled-checkpoint.ipynb | ###Markdown
###Code
# setup imports
%matplotlib inline
from torchvision import transforms, datasets
import torchvision
import torch.optim as optim # optimization package
import torch.nn.functional as F
import torch.nn as nn
import torch
from tqdm import tqdm_notebook as tqdm # progress bar
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from torch.utils.tensorboard import SummaryWriter
plt.style.use('dark_background')
plt.rcParams["figure.figsize"] = (20, 13)
from collections import OrderedDict, namedtuple
from itertools import product # computes the Cartesian product (all combinations) of multiple lists
import json # needed by RunManager.save below
import time
# device (GPU) configuration
print('CUDA available:\t', torch.cuda.is_available())
print('Number of GPUs:\t', torch.cuda.device_count())
print('Default device:\t', torch.cuda.get_device_name())
# set the device
device = "cuda:0"
# enable the benchmark mode in cudnn (finds optimal settings for network configuration)
# but if the input sizes change at each iteration, turn it off
torch.backends.cudnn.benchmark = True
###Output
CUDA available: True
Number of GPUs: 1
Default device: GeForce GTX 1060
###Markdown
Earlier data-loading setup, kept for reference (the run loop below builds its own loader): set the batch size (batch_size = 100); define the transformations applied to the dataset before use, and there can be many of them (transform = transforms.Compose([transforms.ToTensor()]), ToTensor because the samples must be converted to tensors); build the train set and train loader (torchvision.datasets.FashionMNIST(root='D:/Stuff on HDD/My stuff/ML DS/Data_folder', train=True, download=True, transform=transform), where download=True fetches the set from the web and train=True selects the training split, then torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)); build the test set and test loader the same way with train=False, download=False (testing split, already downloaded) and shuffle=False.
###Code
class Network(nn.Module):
def __init__(self):
super().__init__()
# create the layers
self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5)
        self.fc1 = nn.Linear(in_features=12 * 4 * 4, out_features=120) # 12*4*4 = 12 feature maps of 4x4 pixels each
self.fc2 = nn.Linear(in_features=120, out_features=60)
self.out = nn.Linear(in_features=60, out_features=10)
def forward(self, t):
t = self.conv1(t)
t = F.relu(t) # set an activation function
t = F.max_pool2d(t, kernel_size=2, stride=2) # pooling layer
t = self.conv2(t)
t = F.relu(t)
t = F.max_pool2d(t, kernel_size=2, stride=2)
t = t.reshape(-1, 12 * 4 * 4) # flattening the tensor before forwarding it to fc1
t = self.fc1(t)
t = F.relu(t)
t = self.fc2(t)
t = F.relu(t)
t = self.out(t)
return t
class RunBuilder():
@staticmethod
def get_runs(params):
"""Returns a list of tuples of parameters for running a network"""
Run = namedtuple('Run', params.keys())
runs=[]
for v in product(*params.values()):
runs.append(Run(*v))
return runs
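# Example (sketch): with params = OrderedDict(lr=[.01, .001], batch_size=[100, 1000]),
# RunBuilder.get_runs(params) yields one namedtuple per combination, e.g.
# Run(lr=0.01, batch_size=100), Run(lr=0.01, batch_size=1000), and so on;
# the training loop below iterates over these runs.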
"""
class Epoch():
def __init__(self):
self.count = 0
self.loss = 0
self.num_correct = 0
self.start_time = None
class Run():
def __init__(self):
self.params = None
self.count = 0
self.data = []
self.start_time = None
"""
class RunManager():
def __init__(self):
self.epoch_count = 0
self.epoch_loss = 0
self.epoch_num_correct = 0
self.epoch_start_time = None
self.run_params = None
self.run_count = 0
self.run_data = []
self.run_start_time = None
self.network = None
self.loader = None
self.tb = None
def begin_run(self, run, network, loader):
self.run_start_time = time.time()
self.run_params = run
self.run_count += 1
self.network = network
self.loader = loader
self.tb = SummaryWriter(comment=f'-{run}')
images, labels = next(iter(self.loader))
images = images.to(device)
labels = labels.to(device)
grid = torchvision.utils.make_grid(images)
self.tb.add_image('images', grid)
self.tb.add_graph(self.network, images)
def end_run(self):
self.tb.close()
self.epoch_count = 0
def begin_epoch(self):
self.epoch_start_time = time.time()
self.epoch_count += 1
        self.epoch_loss = 0
self.epoch_num_correct = 0
def end_epoch(self):
"""End an epoch, calculate loss and accuracy, save the results to the TensorBoard and
DataFrame, display the DataFrame"""
epoch_duration = time.time() - self.epoch_start_time
run_duration = time.time() - self.run_start_time
# calculate loss & accuracy relative to the size of the dataset
loss = self.epoch_loss / len(self.loader.dataset)
accuracy = self.epoch_num_correct / len(self.loader.dataset)
self.tb.add_scalar('Loss', loss, self.epoch_count)
self.tb.add_scalar('Accuracy', accuracy, self.epoch_count)
# pass histogram data to the tensorboard
for name, param in self.network.named_parameters():
self.tb.add_histogram(name, param, self.epoch_count)
self.tb.add_histogram(f'{name}.grad', param.grad, self.epoch_count)
# saving run data to DataFrame
results = OrderedDict()
results["run"] = self.run_count
results["epoch"] = self.epoch_count
results["loss"] = loss
results["accuracy"] = accuracy
results["epoch duration"] = epoch_duration
results["run duration"] = run_duration
for key, value in self.run_params._asdict().items(): results[key] = value # add run_params to the dictionary
self.run_data.append(results)
df = pd.DataFrame.from_dict(self.run_data, orient='columns')
        display(df) # display the results of all runs collected so far
def track_loss(self, loss):
"""Update loss value"""
self.epoch_loss += loss.item() * self.loader.batch_size
def track_num_correct(self, preds, labels):
"""Update number of correct predictions"""
self.epoch_num_correct += self._get_num_correct(preds, labels)
@torch.no_grad() # turn off gradient tracking
def _get_num_correct(self, preds, labels): # underscore indicates that this is an internal class method
"""Calculate the number of correct predictions"""
return preds.argmax(dim=1).eq(labels).sum().item()
def save(self, fileName):
"""Save the results to .csv and .json"""
pd.DataFrame.from_dict(self.run_data, orient='columns').to_csv(f'{fileName}.csv')
with open(f'{fileName}.json', 'w', encoding='utf-8') as f:
json.dump(self.run_data, f, ensure_ascii=False, indent=4)
# setup the transformations to the dataset
transform = transforms.Compose([transforms.ToTensor()]) # ToTensor converts the dataset's samples to tensor form
# prepare the training set
train_set = torchvision.datasets.FashionMNIST(
root='D:/Stuff on HDD/My stuff/ML DS/Data_folder',
train=True, download=True, # train=True to get the part of the set designated for training
transform=transform)
# set the training parameters
params = OrderedDict(lr = [.01], batch_size = [100, 2000])
# start the training
m = RunManager()
for run in RunBuilder.get_runs(params):
network = Network().to(device)
loader = torch.utils.data.DataLoader(train_set, batch_size=run.batch_size)
optimizer = optim.Adam(network.parameters(), lr=run.lr)
m.begin_run(run, network, loader)
for epoch in tqdm(range(5), desc='Epoch'):
m.begin_epoch()
for batch in tqdm(loader, desc='Batch'):
images = batch[0]
images = images.to(device)
labels = batch[1]
labels = labels.to(device)
preds = network(images)
loss = F.cross_entropy(preds, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
m.track_loss(loss)
m.track_num_correct(preds, labels)
m.end_epoch()
print('end epoch')
m.end_run()
m.save('results')
###Output
_____no_output_____ |
notebooks/3-convert-json.ipynb | ###Markdown
1 Introduction 1.0 Package imports
###Code
import numpy as np
import pandas as pd
import pickle
import os
import json
from newsplease import NewsPlease
from tqdm import tnrange
###Output
_____no_output_____
###Markdown
2 Definitions 2.0 Parameter definitions
###Code
country = 'indonesia'
year = 2017
month = 1
path = "../data/{}/".format(country)
def set_params(country, year, month):
metadata_path = (path + "metadata/" + str(year) + "/"
+ str(month).zfill(2) + ".csv")
fulltext_path = (path + "text/" + str(year) + "/"
+ str(month).zfill(2) + "/")
json_path = (path + "json/" + str(year) + "/"
+ str(month).zfill(2) + "/")
return metadata_path, fulltext_path, json_path
metadata_path, fulltext_path, json_path = set_params(country, year, month)
if not os.path.exists(json_path):
os.makedirs(json_path)
cols_to_keep = ['Actor1Code', 'Actor1Name', 'Actor2Code', 'IsRootEvent', 'EventCode', 'CAMEOCodeDescription',
'EventRootCode', 'QuadClass', 'GoldsteinScale', 'NumMentions', 'AvgTone', 'ActionGeo_FullName',
'ActionGeo_Lat', 'ActionGeo_Long', 'SOURCEURL', 'title']
###Output
_____no_output_____
###Markdown
2.1 Function definitions
###Code
def load_obj(idx: int, path: str = fulltext_path) -> "NewsPlease object":
idx = str(idx).zfill(5)
with open(path + idx + ".pkl", "rb") as f:
return pickle.load(f)
def save_obj(obj: dict, idx: int, path: str = json_path) -> None:
idx = str(idx).zfill(5)
with open(path + idx + ".pkl", 'wb') as fp:
pickle.dump(obj, fp)
###Output
_____no_output_____
###Markdown
3 ExecutionMerge the `dataframe` row with the `NewsPlease` text to create a `JSON` object as such:```javascript{ "id": "00001", "country": "IN", "url": "https://", "date": "MM-DD-YYYY", "full_text": string, "article_title": string, "number_actions": int, "actions": { 1: {'latitude', 'longitude', 'action_type', "goldstein", ...}, 2: {'latitude', 'longitude', 'action_type', "goldstein", ...}, 3: {'latitude', 'longitude', 'action_type', "goldstein", ...}, }, }``` `mapping_dictionary` maps: `{ "url_idx": "dataframe_idx"}`, `url_idx.zfill(5)` is the file name of the NewsPlease article in the `texts` folder.
###Code
action_dict = dict(zip(cols_to_keep[:-2], [None] * (len(cols_to_keep) - 2)))
base_dict = {
'id': None,
'country': None,
'url': None,
'full_text': None,
'article_title': None,
'number_actions': None,
'actions': dict()
}
def process_month(metadata_path, fulltext_path, json_path):
data = pd.read_csv(metadata_path)
urls = data['to_scrape'].unique()
mapping_dictionary = {}
for i, val in enumerate(urls):
match = data.index[data['to_scrape'] == urls[i]].tolist()
mapping_dictionary[i] = match
for i, val in enumerate(urls):
if os.path.exists(fulltext_path + str(i).zfill(5) + ".pkl"):
#print(fulltext_path + str(i).zfill(5) + ".pkl")
fulltext = load_obj(i, path = fulltext_path)
metadata = data.iloc[mapping_dictionary[i]]
metadata = metadata[cols_to_keep].reset_index()
item_dict = base_dict.copy()
item_dict['id'] = str(i).zfill(5)
item_dict['country'] = country
item_dict['url'] = fulltext.url
item_dict['article_title'] = fulltext.title
item_dict['date'] = fulltext.date_publish
item_dict['language'] = fulltext.language
item_dict['number_actions'] = len(metadata)
item_dict['text'] = fulltext.text
actions_dict = {}
for action in range(len(metadata)):
metadata_i = list(metadata.iloc[action][1:])
action_dict_i = dict(zip(cols_to_keep, metadata_i))
actions_dict[action] = action_dict_i
item_dict['actions'] = actions_dict
save_obj(item_dict, i, json_path)
for year in [2017, 2018, 2019]:
for month in tnrange(1, 12):
metadata_path, fulltext_path, json_path = set_params(country, year, month)
process_month(metadata_path, fulltext_path, json_path)
import pickle as pkl
#x = pkl.load("../data/indonesia/json/2017/01/00002.pkl")
with open("../data/brazil/json/2017/04/00475.pkl", "rb") as f:
    x = pkl.load(f)
x
###Output
_____no_output_____ |
projects/crispy_shifties/02_mpnn_bound_states.ipynb | ###Markdown
Run MPNN interface design on the bound states Imports
###Code
%load_ext lab_black
# Python standard library
from glob import glob
import os
import socket
import sys
# 3rd party library imports
import dask
import matplotlib.pyplot as plt
import pandas as pd
import pyrosetta
import numpy as np
import scipy
import seaborn as sns
from tqdm.auto import tqdm # jupyter compatible progress bar
tqdm.pandas() # link tqdm to pandas
# Notebook magic
# save plots in the notebook
%matplotlib inline
# reloads modules automatically before executing cells
%load_ext autoreload
%autoreload 2
print(f"running in directory: {os.getcwd()}") # where are we?
print(f"running on node: {socket.gethostname()}") # what node are we on?
###Output
_____no_output_____
###Markdown
Set working directory to the root of the crispy_shifty repoTODO set to projects dir
###Code
os.chdir("/home/pleung/projects/crispy_shifty")
# os.chdir("/projects/crispy_shifty")
###Output
_____no_output_____
###Markdown
Run MPNN on the interfacesTODO
###Code
from crispy_shifty.utils.io import gen_array_tasks
simulation_name = "02_mpnn_bound_states"
design_list_file = os.path.join(
os.getcwd(), "projects/crispy_shifties/01_loop_bound_states/looped_states.list"
)
output_path = os.path.join(os.getcwd(), f"projects/crispy_shifties/{simulation_name}")
options = " ".join(
[
"out:level 200",
]
)
gen_array_tasks(
distribute_func="crispy_shifty.protocols.mpnn.mpnn_bound_state",
design_list_file=design_list_file,
output_path=output_path,
queue="medium",
memory="4G",
nstruct=1,
nstruct_per_task=1,
options=options,
simulation_name=simulation_name,
)
!sbatch -a 1-$(cat /mnt/home/pleung/projects/crispy_shifty/projects/crispy_shifties/02_mpnn_bound_states/tasks.cmds | wc -l) /mnt/home/pleung/projects/crispy_shifty/projects/crispy_shifties/02_mpnn_bound_states/run.sh
###Output
Submitted batch job 5826055
###Markdown
Collect scorefiles of designed bound states and concatenateTODO change to projects dir
###Code
sys.path.insert(0, "~/projects/crispy_shifty") # TODO
from crispy_shifty.utils.io import collect_score_file
simulation_name = "02_mpnn_bound_states"
output_path = os.path.join(os.getcwd(), f"projects/crispy_shifties/{simulation_name}")
if not os.path.exists(os.path.join(output_path, "scores.json")):
collect_score_file(output_path, "scores")
###Output
Warning: Use of `pyrosetta.distributed.cluster` namespace requires Anaconda (or Miniconda) to be properly installed for reproducibility of PyRosetta simulations. Please install Anaconda (or Miniconda) onto your system to enable running `which conda`. For installation instructions, visit:
https://docs.anaconda.com/anaconda/install
###Markdown
Load resulting concatenated scorefileTODO change to projects dir
###Code
sys.path.insert(0, "~/projects/crispy_shifty") # TODO
from crispy_shifty.utils.io import parse_scorefile_linear
output_path = os.path.join(os.getcwd(), f"projects/crispy_shifties/{simulation_name}")
scores_df = parse_scorefile_linear(os.path.join(output_path, "scores.json"))
scores_df = scores_df.convert_dtypes()
###Output
_____no_output_____
###Markdown
Setup for plotting
###Code
sns.set(
context="talk",
font_scale=1, # make the font larger; default is pretty small
style="ticks", # make the background white with black lines
palette="colorblind", # a color palette that is colorblind friendly!
)
###Output
_____no_output_____
###Markdown
Data explorationGonna remove the Rosetta sfxn scoreterms for now
###Code
from crispy_shifty.protocols.design import beta_nov16_terms
scores_df = scores_df[
[term for term in scores_df.columns if term not in beta_nov16_terms]
]
print(len(scores_df))
print(list(scores_df.columns))
###Output
1305
['bb_clash', 'best_average_DAN_plddts', 'best_average_plddts', 'best_model', 'best_ptm', 'best_rmsd_to_input', 'buns_parent', 'cms_AcB', 'cms_AnAc', 'cms_AnAcB', 'cms_AnB', 'docked_helix', 'dssp', 'exposed_hydrophobics_parent', 'geometry_parent', 'holes_all_parent', 'holes_core_parent', 'loop_dist', 'loop_sc', 'looped_length', 'mismatch_probability_parent', 'mpnn_seq_0000', 'mpnn_seq_0001', 'mpnn_seq_0002', 'mpnn_seq_0003', 'mpnn_seq_0004', 'mpnn_seq_0005', 'mpnn_seq_0006', 'mpnn_seq_0007', 'mpnn_seq_0008', 'mpnn_seq_0009', 'mpnn_seq_0010', 'mpnn_seq_0011', 'mpnn_seq_0012', 'mpnn_seq_0013', 'mpnn_seq_0014', 'mpnn_seq_0015', 'mpnn_seq_0016', 'mpnn_seq_0017', 'mpnn_seq_0018', 'mpnn_seq_0019', 'mpnn_seq_0020', 'mpnn_seq_0021', 'mpnn_seq_0022', 'mpnn_seq_0023', 'mpnn_seq_0024', 'mpnn_seq_0025', 'mpnn_seq_0026', 'mpnn_seq_0027', 'mpnn_seq_0028', 'mpnn_seq_0029', 'mpnn_seq_0030', 'mpnn_seq_0031', 'mpnn_seq_0032', 'mpnn_seq_0033', 'mpnn_seq_0034', 'mpnn_seq_0035', 'mpnn_seq_0036', 'mpnn_seq_0037', 'mpnn_seq_0038', 'mpnn_seq_0039', 'mpnn_seq_0040', 'mpnn_seq_0041', 'mpnn_seq_0042', 'mpnn_seq_0043', 'mpnn_seq_0044', 'mpnn_seq_0045', 'mpnn_seq_0046', 'mpnn_seq_0047', 'mpnn_seq_0048', 'mpnn_seq_0049', 'mpnn_seq_0050', 'mpnn_seq_0051', 'mpnn_seq_0052', 'mpnn_seq_0053', 'mpnn_seq_0054', 'mpnn_seq_0055', 'mpnn_seq_0056', 'mpnn_seq_0057', 'mpnn_seq_0058', 'mpnn_seq_0059', 'mpnn_seq_0060', 'mpnn_seq_0061', 'mpnn_seq_0062', 'mpnn_seq_0063', 'mpnn_seq_0064', 'new_loop_str', 'packstat_parent', 'parent', 'parent_length', 'path_in', 'pdb', 'pivot_helix', 'pre_break_helix', 'sap_parent', 'sc_AcB', 'sc_AnAc', 'sc_AnAcB', 'sc_AnB', 'sc_all_parent', 'scaffold_type', 'score', 'score_per_res', 'score_per_res_parent', 'shift', 'ss_sc', 'state', 'time', 'topo', 'total_length', 'total_score', 'trimmed_length']
###Markdown
Save individual fastasTODO change to projects dir
###Code
sys.path.insert(0, "~/projects/crispy_shifty") # TODO
from crispy_shifty.utils.io import df_to_fastas
output_path = os.path.join(os.getcwd(), f"projects/crispy_shifties/{simulation_name}")
scores_df = df_to_fastas(scores_df, prefix="mpnn_seq")
###Output
_____no_output_____
###Markdown
Save a list of outputs
###Code
simulation_name = "02_mpnn_bound_states"
output_path = os.path.join(os.getcwd(), f"projects/crispy_shifties/{simulation_name}")
with open(os.path.join(output_path, "mpnn_states.list"), "w") as f:
for path in tqdm(scores_df.index):
print(path, file=f)
###Output
_____no_output_____
###Markdown
Concat the pdb.bz2 and fasta paths into a single list, for reasons
###Code
simulation_name = "02_mpnn_bound_states"
output_path = os.path.join(os.getcwd(), f"projects/crispy_shifties/{simulation_name}")
with open(os.path.join(output_path, "mpnn_states.pair"), "w") as f:
for path in tqdm(scores_df.index):
line = path + "____" + path.replace("decoys", "fastas").replace("pdb.bz2", "fa")
print(line, file=f)
###Output
_____no_output_____
###Markdown
Prototyping blocks test `mpnn_bound_state`
###Code
%%time
import pyrosetta
pyrosetta.init()
sys.path.insert(0, "~/projects/crispy_shifty/") # TODO projects
from crispy_shifty.protocols.mpnn import mpnn_bound_state
t = mpnn_bound_state(
None,
**{
'pdb_path': '/mnt/home/pleung/projects/crispy_shifty/projects/crispy_shifties/01_loop_bound_states/decoys/0001/01_loop_bound_states_17f57e75865441a78a0057fb8081b4de.pdb.bz2',
}
)
for i, tppose in enumerate(t):
tppose.pose.dump_pdb(f"{i}.pdb")
tppose.pose.scores
import pyrosetta.distributed.viewer as viewer
ppose = pyrosetta.distributed.io.pose_from_file("test.pdb")
view = viewer.init(ppose, window_size=(1600, 1200))
view.add(viewer.setStyle())
view.add(viewer.setStyle(colorscheme="whiteCarbon", radius=0.10))
view.add(viewer.setHydrogenBonds())
view.add(viewer.setHydrogens(polar_only=True))
view.add(viewer.setDisulfides(radius=0.25))
view()
###Output
_____no_output_____ |
examples/demo_zeropifull.ipynb | ###Markdown
Initialization
###Code
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
import scqubits as qubit
import numpy as np
import scqubits.utils.plotting as plot  # needed for plot.matrix() below (module path assumed)
###Output
_____no_output_____
###Markdown
Full 0-pi qubit, including coupling to zeta mode $H = H_{0-\pi} + H_\text{int} + H_\zeta$where $ H_{0-\pi}=H_\text{sym} +2E_{C\Sigma}(\delta C_J/C_J)\partial_\phi\partial_\theta +2\,\delta E_J \sin\theta\sin(\phi-\phi_\text{ext}/2)$$ H_\text{int} = 2E_{C\Sigma}dC\,\partial_\theta\partial_\zeta + E_L dE_L \phi\,\zeta$$ H_\zeta = \omega_\zeta a^\dagger a$
###Code
phi_grid = qubit.Grid1d(-8*np.pi, 8*np.pi, 360)
# parameters for the modified 0-\pi circuit
EJ_CONST = 1/3.95
ECJ_CONST = 1/(8.0*EJ_CONST)
ECS_CONST = 10.0**(-3)
zpifull = qubit.FullZeroPi(
zeropi_cutoff = 10,
zeta_cutoff = 40,
grid = phi_grid,
ncut = 30,
EJ = EJ_CONST,
dEJ = 0.05,
EL = 10.0**(-3),
dEL = 0.05,
ECJ = ECJ_CONST,
dCJ = 0.05,
EC = None,
ECS = ECS_CONST,
dC = 0.08,
ng = 0.3,
flux = 0.2
)
print(zpifull)
evals2 = zpifull.eigenvals(evals_count=40)
evals2
###Output
_____no_output_____
###Markdown
Matrix elements $g_{ll'}$ [Dempster et al., text immediately above Eqs. (17) & (18)]
###Code
ev_count = 10
gmat = zpifull.g_coupling_matrix(None, evals_count=ev_count)
plot.matrix(gmat);
###Output
_____no_output_____ |
20202/aed_0101_templates.ipynb | ###Markdown
C++ Templates!
###Code
%%writefile template1.cpp
#include <iostream>
#include <string>
using namespace std;
template <typename T>
T sumar(T a, T b) {
return a + b;
}
int main() {
cout << sumar(5, 6) << endl;
cout << sumar(5.7, 6.1) << endl;
cout << sumar(5.7f, 6.1f) << endl;
cout << sumar(' ', ' ') << endl;
cout << sumar<string>("Hola, ", "Mundo!") << endl;
return 0;
}
%%script bash
g++ template1.cpp && ./a.out
%%writefile template2.cpp
#include <iostream>
#include <string>
using namespace std;
template <typename T>
T sum(T* vec, int n) {
T s = 0;
for (int i = 0; i < n; ++i) {
s += vec[i];
}
return s;
}
int main() {
int a[5] = { 1, 2, 3, 4, 5 };
cout << sum<int>(a, 5) << endl;
char b[2] = { ' ', ' ' };
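    // two ' ' chars (ASCII 32) sum to 64, so the call below prints '@'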
cout << sum<char>(b, 2) << endl;
float c[3] = { 0.5, .5, 0. };
cout << sum<float>((float*)c, 3) << endl;
return 0;
}
%%script bash
g++ template2.cpp && ./a.out
###Output
15
@
1
###Markdown
Templates and .h files
###Code
%%writefile template3.h
#pragma once
template<typename T>
class MiClase {
T miAtributo;
public:
MiClase(T miAtributo) : miAtributo(miAtributo) {}
T getMiAtributo();
void setMiAtributo(T miAtributo);
};
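// Template member definitions must be visible at every point of instantiation,
// so the implementation file is included at the end of the header rather than compiled separately.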
#include "template3.cpp"
%%writefile template3.cpp
#include "template3.h"
template <typename T>
T MiClase<T>::getMiAtributo() {
return miAtributo;
}
template <typename T>
void MiClase<T>::setMiAtributo(T miAtributo) {
this->miAtributo = miAtributo;
}
%%writefile template3Test.cpp
#include <iostream>
#include "template3.h"
using namespace std;
int main() {
MiClase<int>* mc = new MiClase<int>(10);
cout << mc->getMiAtributo() << endl;
delete mc;
return 0;
}
%%script bash
g++ template3Test.cpp && ./a.out
###Output
_____no_output_____ |
demos/eda-titanic/eda-titanic.ipynb | ###Markdown
**Exploratory Data Analysis**: The Titanic Dataset (Extended version) Source: [https://github.com/d-insight/code-bank.git](https://github.com/d-insight/code-bank.git) License: [MIT License](https://opensource.org/licenses/MIT). See open source [license](LICENSE) in the Code Bank repository. ------------- Overview In this demo, we will explore the likelihood of surviving the sinking of the RMS Titanic. The Titanic hit a iceberg in 1912 and quickly sank, killing 1,502 out of the 2,224 passengers and crew on board. One most important reason so many people died was that there were not enough lifeboats to serve everyone. Accordingly, it has also been frequently noted that the most **_likely_** people to survive the disaster were women, children, and members of the upper-class. Let's see if that is true.The Titanic case is a classic problem in data science, and it is still an ongoing [Kaggle competition](https://www.kaggle.com/c/titanic). There are many other examples of the Titanic dataset in introductory statistics and Data Science courses, so we also encourage you to look around and see how others have approached the problem. Image source: https://upload.wikimedia.org/wikipedia/commons/9/95/Titanic_sinking%2C_painting_by_Willy_St%C3%B6wer.jpg Introduction We will conduct our EDA and visualization analysis in three parts: 1. Analyze and visualize base-rates 2. Calculate new predictors that can help the analysis ('Feature Engineering) 3. Visualize advanced data characteristics To prepare, let's first review the data tools, visualization tools, and actual data for this problem. Data Structures for Python There are three basic options for loading and working with data in Python: * Pure `Python 3.x` In this approach, you load data directly into "pure" Python data objects, such as lists, sets, and dictionaries (or nested hiearchies of such objects, such as lists-within-lists, lists-within-dicts, dicts-within-dicts, and so on). Although operation on "pure Python" objects can be slow, it also is extremely flexible. * `NumPy` The basic data structures for holding arrays, vectors, and matrices of data are provided by a core package called `NumPy`. NumPy also has a set of linear algebra and numerical fuinctions, but in general such functions are now provided by another package called `scipy` (for scientific computing) and numerical computation is usually done there. `NumPy` has been optimized to run with primitive routines written in `C` (or even `fortran`) and so is orders-of-magnitude faster-running than doing calculations with pure Python. Nevertheless, the data access, subscripting, and slicing of elements for `NumPy` still conforms to the same syntax as pure Python. * `pandas` Most applied Data Science projects (that fit into memory/RAM), now use an "Excel-like" package called `pandas`. Pandas stores data in objects called **dataframes**. Dataframes will become the central type of data object for your work in Data Science (much as Excel Spreadsheets often were for Business Analysts). Dataframes provide many different properties and methods that help you to work with your data more effectively. We will use `pandas` for most of the examples and problems in the class. Visualization for Python There are many ways to visualize data and results in Python. Sometimes that is a good thing - and sometimes it is a bad thing, for there are many ways to do it. Eventually you will want to learn multiple methods, as data scientists often use many different libraries. 
The following are the most common libraries, with links to their documentation: * `pandas` https://pandas.pydata.org/pandas-docs/stable/user_guide/visualization.html If you are in a hurry, it also is possible to generate many simple plots directly from the `pandas` library and `dataframe` object. This is a good option when you are moving fast and all you need to so is see a simple histogram or trend line and you already have your data in a pandas dataframe. * `seaborn` https://seaborn.pydata.org/ Seaborn is a simplified and better looking interface that sits on top of the standard `matplotlib` library. Seaborn is often used because it looks great, but also gives you all the ability to go into `matplotlib` to customize graphs for a particular need. * `plotly` and `plotly_express` https://www.plotly.express/ The commercial package `plotly` is a comprehensive toolkit for building interactive, D3 and WebGL charts. Interactive graphs are particularly good for online (web-based) dashboards. As you can see in the plot of Python visualization options below, `plotly` is quickly rising in popularity. To use plotly, however, you need to sign up for a [plotly account](https://plot.ly/python/) and you need an active internet connection. We won't need all of the features in plotly, however, and so we will develop examples using just `plotly_express`. Plotly express is a free, local-to-your-machine, and easier-to-use, API to the plotly service. [See here for documentation](https://www.plotly.express/plotly_express/) for the plotly express API. * `matplotlib` and `pyplot` https://matplotlib.org/ Matplotlib is the core package for graphing in Python, and many other packages build on top of it (i.e., pyplot, pandas, and seaborn). The `matplotlib` object model can be somewhat confusing, which often means writing many lines of code and hours of debugging. To help, matplotlib also comes with an interface library called `pyplot` ([documentation here](https://matplotlib.org/api/pyplot_api.html)) that mimics the MatLab approach to graphing (helpful for many engineers). In general, however, `pyplot` has now been supplanted by the other options above. Although it is faster to get started with plotting by using one of the other options above, eventually you will find that you often need to return to matplotlib in order to "tweak" a layout or to work with more complicated graphs. Image source: EPFL TIS Lab The Titanic Dataset The data is taken from the [Kaggle Titanic Competition](https://www.kaggle.com/c/titanic). It is split between a "training" dataset (where you know the actual outcome) and a "testing" dataset (where you do not know the outcome). If we were actually competing in the Kaggle competition, then we would be trying to predict the unknown testing cases and submitting our predictions to Kaggle to see if we could win. But in this case, the objective is simply to get you started with Python and to familiarize you with the basic data structures and graphing libraries of the Data Science stack. We therefore will ignore the testing dataset and work only with the training data.All of the data that you will need for this demo is in the `titanic.csv` file, located within the same directory as this notebook. We don't know very much about the 891 passengers in the training dataset. The following features are available. Feature name | Description | -------- | -------------- | Survived | Target variable, i.e. 
survival, where 0 = No, 1 = Yes | PassengerId | Id of the passenger | Pclass | Ticket class, wher 1 = 1st, 2 = 2nd, 3 = 3rd | Name | Passenger name | Sex | Sex | Age | Age in years | SibSp | Num of siblings or spouses aboard the Titanic | Parch | Num of parents or children aboard the Titanic | Ticket | Ticket number, i.e. record ID | Fare | Passenger fare | Cabin | Cabin number | Embarked | Port of Embarkation, where C = Cherbourg, Q = Queenstown, S = Southampton | **Special Notes** * **Pclass**: A proxy for socio-economic status (SES): 1st = Upper class; 2nd = Middle class; 3rd = Lower class. * **Age**: Age is fractional if less than 1. If the age is estimated, is it in the form of xx.5 * **SibSp**: The dataset defines family relations as: Sibling = brother, sister, stepbrother, stepsister; Spouse = husband, wife (mistresses and fiancés were ignored) * **Parch**: The dataset defines family relations as: Parent = mother, father; Child = daughter, son, stepdaughter, stepson; Some children travelled only with a nanny, therefore parch=0 for them. -------- **Part 0**: Setup
###Code
# Put all import statements at the top of your notebook -- import some basic and important ones here
# Standard imports
import numpy as np
import pandas as pd
import pandas_profiling
import os
import sys
# Visualization packages
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from plotly.offline import init_notebook_mode
init_notebook_mode(connected=True)
# Special code to ignore un-important warnings
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
###Output
_____no_output_____
###Markdown
**Part 1**: Analyze Base Rates and EDAThe most basic "model" to run for any problem is the "average" (or "base rate") outcome. Before we work on more complicated models, it is a good idea to understand how a very simple heuristic will perform -- and then you can determine how much a more complicated model really improves the predictions. Your first model may be too simple and therefore highly "biased" (i.e., systematically low or systematically high for different groupings or levels within the data); but your simple model should also not vary much if/when we pull a new sample from the same underlying population/data generating function. We will therefore conduct some **_exploratory data analysis_** ("**EDA**") and **_visualization_** in this demo to understand the distribution of the outcome for this case. Open and Inspect the Data
###Code
# Open dataset with Pandas
data = pd.read_csv('titanic.csv')
# Count rows and coumns
data.shape
###Output
_____no_output_____
###Markdown
**Part 1a**: EDA - the automated approachInstead of performing all of the steps above manually, you can also run a "profile" on the dataset first, and then drill down into specific cases of interest.
###Code
# Use the automated pandas profiling utility to examine the dataset
data.profile_report()
###Output
_____no_output_____
###Markdown
**Part 1b**: EDA - the manual approachIt is generally a good idea to go beyond the automated approach to EDA. Here are some useful steps for understanding and plotting the data in more detail.
###Code
# Inspect the data (variable names, count non-missing values, review variables types)
data.info()
# Look at the "head" of the dataset
data.head()
# Look at the "tail" of the dataset
data.tail()
# Calculate summary statistics (mean, min, max, std, etc.) for all variables and transpose the output
data.describe().T
# Count missing data per feature
data.isnull().sum()
# Plot missing data (Hint: use a seaborn heatmap to see distribution of isnull() for the dataframe
sns.heatmap(data.isnull(), cbar=False, cmap="YlGnBu_r")
###Output
_____no_output_____
###Markdown
Aside: selecting columns in pandasAs we will select columns from a pandas dataframe many times, it is important to note that there are generally two equivalent ways of doing this. We will use the first approach of passing the column name as a string in square brackets. This distinguishes column selection from method calls.
###Code
# First approach (recommended): pass the column name as a string in square brackets
data['Survived'].describe()
# Second approach: pass the column name as a method call
data.Survived.describe()
###Output
_____no_output_____
###Markdown
Analyze Survival (the basic outcome)
###Code
# Count number of Survivals and Deaths
survivals = sum(data['Survived'])
deaths = len(data[data['Survived'] == False])
assert survivals + deaths == len(data) # not necessary, but this should be true
assert survivals + deaths == data.shape[0] # not necessary, but this also should be true
print('Survivals: ', survivals)
print('Deaths: ', deaths)
print('The base-rate likelihood of survival: ', survivals/(survivals+deaths))
###Output
_____no_output_____
###Markdown
Plotting the target feature "Survived": 4 approaches
###Code
# Plot a histogram of "Survived" using Pandas
# The fastest way - this just uses the pandas dataframe
data['Survived'].hist()
# Plot a histogram of "Survived" using Seaborn
# The fastest way is to reference Column Names as Properties of the Dataframe
sns.countplot(data["Survived"])
# Note that another, "safe" way to code is to pass parameters by name
# sns.countplot(x='Survived', data=data)
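# The remaining two of the "4 approaches", shown here only as a sketch
# (assumes plotly is installed, as the plotly.offline import in the setup cell suggests):
# import plotly.express as px
# px.histogram(data, x='Survived').show()          # interactive plotly_express histogram
# plt.hist(data['Survived'], bins=2); plt.show()   # plain matplotlib/pyplot histogram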
###Output
_____no_output_____
###Markdown
Analyze Base-Rate Outcomes, by ConditionAgain, let's start by calculating the frequency (or "base rate") of the outcome variable for different conditions of interest. You can do this most easily by using the `.crosstab()` function from the `pandas` module.
###Code
# Use the pandas crosstab() function to count outcomes by condition
pd.crosstab(data['Pclass'], data['Survived'], margins=False)
# Now show the totals (so you can calculate a conditional marginal base rate).
pd.crosstab(data['Pclass'], data['Survived'], margins=True)
# Use the style() function (chain it onto the end of prior code) to also overlay a heatmap
# i.e., you can still do this in one line of code with .style.background_gradient()
pd.crosstab(data['Pclass'], data['Survived'], margins=True).style.background_gradient()
# If you can't read the results, select your own color scheme
pd.crosstab(data['Pclass'], data['Survived'], margins=True).style.background_gradient(cmap='autumn_r')
# Now do a "three-way" crosstab for Class, Sex, Survival
pd.crosstab([data['Sex'], data['Survived']], data['Pclass'], margins=True)
# Use the pandas dataframe to plot a histogram of Age
data['Age'].hist()
# Increase the number of histogram bins
data['Age'].hist(bins = 40)
# Use seaborn to plot the kernel density (a kdeplot) for Age
facet = sns.FacetGrid(data, aspect = 4)
facet.map(sns.kdeplot,'Age', shade = True)
facet.set(xlim = (0, data['Age'].max()))
# Use Seaborn to plot the kernel density of Fare
facet = sns.FacetGrid(data, aspect=4)
facet.map(sns.kdeplot,'Fare', shade=True)
facet.set(xlim = (0, data['Fare'].max()))
# Redo the above in just 1 line of code, but show both a frequency histogram of counts,
# and a kernel density of the probability density function
# There usually is a simple way to do it with seaborn...
facet = sns.distplot(data['Fare'])
# Use pandas to plot a histogram of Survived, separated by Class
# Hint: figsize is defined in inches
lived = data[data['Survived'] == 1]['Pclass'].value_counts()
died = data[data['Survived'] == 0]['Pclass'].value_counts()
df = pd.DataFrame([lived, died])
df.index = ['Lived', 'Died']
df.plot(kind = 'bar', stacked=True, figsize=(12, 5), title='Survival by Social Economic Class (1st, 2nd, 3rd)')
# Use seaborn barplots to plot Survival as a Function of Class
sns.barplot(x='Pclass', y='Survived', data=data)
plt.ylabel("Survival Rate")
plt.title("Survival as function of Pclass")
plt.show() # this removes the annoying line that references the final object, e.g. "<matplotlib.axes._subplots.AxesSubplot at 0x1a1d5f4e48>"
# Use pandas to plot a histogram of Survived, separated by Sex
lived = data[data['Survived'] == 1]['Sex'].value_counts()
died = data[data['Survived'] == 0]['Sex'].value_counts()
df = pd.DataFrame([lived, died])
df.index = ['Lived', 'Died']
df.plot(kind = 'bar', stacked = True, figsize = (12, 5), title = 'Survival by Gender')
plt.show()
# Use Pandas (with matplotlib customization to make it look good) to draw a pie chart of Survival by Sex
fig, (ax1, ax2) = plt.subplots(1,2,figsize=(16,7))
data['Survived'][data['Sex'] == 'male'].value_counts().plot.pie(ax = ax1)
data['Survived'][data['Sex'] == 'female'].value_counts().plot.pie(ax = ax2, colors = ['C1', 'C0'])
# Now use some matplotlib customization to make your previous plot look cool
f, ax = plt.subplots(1, 2, figsize = (16, 7))
data['Survived'][data['Sex'] == 'male'].value_counts().plot.pie(explode=[0,0.2], autopct='%1.1f%%', ax = ax[0], shadow = True)
data['Survived'][data['Sex'] == 'female'].value_counts().plot.pie(explode=[0,0.2], autopct='%1.1f%%', ax = ax[1], shadow = True, colors = ['C1', 'C0'])
ax[0].set_title('Survived (male)')
ax[1].set_title('Survived (female)')
plt.show()
# Use a seaborn facet grid to jointly examine Sex, Class, and Survival
g = sns.FacetGrid(data, row = 'Sex', col = 'Pclass', hue = 'Survived', margin_titles = True, height = 3, aspect = 1.1)
g.map(sns.distplot, 'Age', kde = False, bins = np.arange(0, 80, 5), hist_kws = dict(alpha=0.6))
g.add_legend()
plt.show()
# Examine the disribution of Fare as a function of Pclass, Sex and Survived
g = sns.FacetGrid(data, row = 'Sex', col = 'Pclass', hue = 'Survived', margin_titles = True, height = 3, aspect = 1.1)
g.map(sns.distplot, 'Fare', kde = False, bins = np.arange(0, 550, 50), hist_kws = dict(alpha = 0.6))
g.add_legend()
plt.show()
###Output
_____no_output_____
###Markdown
Sometimes you have a particular look you need to reproduce in an exhibit. In that case, you will grab the figure and axis objects from matplotlib after they are created by seaborn (or pandas) so that you can reference them to customize properties of each plot.
###Code
# Use the plt.subplots() function from pyplot to capture the figure and subplot objects
# so you can work with both seaborn and matplot lib to make a fully customized distribution plot
LABEL_SURVIVED = 'Survived'
LABEL_DIED = 'Did Not Survive'
fig, axes = plt.subplots(nrows = 1, ncols = 2, figsize = (12, 6))
women = data[data['Sex'] == 'female']
men = data[data['Sex'] == 'male']
ax = sns.distplot(women[women['Survived'] == 1]['Age'].dropna(), bins = 18, label = LABEL_SURVIVED, ax = axes[0], kde = False)
ax = sns.distplot(women[women['Survived'] == 0]['Age'].dropna(), bins = 40, label = LABEL_DIED, ax = axes[0], kde = False)
ax.legend()
ax.set_title('Female')
ax = sns.distplot(men[men['Survived'] == 1]['Age'].dropna(), bins = 18, label = LABEL_SURVIVED, ax = axes[1], kde = False)
ax = sns.distplot(men[men['Survived'] == 0]['Age'].dropna(), bins = 40, label = LABEL_DIED, ax = axes[1], kde = False)
ax.legend()
_ = ax.set_title('Male')
# ADVANCED: combine histograms for many variables related to survival into one composite figure
R = 2
C = 3
fields = ['Survived', 'Sex', 'Pclass', 'SibSp', 'Parch', 'Embarked']
fig, axs = plt.subplots(R, C, figsize = (12, 8))
for row in range(0, R):
for col in range(0, C):
i = row * C + col
ax = axs[row][col]
sns.countplot(data[fields[i]], hue = data["Survived"], ax = ax)
ax.set_title(fields[i], fontsize = 14)
ax.legend(title = "survived", loc = 'upper center')
plt.tight_layout()
###Output
_____no_output_____
###Markdown
**Part 2**: Feature EngineeringUsing domain knowledge, we can create new features that might improve performance of our model at a later stage.
###Code
# Extract the leading "title" from the passenger name, and summarize (count) the different titles
data['Title'] = data['Name'].str.extract(' ([A-Za-z]+)\.', expand=False)
data['Title'].value_counts()
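# Two further engineered features, added as a sketch: family size
# (siblings/spouses + parents/children + the passenger) and a solo-traveller flag
data['FamilySize'] = data['SibSp'] + data['Parch'] + 1
data['IsAlone'] = (data['FamilySize'] == 1).astype(int)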
###Output
_____no_output_____
###Markdown
**Part 3**: Explore Swarm and Violin PlotsSwarm and violin plots show the same data as plots of counts and/or density distributions (graphs you did earlier), but they also happen to look very cool and can also draw attention to details that you do not see in other plots. If you have extra time, try to make a few of these below.
###Code
# Define some constants (such as PALLET and FIGSIZE) so that all of your figures look consistent
PALETTE = ["lightgreen" , "lightblue"] # you can set a custom palette with a simple list of named color values
FIGSIZE = (13, 7)
# Use a seaborn "swarmplot" to examine survival by age and class.
fig, ax = plt.subplots(figsize = FIGSIZE)
sns.swarmplot(x = 'Pclass', y = 'Age', hue = 'Survived', dodge = True, data = data, palette = PALETTE, size = 7, ax = ax)
plt.title('Survival Events by Age and Class ')
plt.show()
# Use a seaborn "violinplot" to examine survival by age and class.
fig, ax = plt.subplots(figsize = FIGSIZE)
sns.violinplot(x = "Pclass", y = "Age", hue = 'Survived', data=data, split=True, bw = 0.05 , palette = PALETTE, ax = ax)
plt.title('Survival Distributions by Age and Class ')
plt.show()
# Use the catplot function (in just one line of code!) to show the comparable distributions for Class, Age, Sex and Survived
g = sns.catplot(x = "Pclass", y = "Age", hue = "Survived", col = "Sex", data = data, kind = "violin", split = True, bw = 0.05, palette = PALETTE, height = 7, aspect = 0.9, s = 7)
###Output
_____no_output_____ |
NumPy/09. Performance comparison between NumPy and Python List.ipynb | ###Markdown
9. Basic Performance comparison between `NumPy Array` and `List`
###Code
import numpy as np
import time
list1 = list(range(150000))
list2 = list(range(150000))
time.time()
t0 = time.time()
## Multiplications
sq_list = [list1[i]*list2[i] for i in range(len(list1))]
t1 = time.time()
print(t1-t0)
arr1 = np.array(list1)
arr2 = np.array(list2)
t0 = time.time()
arr3 = arr1*arr2
t1 = time.time()
print(t1-t0)
###Output
0.0004642009735107422
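###Markdown
To make the comparison easier to read, an optional sketch (not part of the original notebook) can time both approaches with the higher-resolution perf_counter and report the speedup ratio:
###Code
from time import perf_counter  # higher-resolution timer than time.time

t0 = perf_counter()
sq_list = [list1[i] * list2[i] for i in range(len(list1))]
list_seconds = perf_counter() - t0

t0 = perf_counter()
arr3 = arr1 * arr2
numpy_seconds = perf_counter() - t0

print(f"list: {list_seconds:.6f} s, numpy: {numpy_seconds:.6f} s, "
      f"speedup: {list_seconds / numpy_seconds:.1f}x")
###Output
_____no_output_____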
|
examples/trades/TradesExample.ipynb | ###Markdown
Trades Example. Use with the following artifacts: 1) trades.xml, 2) traders.xml, 3) trades.csv, 4) traders.csv, 5) eventgenerator.xml. Set the following variables to work in your environment.
###Code
esp_url = "http://espsrv01:7777" # your ESP server
path = "." # the path to your artifacts
###Output
_____no_output_____
###Markdown
The first step is to import the ESPPy package along with Visuals to create visualizations.
###Code
import esppy
from esppy.espapi.visuals import Visuals
###Output
_____no_output_____
###Markdown
Start an ESP server before running the next cell. Enter the appropriate URL ('http://host:port') as the argument to the ESP constructor.
###Code
server = esppy.ESP(esp_url)
server
###Output
_____no_output_____
###Markdown
Create the server connection and visuals
###Code
conn = server.createServerConnection()
visuals = Visuals()
server.server_info
###Output
_____no_output_____
###Markdown
List any projects that are active on the server. If you have not loaded projects to the server, this returns an empty dictionary.
###Code
server.get_projects()
###Output
_____no_output_____
###Markdown
List windows within the project. If you have not loaded projects to the server, this returns an empty dictionary.
###Code
server.get_windows()
project_path = path + "/trades.xml"
project_xml = project_path
###Output
_____no_output_____
###Markdown
Load the project 'trades_proj' into the esppy server session
###Code
trades_project = server.load_project(project_xml, name='trades_proj')
###Output
_____no_output_____
###Markdown
Confirm that the project 'trades_proj' has been successfully uploaded to the esp server
###Code
server.get_projects()
server.get_windows()
###Output
_____no_output_____
###Markdown
List only the windows that are contained in the 'trades_proj' project
###Code
trades_project.get_windows()
###Output
_____no_output_____
###Markdown
Output a list of only the continuous queries that are in the 'trades_project' project
###Code
trades_project.queries
###Output
_____no_output_____
###Markdown
List all windows that are a part of the continuous query, 'trades_cq'
###Code
trades_project.queries['trades_cq'].windows
###Output
_____no_output_____
###Markdown
Produce a graphical representation of the continuous query, 'trades_cq'
###Code
trades_project.queries['trades_cq']
###Output
_____no_output_____
###Markdown
Display the XML of the project
###Code
print(trades_project.to_xml(pretty=True))
###Output
<engine>
<projects>
<project name="trades_proj" pubsub="auto" threads="4">
<contqueries>
<contquery name="trades_cq">
<windows>
<window-join name="AddTraderName">
<join type="leftouter">
<conditions>
<fields left="traderID" right="ID" />
</conditions>
</join>
<output>
<field-selection name="security" source="l_security" />
<field-selection name="quantity" source="l_quantity" />
<field-selection name="price" source="l_price" />
<field-selection name="traderID" source="l_traderID" />
<field-selection name="time" source="l_time" />
<field-selection name="name" source="r_name" />
</output>
</window-join>
<window-aggregate name="BySecurity">
<schema>
<fields>
<field key="true" name="security" type="string" />
<field key="false" name="quantityTotal" type="double" />
<field key="false" name="costTotal" type="double" />
</fields>
</schema>
<output>
<field-expr>ESP_aSum(quantity)</field-expr>
<field-expr>ESP_aSum(totalCost)</field-expr>
</output>
</window-aggregate>
<window-filter name="LargeTrades">
<expression>quantity >= 100</expression>
</window-filter>
<window-compute name="TotalCost">
<description>
This is a compute window. The non-key fields are computed
in the output section.
</description>
<schema>
<fields>
<field key="true" name="tradeID" type="string" />
<field key="false" name="security" type="string" />
<field key="false" name="quantity" type="int32" />
<field key="false" name="price" type="double" />
<field key="false" name="totalCost" type="double" />
<field key="false" name="traderID" type="int64" />
<field key="false" name="time" type="stamp" />
<field key="false" name="name" type="string" />
</fields>
</schema>
<output>
<field-expr>security</field-expr>
<field-expr>quantity</field-expr>
<field-expr>price</field-expr>
<field-expr>price*quantity</field-expr>
<field-expr>traderID</field-expr>
<field-expr>time</field-expr>
<field-expr>name</field-expr>
</output>
</window-compute>
<window-source name="Traders">
<schema>
<fields>
<field key="true" name="ID" type="int64" />
<field key="false" name="name" type="string" />
</fields>
</schema>
</window-source>
<window-source index="pi_RBTREE" name="Trades">
<schema>
<fields>
<field key="true" name="tradeID" type="string" />
<field key="false" name="security" type="string" />
<field key="false" name="quantity" type="int32" />
<field key="false" name="price" type="double" />
<field key="false" name="traderID" type="int64" />
<field key="false" name="time" type="stamp" />
</fields>
</schema>
</window-source>
</windows>
<edges>
<edge source="LargeTrades" target="AddTraderName" />
<edge source="Traders" target="AddTraderName" />
<edge source="Trades" target="LargeTrades" />
<edge source="AddTraderName" target="TotalCost" />
<edge source="TotalCost" target="BySecurity" />
</edges>
</contquery>
</contqueries>
</project>
</projects>
</engine>
###Markdown
Create a new project named 'my_proj'
###Code
my_project=server.create_project('my_proj')
my_project
###Output
_____no_output_____
###Markdown
Add a continuous query called 'trades_cq' to the project
###Code
my_query = my_project.add_query('trades_cq')
my_query
###Output
_____no_output_____
###Markdown
Continue to build your project by adding a source window called 'Trades' to the project. Specify any input variables in the schema along with their datatype. You may choose to add a key to a variable by placing a * after its name. After creating the window, display an image of it.
###Code
source_trades = server.SourceWindow(name='Trades',
schema=('tradeID*:string', 'security:string', 'quantity:int32', 'price:double', 'traderID:int64', 'time:stamp'),
index_type='pi_RBTREE')
source_trades
###Output
_____no_output_____
###Markdown
Add the source window'Trades' to the continuous query 'trades_cq'. Then display an image of the project so far.
###Code
my_query.add_window(source_trades)
my_project
###Output
_____no_output_____
###Markdown
Create another source window that receives data about the Traders. Name this new window 'Traders'. Note that the input variable tradeID is specified to be a key. Then display an image of the source window 'Traders'.
###Code
source_traders = server.SourceWindow(name='Traders', schema=('tradeID*:string', 'name:string'))
source_traders
###Output
_____no_output_____
###Markdown
Make a FilterWindow called 'LargeTrades'. Add an expression to the FilterWindow that defines what data passes through it. In this case, let large trades with a quantity of at least 100 pass through.
###Code
filter_large = server.FilterWindow(name='LargeTrades')
filter_large.set_expression('quantity >= 100')
###Output
_____no_output_____
###Markdown
Create a JoinWindow called 'AddTraderName' and specify the type of join as well as the join conditions. Define field selections and specify the variable pairs to be joined. Place 'l_' before all variables that come from the left side of the join, and 'r_' before those from the right side. Display the field selections. Then run a loop to add each field selection to the join window.
###Code
join = server.JoinWindow(name='AddTraderName', type='leftouter', conditions=[('tradeID', 'tradeID')])
field_selections = [['security', 'l_security'], ['quantity', 'l_quantity'], ['price', 'l_price'], ['traderID', 'l_traderID'], ['time', 'l_time'], ['name', 'r_name']]
field_selections
for i in range(len(field_selections)):
join.add_field_selection(field_selections[i][0], field_selections[i][1])
###Output
_____no_output_____
###Markdown
Add a compute window called 'TotalCost' to the project to calculate price*quantity. Though the variable totalCost does not exist yet, you must specify it in the output schema. Additionally, in the schema you must include any variables that are needed to perform the calculation. Then, add field expressions that define how each variable in your output schema is created. Notice that variables are created in the order that they were specified in your output schema.
###Code
compute = server.ComputeWindow(name='TotalCost',
schema=('tradeID*:string', 'security:string', 'quantity:int32',
'price:double', 'totalCost:double', 'traderID:int64',
'time:stamp', 'name:string'))
compute.add_field_expressions('security', 'quantity', 'price', 'price*quantity', 'traderID', 'time', 'name')
compute
###Output
_____no_output_____
###Markdown
Add an aggregate window to our project, and call it 'BySecurity'. In this window you compute quantityTotal and costTotal. Most importantly, the calculations are done by the specified key value, security*. Add field expressions to the aggregate window to declare how the values in the output schema are computed. Here, you calculate 'quantityTotal' with 'ESP_aSum(quantity)', and 'costTotal' with 'ESP_aSum(totalCost)'. Then display the aggregate window along with its schema.
###Code
aggregate = server.AggregateWindow(name='BySecurity', schema=('security*:string', 'quantityTotal:double',
'costTotal:double'))
aggregate.add_field_expressions('ESP_aSum(quantity)', 'ESP_aSum(totalCost)')
aggregate
###Output
_____no_output_____
###Markdown
Assemble the project by defining edges between each of the windows. Create an edge between the 'Trades' source window and the 'LargeTrades' filter window with the data role. Create an edge between the filter window 'LargeTrades' and the join window 'AddTraderName' with the role 'left', specifying the left side of the join. Create an edge between the source window 'Traders' and the join window 'AddTraderName' with the role 'right', specifying the right side of the join. Create an edge between the join window 'AddTraderName' and the compute window 'TotalCost' with the data role. Finally, create an edge between the compute window 'TotalCost' and the aggregate window 'BySecurity' with the data role.
###Code
source_trades.add_target(filter_large, role='data')
filter_large.add_target(join, role='left')
source_traders.add_target(join, role='right')
join.add_target(compute, role='data')
compute.add_target(aggregate, role='data')
my_query.add_windows(source_trades, source_traders, filter_large, join, compute, aggregate)
my_project
server.load_project(my_project)
server.get_projects()
print(my_project.to_xml(pretty=True))
trades_project['trades_cq'].to_graph(schema=True)
print(trades_project['trades_cq']['Traders'].to_xml(pretty=True))
###Output
<window-source name="Traders">
<schema>
<fields>
<field key="true" name="ID" type="int64" />
<field key="false" name="name" type="string" />
</fields>
</schema>
</window-source>
###Markdown
The code that follows explores two methods for publishing data into the project. The first publishes data from a .csv file into a source window. Begin by opening and reading a file that contains trades data.
###Code
trades_csv = open(path + "/trades.csv").read()
###Output
_____no_output_____
###Markdown
Next, open and read a file that contains traders data.
###Code
traders_csv = open(path + "/traders.csv").read()
###Output
_____no_output_____
###Markdown
Use print to confirm that the data on trades has been read
###Code
print(trades_csv)
###Output
i,n,TID1234321,ibm,1000,100.1,10002,08/Jul/2012:08:10:00.000000
i,n,TID1234322,sap,750,34.2,10003,08/Jul/2012:08:10:01.000345
i,n,TID1234323,ibm,1000,100.2,10004,08/Jul/2012:08:10:12.000001
i,n,TID1234324,ibm,1000,100.3,10004,08/Jul/2012:08:10:13.000002
i,n,TID1234325,ibm,1000,100.4,10004,08/Jul/2012:08:10:13.000002
i,n,TID1234326,sap,1000,34.3,10003,08/Jul/2012:08:10:13.000003
i,n,TID1234327,ibm,1000,100.3,10002,08/Jul/2012:08:10:13.000004
i,n,TID1234328,sap,1000,32,10003,08/Jul/2012:08:10:13.000005
i,n,TID1234329,ibm,1000,100,10004,08/Jul/2012:08:10:13.000006
i,n,TID1234330,sap,90,32,10003,08/Jul/2012:08:10:13.000011
i,n,TID1234331,ibm,80,100.3,10004,08/Jul/2012:08:10:13.000012
###Markdown
Isolate the two source windows ('Trades' and 'Traders') into which data is published.
###Code
w_trades = trades_project['trades_cq']['Trades']
w_traders = trades_project['trades_cq']['Traders']
###Output
_____no_output_____
###Markdown
Publish data into the 'Trades' and 'Traders' source windows
###Code
w_trades.publish_events(trades_csv, dateformat='%d/%b/%Y:%H:%M:%S', format='csv')
w_traders.publish_events(traders_csv, format='csv')
###Output
_____no_output_____
###Markdown
Confirm that the data has been published into the 'Trades' source window
###Code
w_trades.get_events()
###Output
_____no_output_____
###Markdown
After events begin to stream in, retrieve them from the 'Trades' source window
###Code
w_trades2 = trades_project['trades_cq']['Trades']
w_trades2.get_events()
###Output
_____no_output_____
###Markdown
When you specify a streaming chart, it creates an independent subscriber to the associated window. Thus, you do not need to subscribe to that window on a separate line of code beforehand.
###Code
from esppy.plotting import StreamingChart
w_BySecurity=trades_project['trades_cq']['BySecurity']
collection = conn.getEventCollection(w_BySecurity)
securityQuantity = visuals.createBarChart(collection,y="quantityTotal",title="Total Quantity Sold By Security")
securityQuantity
w_trades2 = trades_project['trades_cq']['Trades']
w_trades2.subscribe()
w_trades2
w_trades2.info()
w_trades2.head()
w_trades2.describe()
stream = conn.getEventStream(w_trades2)
quantAndPrice = visuals.createTimeSeries(stream,time="time",y=["quantity","price"])
quantAndPrice
###Output
_____no_output_____ |
Labs/Laboratorio-01.ipynb | ###Markdown
MAT281 - 2nd Semester 2020. Professor: Francisco Alfaro Medina. Problem 01 a) Compute the number $\pi$. Writing out the series, the function looks as follows:
###Code
def Calculo_pi(n):
    suma = 0  # this variable accumulates the series
    for k in range(1,n + 1):
        suma = ((-1)**(k + 1))/(2*k - 1) + suma  # add each term of the given series; multiply by 4 at the end
    return 4 * suma
Calculo_pi(3)
Calculo_pi(100000)
###Output
_____no_output_____
###Markdown
b) Compute the number $e$. We need a function, ```factorial()```, for the factorials that appear as denominators in the series, so the functions look as follows:
###Code
def factorial(numero):  # this function computes the factorial that appears in the denominator of the series
    if numero == 1 or numero == 0:
        return 1
    else:
        return (numero * factorial(numero - 1))
def Calculo_e(largo):
    suma = 0  # this variable accumulates the series
    for k in range(0,largo):
        denominador = factorial(k)
        suma = 1/denominador + suma  # build the series term by term
    return suma
Calculo_e(3)
Calculo_e(1000)
###Output
_____no_output_____
###Markdown
Problem 02 Since the problem centers mainly on the sum of the proper divisors, we create the function ```suma_propios()``` to obtain them, and then the function we are looking for, ```Amigos()```, which tells us whether or not two numbers are amicable.
###Code
def suma_propios(numero):  # this function returns the sum of the proper divisors of a number
    if numero == 1:
        return 0
    divisores = []
    numero2 = int(numero/2 + 1)  # only up to half the number, since anything larger cannot divide it
    for k in range(1,numero2):
        if numero % k == 0:
            divisores.append(k)  # add each divisor to a list and sum them at the end
    return sum(divisores)
def Amigos(numero1,numero2):
    if suma_propios(numero1) == numero2 and numero1 == suma_propios(numero2):  # finally, check the amicable-number condition
        return True
    return False
Amigos(220,284)
Amigos(6,5)
###Output
_____no_output_____
###Markdown
Problem 03 The ```Collatz()``` function looks as follows:
###Code
def Collatz(Numero):
    if Numero <= 1:
        return 1
    collatz = [Numero]  # add the first number to the list
    while Numero != 1:
        if Numero % 2 == 1:  # then apply the given operation for each number
            Numero = int(Numero * 3 + 1)  # until we reach 1
            collatz.append(Numero)
        else:
            Numero = int(Numero / 2)
            collatz.append(Numero)
    return collatz
Collatz(9)
###Output
_____no_output_____
###Markdown
Problem 04 Since ```Goldbach()``` expresses a number as the sum of two primes, we first need to check whether the numbers we will add are prime or not; the function ```esonoprimo()``` does that, and since we then work only with primes, we look for the combination that meets the goal:
###Code
def esonoprimo(numero):  # this function checks whether the given number is prime
    if numero < 2:       # returns 0 if it is not prime
        return 0         # and 1 if it is
    elif numero == 2:
        return 1
    numero2 = int(numero / 2 + 1)
    for k in range(2,numero2):
        if numero % k == 0:
            return 0
    return 1
def Goldbach(Numero):
    if Numero <= 3:  # if the number is 3 or less it cannot be the sum of two primes
        return False
    primos = [k for k in range(2,Numero) if esonoprimo(k)==1]  # collect the primes smaller
    for elemento in primos:                                    # than the number in question
        for number in primos:
            if elemento + number == Numero:  # look for a combination of primes
                primos.append(elemento)      # that adds up to the number in question;
                primos.append(number)        # by Goldbach's conjecture
                return (primos[-2],primos[-1])  # this condition is always met
Goldbach(4)
Goldbach(6)
Goldbach(8)
Goldbach(1000)
###Output
_____no_output_____ |
opencharts/medium-t500-mean-spread.ipynb | ###Markdown
Ensemble mean and spread for 500 hPa geopotential This notebook will provide you guidance how to explore and plot ECMWF open dataset to produce the map from the ECMWF open charts web product. The original product can be found on this link: https://apps.ecmwf.int/webapps/opencharts/products/medium-t500-mean-spread The full list of available Open data products can be found [here](https://www.ecmwf.int/en/forecasts/datasets/open-data), and more information can be found in the [User documentation](https://confluence.ecmwf.int/display/UDOC/ECMWF+Open+Data+-+Real+Time). Access to ECMWF Open data is governed by the Creative Commons CC-BY-4.0 licence and associated [Terms of Use](https://apps.ecmwf.int/datasets/licences/general/). In applying this licence, ECMWF does not waive the privileges and immunities granted to it by virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction To find out how to obtain the access to the full forecast dataset at higher resolution please visit our [Access page](https://www.ecmwf.int/en/forecasts/accessing-forecasts). Retrieve DataThis product takes in input 1 parameter:* [Geopotential height](https://apps.ecmwf.int/codes/grib/param-db/?id=156) In this example, we will use: - [**ecmwf.opendata**](https://github.com/ecmwf/ecmwf-opendata) Client to download the data- [**ecmwf.data**](https://github.com/ecmwf/ecmwf-data) library to read and process the data - [**magpye**](https://magpye.readthedocs.io) to plot the result First we need to install them in the current Jupyter kernel: Note: If you are running the notebook on MyBinder or already have the libraries installed, go directly to importing the libraries.Note: If you don't have these libraries installed, click on three dots below, uncomment the code and run the next cell.
###Code
#!pip install ecmwf-data ecmwf-opendata magpye
import ecmwf.data as ecdata
from magpye import GeoMap
from ecmwf.opendata import Client
client = Client("ecmwf", beta=True)
###Output
_____no_output_____
###Markdown
For the geopotential height at 500 hPa, ensemble mean and spread are available for download. Ensemble mean and ensemble spread have a different **type** in the request. One data request can have only one type, so we need two requests to download these datasets.
###Code
parameters = ['gh']
em_filename = 'medium-t500-mean-spread_em.grib'
es_filename = 'medium-t500-mean-spread_es.grib'
###Output
_____no_output_____
###Markdown
Setting date to 0 will download today's data. Removing date and time altogether from the request will download the latest data. Try commenting out date and time to download latest forecast!
###Code
client.retrieve(
date=-1,
time=0,
step=144,
stream="enfo",
type="em",
levtype="pl",
levelist=[500],
param=parameters,
target=em_filename
)
client.retrieve(
date=-1,
time=0,
step=144,
stream="enfo",
type="es",
levtype="pl",
levelist=[500],
param=parameters,
target=es_filename
)
###Output
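###Markdown
As mentioned above, omitting date and time from the request retrieves the latest available forecast. A minimal optional sketch of such a request, otherwise identical to the one above:
###Code
# Optional: retrieve the latest available run by omitting `date` and `time`.
# Same parameters as above (ensemble mean, 500 hPa, step 144).
client.retrieve(
    step=144,
    stream="enfo",
    type="em",
    levtype="pl",
    levelist=[500],
    param=parameters,
    target=em_filename,
)
###Output
_____no_output_____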
###Markdown
Reading and processing the data. Now we can use **ecmwf.data** to read the file.
###Code
gh_em = ecdata.read(em_filename)
gh_es = ecdata.read(es_filename)
###Output
_____no_output_____
###Markdown
The **describe()** function will give us the overview of the dataset.
###Code
gh_em.describe('gh')
gh_es.describe()
###Output
_____no_output_____
###Markdown
We can use **ls()** function to list all the fields in the file we downloaded.
###Code
gh_es.ls()
###Output
_____no_output_____
###Markdown
Geopotential height has units gpm (geopotential meters), but on the ECMWF Open charts it is plotted in geopotential decameters. To reproduce the plot we need to divide by 10.
###Code
gh_em /= 10
gh_es = gh_es/10
###Output
_____no_output_____
###Markdown
Plotting the data. And finally, we can plot the data on the map.
###Code
fig = GeoMap(area_name='europe')
fig.coastlines(land_colour="cream",resolution="medium")
fig.contour_shaded(gh_es, style="msl_spread_magenta")
fig.contour_lines(gh_em, style="red_i5")
fig.coastlines(resolution="medium")
fig.gridlines()
fig.title(["Ensemble mean and spread for 300 hPa geopotential",
"START TIME: <grib_info key='base-date' format='%a %d %B %Y %H' />",
"VALID TIME: <grib_info key='valid-date' format='%a %d %B %Y %H' />, STEP: <grib_info key='step' />"])
fig.legend()
fig.footer("© European Centre for Medium-Range Weather Forecasts (ECMWF) Source: www.ecmwf.int Licence: CC-BY-4.0 and ECMWF Terms of Use (https://apps.ecmwf.int/datasets/licences/general/)", logo='ecmwf')
fig.show()
###Output
_____no_output_____
###Markdown
Ensemble mean and spread for 500 hPa geopotential This notebook will provide you guidance how to explore and plot ECMWF open dataset to produce the map from the ECMWF open charts web product. The original product can be found on this link: https://apps.ecmwf.int/webapps/opencharts/products/medium-t500-mean-spread The full list of available Open data products can be found [here](https://www.ecmwf.int/en/forecasts/datasets/open-data), and more information can be found in the [User documentation](https://confluence.ecmwf.int/display/UDOC/ECMWF+Open+Data+-+Real+Time). Access to ECMWF Open data is governed by the Creative Commons CC-BY-4.0 licence and associated [Terms of Use](https://apps.ecmwf.int/datasets/licences/general/). In applying this licence, ECMWF does not waive the privileges and immunities granted to it by virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction To find out how to obtain the access to the full forecast dataset at higher resolution please visit our [Access page](https://www.ecmwf.int/en/forecasts/accessing-forecasts). Retrieve DataThis product takes in input 1 parameter:* [Geopotential height](https://apps.ecmwf.int/codes/grib/param-db/?id=156) In this example, we will use: - [**ecmwf.opendata**](https://github.com/ecmwf/ecmwf-opendata) Client to download the data- [**ecmwf.data**](https://github.com/ecmwf/ecmwf-data) library to read and process the data - [**magpye**](https://magpye.readthedocs.io) to plot the result First we need to install them in the current Jupyter kernel: Note: If you are running the notebook on MyBinder or already have the libraries installed, go directly to importing the libraries.Note: If you don't have these libraries installed, click on three dots below, uncomment the code and run the next cell.
###Code
#!pip install ecmwf-data ecmwf-opendata magpye
import ecmwf.data as ecdata
from magpye import GeoMap
from ecmwf.opendata import Client
client = Client("ecmwf", beta=True)
###Output
_____no_output_____
###Markdown
For the geopotential height at 500 hPa, ensemble mean and spread are available for download. Ensemble mean and ensemble spread have a different **type** in the request. One data request can have only one type, so we need two requests to download these datasets.
###Code
parameters = ['gh']
em_filename = 'medium-t500-mean-spread_em.grib'
es_filename = 'medium-t500-mean-spread_es.grib'
em_filename
###Output
_____no_output_____
###Markdown
Setting date to 0 will download today's data. Removing date and time altogether from the request will download the latest data. Try commenting out date and time to download latest forecast!
###Code
client.retrieve(
date=0,
time=0,
step=144,
stream="enfo",
type="em",
levtype="pl",
levelist=[500],
param=parameters,
target=em_filename
)
client.retrieve(
date=0,
time=0,
step=144,
stream="enfo",
type="es",
levtype="pl",
levelist=[500],
param=parameters,
target=es_filename
)
###Output
###Markdown
Reading and processing the data. Now we can use **ecmwf.data** to read the file.
###Code
gh_em = ecdata.read(em_filename)
gh_es = ecdata.read(es_filename)
###Output
_____no_output_____
###Markdown
The **describe()** function will give us the overview of the dataset.
###Code
gh_em.describe('gh')
gh_es.describe()
###Output
_____no_output_____
###Markdown
We can use **ls()** function to list all the fields in the file we downloaded.
###Code
gh_es.ls()
###Output
_____no_output_____
###Markdown
Geopotential height has units gpm (geopotential meters), but on the ECMWF Open charts it is plotted in geopotential decameters. To reproduce the plot we need to divide by 10.
###Code
gh_em /= 10
gh_es = gh_es/10
###Output
_____no_output_____
###Markdown
Plotting the data. And finally, we can plot the data on the map.
###Code
fig = GeoMap(area_name='europe')
fig.coastlines(land_colour="cream",resolution="medium")
fig.contour_shaded(gh_es, style="magenta_50")
fig.contour_lines(gh_em, style="red_i5")
fig.coastlines(resolution="medium")
fig.gridlines()
fig.title(["Ensemble mean and spread for 300 hPa geopotential",
"START TIME: <grib_info key='base-date' format='%a %d %B %Y %H' />",
"VALID TIME: <grib_info key='valid-date' format='%a %d %B %Y %H' />, STEP: <grib_info key='step' />"])
fig.legend()
fig.footer("© European Centre for Medium-Range Weather Forecasts (ECMWF) Source: www.ecmwf.int Licence: CC-BY-4.0 and ECMWF Terms of Use (https://apps.ecmwf.int/datasets/licences/general/)", logo='ecmwf')
fig.show()
###Output
_____no_output_____ |
.ipynb_checkpoints/Chapter 2 - Working With Lists-checkpoint.ipynb | ###Markdown
Chapter 2: Working With Lists

Much of the remainder of this book is dedicated to using data structures to produce analysis that is elegant and efficient. To use the words of economics, you are making a long-term investment in your human capital by working through these exercises. Once you have invested in these fixed costs, you can work with data at low marginal cost.

If you are familiar with other programming languages, you may be accustomed to working with arrays. An array must be cast to house a particular data type (_float_, _int_, _string_, etc.). By default, Python works with dynamic lists instead of arrays. Dynamic lists are not cast as a particular type.

Working with Lists

| New Concepts | Description |
| --- | --- |
| Dynamic List | A dynamic list is encapsulated by brackets _([])_. A list is mutable. Elements can be added to or deleted from a list on the fly. |
| List Concatenation | Two lists can be joined together in the same manner that strings are concatenated. |
| List Indexing | Lists are indexed with the first element indexed as zero and the last element as the length of (number of elements in) the list less one. Indexes are called using brackets – i.e., _lst[0]_ calls the 0th element in the list. |

In later chapters, we will combine lists with dictionaries to build essential data structures. We will also work with more efficient and convenient data structures using the numpy and pandas libraries.

Below we make our first lists. One will be empty. Another will contain integers. Another will have floats. Another strings. Another will mix these:
###Code
#lists.py
empty_list = []
int_list = [1,2,3,4,5]
float_list = [1.0,2.0,3.0,4.0,5.0]
string_list = ["Many words", "impoverished meaning"]
mixed_list = [1,2.0, "Mix it up"]
print(empty_list)
print(int_list)
print(float_list)
print(string_list)
print(mixed_list)
###Output
[]
[1, 2, 3, 4, 5]
[1.0, 2.0, 3.0, 4.0, 5.0]
['Many words', 'impoverished meaning']
[1, 2.0, 'Mix it up']
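###Markdown
To see the contrast with cast arrays described above, here is a brief optional sketch using the standard-library array module (not used elsewhere in this chapter): a typed array rejects the mixed values that a dynamic list accepts.
###Code
from array import array

int_array = array("i", [1, 2, 3, 4, 5])    # cast to hold integers only
print(int_array)

try:
    int_array.append("Mix it up")          # a string does not fit the cast type
except TypeError as error:
    print("TypeError:", error)

mixed_list = [1, 2.0, "Mix it up"]         # a dynamic list accepts mixed types
print(mixed_list)
###Output
_____no_output_____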
###Markdown
Often we will want to transform lists. In the following example, we will concatenate two lists, which means we will join the lists together:
###Code
#concatenateLists
list1 = [5, 4, 9, 10, 3, 5]
list2 = [6, 3, 2, 1, 5, 3]
join_lists = list1 + list2
print("list1:", list1)
print("list2:", list2)
print(join_lists)
###Output
list1: [5, 4, 9, 10, 3, 5]
list2: [6, 3, 2, 1, 5, 3]
[5, 4, 9, 10, 3, 5, 6, 3, 2, 1, 5, 3]
###Markdown
We have joined the lists together to make one long list. We can already observe one way in which Python will be useful for helping us to organize data. If we were doing this in a spreadsheet, we would have to identify the row and column values of the elements or copy and paste the desired values into new rows or enter formulas into cells. Python accomplishes this for us with much less work. For a list of numbers, we will usually perform some arithmetic operation or categorize these values in order to identify meaningful subsets within the data. This requires accessing the elements, which Python allows us to do efficiently. In the next exercise we will call elements by index number from the same lists we have already made. We will use the list's append method to make a copy of a list. The append method adds an element to the end of a list.
###Code
#copyListElementsForLoop.py
list1 = [5, 4, 9, 10, 3, 5]
list2 = [6, 3, 2, 1, 5, 3]
print("list1 elements:", list1[0], list1[1], list1[2], list1[3], list1[4])
print("list2 elements:", list2[0], list2[1], list2[2], list2[3], list2[4])
list3 = []
j = len(list1)
for i in range(j):
list3.append(list1[i])
k = len(list2)
for i in range(k):
list3.append(list2[i])
print("list3 elements:", list3)
###Output
list1 elements: 5 4 9 10 3
list2 elements: 6 3 2 1 5
list3 elements: [5, 4, 9, 10, 3, 5, 6, 3, 2, 1, 5, 3]
###Markdown
For Loops and _range()_

| New Concepts | Description |
| --- | --- |
| _list(obj)_ | Transforms an iterable object, such as a tuple or set, into a dynamic list. |
| _range(j, k, l)_ | Identifies a range of integers from _j_ to _k–1_ separated by some interval _l_. |
| _len(obj)_ | Measures the length of an iterable object. |

We can use a for loop to more efficiently execute this task. As we saw in the last chapter, the for loop iterates over a series of elements: for element in list. Often, this list is a range of numbers that represents the indices of a dynamic list. For this purpose we call:
###Code
for i in range(j, k, l):
<execute script>
###Output
_____no_output_____
###Markdown
The for loop cycles through all integers of interval _l_ between _j_ and _k - 1_, executing a script for each value. This script may explicitly use the value _i_. If you do not specify a starting value, _j_, the range function assumes that the sequence starts at _0_. Likewise, if you do not specify an interval, _l_, range assumes that this interval is _1_. Thus, _for i in range(k)_ is interpreted as _for i in range(0, k, 1)_. We will again use the loop in its simplest form, cycling through numbers from _0_ to _(k – 1)_, where the length of the list is the value _k_. These cases are illustrated below in _range.py_.
###Code
#range.py
list1 = list(range(9))
list2 = list(range(-9,9))
list3 = list(range(-9,9,3))
print(list1)
print(list2)
print(list3)
###Output
[0, 1, 2, 3, 4, 5, 6, 7, 8]
[-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]
[-9, -6, -3, 0, 3, 6]
###Markdown
The for loop will automatically identify the elements contained in _range()_ without requiring you to call _list()_. This is illustrated below in _forLoopAndRange.py_.
###Code
#forLoopAndRange.py
for i in range(10):
print(i)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
Having printed _i for all i in range(0, 10, 1)_, we produce a set of integers from 0 to 9. If we were only printing index numbers from a range, for loops would not be very useful. For loops can be used to produce a wide variety of outputs. Often, you will call a for loop to cycle through the index of a particular list. Since lists are indexed starting with 0 and for loops also assume 0 as an initial value, cycling through a list with a for loop is straightforward. For a list named _A_, just use the command:
###Code
for i in range(len(A)):
<execute script>
###Output
_____no_output_____
###Markdown
This command will call all integers from 0 up to one less than the length of _A_. In other words, it will call all indices associated with _A_.
###Code
#copyListElementsForLoop.py
list1 = [5, 4, 9, 10, 3, 5]
list2 = [6, 3, 2, 1, 5, 3]
print("list1 elements:", list1[0], list1[1], list1[2], list1[3], list1[4])
print("list2 elements:", list2[0], list2[1], list2[2], list2[3], list2[4])
list3 = []
j = len(list1)
for i in range(j):
list3.append(list1[i])
k = len(list2)
for i in range(k):
list3.append(list2[i])
print("list3 elements:", list3)
###Output
list1 elements: 5 4 9 10 3
list2 elements: 6 3 2 1 5
list3 elements: [5, 4, 9, 10, 3, 5, 6, 3, 2, 1, 5, 3]
###Markdown
Creating a New List with Values from Other Lists

| New Concepts | Description |
| --- | --- |
| List Methods i.e., _.append()_, _.insert()_ | List methods append and insert increase the length of a list by adding an element to the list. |
| If Statements | An if statement executes the block of code contained in it if the conditions stipulated by the if statement are met (they return True). |
| Else Statement | In the case that the conditions stipulated by an if statement are not met, an else statement executes an alternate block of code. |
| Operators i.e., _==_, _!=_, _<_, _>_, _<=_, _>=_ | The operator indicates the condition relating two variables that is to be tested. |

We can extend the exercise by summing the ith elements in each list. In the exercise below, _list3_ is the sum of the ith elements from _list1_ and _list2_.
###Code
#addListElements.py
list1 = [5, 4, 9, 10, 3, 5]
list2 = [6, 3, 2, 1, 5, 3]
print("list1 elements:", list1[0], list1[1], list1[2], list1[3], list1[4])
print("list2 elements:", list2[0], list2[1], list2[2], list2[3], list2[4])
list3 = []
j = len(list1)
for i in range(j):
list3.append(list1[i] + list2[i])
print("list3:", list3)
###Output
list1 elements: 5 4 9 10 3
list2 elements: 6 3 2 1 5
list3: [11, 7, 11, 11, 8, 8]
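###Markdown
The comparison operators listed in the table above each evaluate to a boolean. A quick illustrative sketch:
###Code
a = 5
b = 7
print("a == b:", a == b)   # equal to
print("a != b:", a != b)   # not equal to
print("a < b:", a < b)     # less than
print("a >= b:", a >= b)   # greater than or equal to
###Output
_____no_output_____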
###Markdown
In the last exercise, we created an empty list, _list3_. We could not fill the list by calling an element in it directly, as no elements yet exist in the list. Instead, we use the append method that is owned by the list object. Alternately, we can use the insert method. It takes the form _list.insert(index, object)_. This is shown in a later example. We appended the summed values of the first two lists in the order that the elements are ranked. We could have summed them in the opposite order by summing element 5, then 4, ..., then 0.
###Code
#addListElements.py
list1 = [5, 4, 9, 10, 3, 5]
list2 = [6, 3, 2, 1, 5, 3]
print("list1 elements:", list1[0], list1[1], list1[2], list1[3], list1[4])
print("list2 elements:", list2[0], list2[1], list2[2], list2[3], list2[4])
list3 = []
j = len(list1)
for i in range(j):
list3.insert(0,list1[i] + list2[i])
print("list3:", list3)
###Output
list1 elements: 5 4 9 10 3
list2 elements: 6 3 2 1 5
list3: [8, 8, 11, 11, 7, 11]
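###Markdown
Since _.insert(index, object)_ can place a new element at any position, a short additional sketch contrasts it with _.append()_, which always adds to the end:
###Code
colors = ["red", "blue", "orange"]
colors.append("black")       # added at the end
colors.insert(1, "white")    # inserted at index 1, shifting later elements right
print(colors)
###Output
_____no_output_____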
###Markdown
In the next exercise we will use a function that we have not used before. We will check the length of each list whose elements are summed. We want to make sure that if we call an index from one list, it exists in the other. We do not want to call a list index if it does not exist. That would produce an error. We can check if a statement is true using an if statement. As with the for loop, the if statement is followed by a colon. This tells the program that the execution below or in front of the if statement depends upon the truth of the condition specified. The code that follows below an if statement must be indented, as this identifies what block of code is subject to the statement.
###Code
if True:
print("execute script")
###Output
execute script
###Markdown
If the statement returns _True_, then the commands that follow the if-statement will be executed. Though not stated explicitly, we can think of the program as passing over the if statement to the remainder of the script:
###Code
if True:
print("execute script")
else:
pass
###Output
execute script
###Markdown
If the statement returns _False_, then the program will continue reading the script.
###Code
if False:
print("execute script")
else:
pass
###Output
_____no_output_____
###Markdown
Nothing is printed in the console since there is no further script to execute.We will want to check if the lengths of two different lists are the same. To check that a variable has a stipulated value, we use two equals signs. Using _==_ allows the program to compare two values rather setting the value of the variable on the left, as would occur with only one equals sign.Following the if statement is a for loop. If the length of _list1_ and _list2_ are equal, the program will set the ith element of _list3_ equal to the sum of the ith elements from _list1_ and _list2_. In this example, the for loop will cycle through index values 0, 1, 2, 3, 4, and 5.We can take advantage of the for loop to use _.insert()_ in a manner that replicates the effect of our use of _append()_. We will insert the sum of the ith elements of _list1_ and _list2_ at the ith element of _list3_.
###Code
#addListElements3.py
list1 = [5, 4, 9, 10, 3, 5]
list2 = [6, 3, 2, 1, 5, 3]
print("list1 elements:", list1[0], list1[1], list1[2], list1[3], list1[4])
print("list2 elements:", list2[0], list2[1], list2[2], list2[3], list2[4])
list3 = []
j = len(list1)
if j == len(list2):
for i in range(0, len(list2)):
list3.insert(i,list1[i] + list2[i])
print("list3:", list3)
###Output
list1 elements: 5 4 9 10 3
list2 elements: 6 3 2 1 5
list3: [11, 7, 11, 11, 8, 8]
###Markdown
The if condition may be followed by an else statement. This tells the program to run a different command if the condition of the if statement is not met. In this case, we want the program to tell us why the condition was not met. In other cases, you may want to create other if statements to create a tree of possible outcomes. Below we use an if-else statement to identify when lists are not the same length. We remove the last element from _list2_ to create lists of different lengths:
###Code
#addListElements4.py
list1 = [5, 4, 9, 10, 3, 5]
list2 = [6, 3, 2, 1, 5]
print("list1 elements:", list1[0], list1[1], list1[2], list1[3], list1[4])
print("list2 elements:", list2[0], list2[1], list2[2], list2[3], list2[4])
list3 = []
j = len(list1)
if j == len(list2):
for i in range(0, len(list2)):
list3.insert(i,list1[i] + list2[i])
else:
print("Lists are not the same length, cannot perform element-wise operations.")
print("list3:", list3)
###Output
list1 elements: 5 4 9 10 3
list2 elements: 6 3 2 1 5
Lists are not the same length, cannot perform element-wise operations.
list3: []
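###Markdown
The example above used a single if/else pair. As noted earlier, you can also chain several checks into a tree of outcomes; Python provides elif for this. A minimal sketch:
###Code
value = 0
if value > 0:
    print("positive")
elif value < 0:
    print("negative")
else:
    print("zero")
###Output
_____no_output_____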
###Markdown
Since the condition passed to the if statement was false, no values were appended to *list3*.

Removing List Elements

| New Concepts | Description |
| --- | --- |
| _del_ | The command del is used to delete an element from a list. |
| List Methods i.e., _.pop()_, _.remove()_, _.append()_ | Lists contain methods that can be used to modify the list. These include _.pop(i)_, which removes and returns the element at index _i_ (the last element if no index is given), allowing it to be saved as a separate object. Another method, _.remove()_, deletes an explicitly identified element. _.append(x)_ adds an additional element at the end of the list. |

Perhaps you want to remove an element from a list. There are a few means of accomplishing this. Which one you choose depends on the ends desired.
###Code
#deleteListElements.py
list1 = ["red", "blue", "orange", "black", "white", "golden"]
list2 = ["nose", "ice", "fire", "cat", "mouse", "dog"]
print("lists before deletion: ")
for i in range(len(list1)):
print(list1[i],"\t", list2[i])
del list1[0]
del list2[5]
print()
print("lists after deletion: ")
for i in range(len(list1)):
print(list1[i], "\t",list2[i])
###Output
lists before deletion:
red nose
blue ice
orange fire
black cat
white mouse
golden dog
lists after deletion:
blue nose
orange ice
black fire
white cat
golden mouse
###Markdown
We have deleted _"red"_ from _list1_ and _"dog"_ from _list2_. By printing the elements of each list once before and once after one element is deleted from each, we can note the difference in the lists over time. What if we knew that we wanted to remove the elements but did not want to check what index each element is associated with? We can use the remove function owned by each list. We will tell _list1_ to remove _"red"_ and _list2_ to remove _"dog"_.
###Code
#removeListElements.py
list1 = ["red", "blue", "orange", "black", "white", "golden"]
list2 = ["nose", "ice", "fire", "cat", "mouse", "dog"]
print("lists before deletion: ")
for i in range(len(list1)):
print(list1[i],"\t", list2[i])
list1.remove("red")
list2.remove("dog")
print()
print("lists after deletion: ")
for i in range(len(list1)):
print(list1[i], "\t",list2[i])
###Output
lists before deletion:
red nose
blue ice
orange fire
black cat
white mouse
golden dog
lists after deletion:
blue nose
orange ice
black fire
white cat
golden mouse
###Markdown
We have achieved the same result using a different means. What if we wanted to keep track of the element that we removed? Before deleting or removing the element, we could assign the value to a different object. Let's do this before using the remove function:
###Code
#removeAndSaveListElementsPop.py
#define list1 and list2
list1 = ["red", "blue", "orange", "black", "white", "golden"]
list2 = ["nose", "ice", "fire", "cat", "mouse", "dog"]
#identify what is printed in for loop
print("lists before deletion: ")
if len(list1) == len(list2):
# use for loop to print lists in parallel
for i in range(len(list1)):
print(list1[i],"\t", list2[i])
# remove list elements and save them as variables '_res"
list1_res = "red"
list2_res = "dog"
list1.remove(list1_res)
list2.remove(list2_res)
print()
# print lists again as in lines 8-11
print("lists after deletion: ")
if len(list1) == len(list2):
for i in range(len(list1)):
print(list1[i], "\t",list2[i])
print()
print("Res1", "\tRes2")
print(list1_res, "\t" + (list2_res))
###Output
lists before deletion:
red nose
blue ice
orange fire
black cat
white mouse
golden dog
lists after deletion:
blue nose
orange ice
black fire
white cat
golden mouse
Res1 Res2
red dog
###Markdown
An easier way to accomplish this is to use _.pop_, another method owned by each list.
###Code
#removeListElementsPop.py
#define list1 and list2
list1 = ["red", "blue", "orange", "black", "white", "golden"]
list2 = ["nose", "ice", "fire", "cat", "mouse", "dog"]
#identify what is printed in for loop
print("lists before deletion: ")
# use for loop to print lists in parallel
for i in range(len(list1)):
print(list1[i],"\t", list2[i])
# remove list elements and save them as variables '_res"
list1_res = list1.pop(0)
list2_res = list2.pop(5)
print()
# print lists again as in lines 8-11
print("lists after deletion: ")
for i in range(len(list1)):
print(list1[i], "\t",list2[i])
print()
print("Res1", "\tRes2")
print(list1_res, "\t" + (list2_res))
###Output
lists before deletion:
red nose
blue ice
orange fire
black cat
white mouse
golden dog
lists after deletion:
blue nose
orange ice
black fire
white cat
golden mouse
Res1 Res2
red dog
###Markdown
More with For Loops. When you loop through element values, it is not necessary that these are consecutive. You may skip values at some interval. The next example returns to the earlier _addListElements.py_ examples. This time, we pass the number 2 as the third argument to _range()_. Now range will count by twos from _0_ to _j – 1_. This will make _list3_ shorter than before.
###Code
#addListElements5.py
list1 = [5, 4, 9, 10, 3, 5]
list2 = [6, 3, 2, 1, 5, 3]
print("list1 elements:", list1[0], list1[1], list1[2], list1[3], list1[4])
print("list2 elements:", list2[0], list2[1], list2[2], list2[3], list2[4])
list3 = []
j = len(list1)
if j == len(list2):
for i in range(0, j, 2):
list3.append(list1[i] + list2[i])
else:
print("Lists are not the same length, cannot perform element-wise operations.")
print("list3:", list3)
###Output
list1 elements: 5 4 9 10 3
list2 elements: 6 3 2 1 5
list3: [11, 11, 8]
###Markdown
We entered the sum of elements 0, 2, and 4 from _list1_ and _list2_ into _list3_. Since these were appended to _list3_, they are indexed in _list3[0]_, _list3[1]_, and _list3[2]_. For loops in Python can iterate in sequence over the elements of objects that are iterable. These include lists, strings, keys and values from dictionaries, as well as the range function we have already used. You may use a for loop that calls each element in the list without identifying the index of each element.
###Code
obj = ["A", "few", "words", "to", "print"]
for x in obj:
print(x)
###Output
A
few
words
to
print
###Markdown
Each _x_ called is an element from _obj_. Where before we passed _len(list1)_ to the for loop, we now pass _list1_ itself to the for loop and append each element _x_ to _list2_.
###Code
#forLoopWithoutIndexer.py
list1 = ["red", "blue", "orange", "black", "white", "golden"]
list2 = []
for x in list1:
list2.append(x)
print("list1\t", "list2")
k = len(list1)
j = len(list2)
if len(list1) == len(list2):
for i in range(0, len(list1)):
print(list1[i], "\t", list2[i])
###Output
list1 list2
red red
blue blue
orange orange
black black
white white
golden golden
###Markdown
Sorting Lists, Errors, and Exceptions

| New Concepts | Description |
| --- | --- |
| _sorted()_ | The function sorted() sorts a list in order of numerical or alphabetical value. |
| passing errors i.e., _try_ and _except_ | A try statement will pass over an error if one is generated by the code in the try block. In the case that an error is passed, code from the except block will be called. This should typically identify the type of error that was passed. |

We can sort lists using the sorted() function, which orders the list either by number or alphabetically. We reuse lists from the last examples to show this.
###Code
#sorting.py
list1 = [5, 4, 9, 10, 3, 5]
list2 = ["red", "blue", "orange", "black", "white", "golden"]
print("list1:", list1)
print("list2:", list2)
sorted_list1 = sorted(list1)
sorted_list2 = sorted(list2)
print("sortedList1:", sorted_list1)
print("sortedList2:", sorted_list2)
###Output
list1: [5, 4, 9, 10, 3, 5]
list2: ['red', 'blue', 'orange', 'black', 'white', 'golden']
sortedList1: [3, 4, 5, 5, 9, 10]
sortedList2: ['black', 'blue', 'golden', 'orange', 'red', 'white']
###Markdown
What happens if we try to sort a list that has both strings and integers? You might expect that Python would sort integers and then strings, or vice versa. If you try this, you will raise an error:
###Code
#sortingError.py
list1 = [5, 4, 9, 10, 3, 5]
list2 = ["red", "blue", "orange", "black", "white", "golden"]
list3 = list1 + list2
print("list1:", list1)
print("list2:", list2)
print("list3:", list3)
sorted_list1 = sorted(list1)
sorted_list2 = sorted(list2)
print("sortedList1:", sorted_list1)
print("sortedList2:", sorted_list2)
sorted_list3 = sorted(list3)
print("sortedList3:", sorted_list3)
print("Execution complete!")
###Output
list1: [5, 4, 9, 10, 3, 5]
list2: ['red', 'blue', 'orange', 'black', 'white', 'golden']
list3: [5, 4, 9, 10, 3, 5, 'red', 'blue', 'orange', 'black', 'white', 'golden']
sortedList1: [3, 4, 5, 5, 9, 10]
sortedList2: ['black', 'blue', 'golden', 'orange', 'red', 'white']
###Markdown
The script returns an error. If this error is raised during execution, it will interrupt the program. One way to deal with this is to ask Python to try to execute some script and to execute some other command if an error would normally be raised:
###Code
#sortingError.py
list1 = [5, 4, 9, 10, 3, 5]
list2 = ["red", "blue", "orange", "black", "white", "golden"]
list3 = list1 + list2
print("list1:", list1)
print("list2:", list2)
print("list3:", list3)
sorted_list1 = sorted(list1)
sorted_list2 = sorted(list2)
print("sortedList1:", sorted_list1)
print("sortedList2:", sorted_list2)
try:
sorted_list3 = sorted(list3)
print("sortedList3:", sorted_list3)
except:
print("TypeError: unorderable types: str() < int() "
"ignoring error")
print("Execution complete!")
###Output
list1: [5, 4, 9, 10, 3, 5]
list2: ['red', 'blue', 'orange', 'black', 'white', 'golden']
list3: [5, 4, 9, 10, 3, 5, 'red', 'blue', 'orange', 'black', 'white', 'golden']
sortedList1: [3, 4, 5, 5, 9, 10]
sortedList2: ['black', 'blue', 'golden', 'orange', 'red', 'white']
TypeError: unorderable types: str() < int() ignoring error
Execution complete!
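###Markdown
A slightly more idiomatic variant of the try/except block, shown here only as a sketch rather than as the book's original code, catches TypeError specifically and prints the message Python generated, so the text does not have to be hard-coded:
###Code
try:
    sorted_list3 = sorted(list3)
    print("sortedList3:", sorted_list3)
except TypeError as error:
    print("TypeError:", error, "- ignoring error")
print("Execution complete!")
###Output
_____no_output_____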
###Markdown
We successfully avoided the error and instead called an alternate operation defined under except. The use for this will become more obvious as we move along. We will use try and except from time to time and note the reason when we do.

Slicing a List

| New Concepts | Description |
| --- | --- |
| slice i.e., _list\[a:b\]_ | A slice of a list is a copy of a portion (or all) of a list from index a to b – 1. |

Sometimes, we may want to access several elements at once. Python allows us to do this with a slice. Technically, when you call a list in its entirety, you take a slice that includes the entire list. We can do this explicitly like this:
###Code
#fullSlice.py
some_list = [3, 1, 5, 6, 1]
print(some_list[:])
###Output
[3, 1, 5, 6, 1]
###Markdown
Using *some_list\[:\]* is equivalent to creating a slice using *some_list\[min_index:list_length\]* where *min_index = 0* and *list_length = len(some_list)*:
###Code
#fullSlice2.py
some_list = [3, 1, 5, 6, 1]
min_index = 0
max_index = len(some_list)
print("minimum:", min_index)
print("maximum:", max_index)
print("Full list using slice", some_list[min_index:max_index])
print("Full list without slice", some_list)
###Output
minimum: 0
maximum: 5
Full list using slice [3, 1, 5, 6, 1]
Full list without slice [3, 1, 5, 6, 1]
###Markdown
This is not very useful if we do not use this to take a smaller subsection of a list. Below, we create a new array that is a subset of the original array. As you might expect by now, *full_list\[7\]* calls the 8th element. Since indexing begins with the 0th element, this element is actually counted as the 7th element. Also, similar to the command *for i in range(3, 7)*, the slice calls elements 3, 4, 5, and 6:
###Code
#partialSlice.py
min_index = 3
max_index = 7
full_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
partial_list = full_list[min_index:max_index]
print("Full List:", full_list)
print("Partial List:", partial_list)
print("full_list[7]:", full_list[7])
###Output
Full List: [1, 2, 3, 4, 5, 6, 7, 8, 9]
Partial List: [4, 5, 6, 7]
full_list[7]: 8
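###Markdown
Either bound of a slice may also be omitted. A short extra sketch (not in the original text) shows the common shorthand forms:
###Code
full_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print("first three:", full_list[:3])    # same as full_list[0:3]
print("from index 3:", full_list[3:])   # same as full_list[3:len(full_list)]
###Output
_____no_output_____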
###Markdown
Nested For Loops

| New Concepts | Description |
| --- | --- |
| Nested For Loops | A for loop may contain other for loops. They are useful for multidimensional data structures. |

Creative use of for loops can save the programmer a lot of work. While you should be careful not to create so many layers of for loops and if statements that code is difficult to interpret ("Flat is better than nested"), you should be comfortable with the structure of nested for loops and, eventually, their use in structures like dictionaries and generators. A useful way to become acquainted with the power of multiple for loops is to identify the result of each iteration of the nested loops. In the code below, the first for loop will count from 0 to 4. For each value of *i*, the second for loop will cycle through values 0 to 4 for _j_.
###Code
#nestedForLoop.py
print("i", "j")
for i in range(5):
for j in range(5):
print(i, j)
###Output
i j
0 0
0 1
0 2
0 3
0 4
1 0
1 1
1 2
1 3
1 4
2 0
2 1
2 2
2 3
2 4
3 0
3 1
3 2
3 3
3 4
4 0
4 1
4 2
4 3
4 4
###Markdown
Often, we will want to employ values generated by for loops in a manner other than printing the values generated directly by the for loops. We may, for example, want to create a new value constructed from _i _ and _j _. Below, this value is constructed as the sum of _i _ and _j _.
###Code
#nestedForLoop.py
print("i", "j", "i+j")
for i in range(5):
for j in range(5):
val = i + j
print(i, j, val)
###Output
i j i+j
0 0 0
0 1 1
0 2 2
0 3 3
0 4 4
1 0 1
1 1 2
1 2 3
1 3 4
1 4 5
2 0 2
2 1 3
2 2 4
2 3 5
2 4 6
3 0 3
3 1 4
3 2 5
3 3 6
3 4 7
4 0 4
4 1 5
4 2 6
4 3 7
4 4 8
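###Markdown
The same nested loops can also fill a list of lists, storing the printed values in a small two-dimensional structure. A brief sketch:
###Code
table = []
for i in range(5):
    row = []
    for j in range(5):
        row.append(i + j)   # same value as printed above
    table.append(row)
for row in table:
    print(row)
###Output
_____no_output_____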
###Markdown
If we interpret the results as a table, we can better understand the intuition of for loops. Lighter shading indicates lower values of _i _ with shading growing darker as the value of _i _ increases.| | | | |__j__| | | | --- | --- | --- | --- | --- | --- | --- || | | __0__ | __1__ |__2__|__3__ | __4__ || | __0__ | 0 | 1 | 2 | 3 | 4 || | __1__ | 1 | 2 | 3 | 4 | 5 || __i__ | __2__ | 2 | 3 | 4 | 5 | 6 || | __3__ | 3 | 4 | 5 | 6 | 7 || | __4__ | 4 | 5 | 6 | 7 | 8 | Lists, Lists, and More Lists| New Concepts | Description || --- | --- || _min(lst)_ | The function _min()_ returns the lowest value from a list of values passed to it. || _max(lst)_ | The function _max()_ returns that highest value from a list of values passed to it. || generators i.e., _[val for val in lst]_ |Generators use a nested for loop to create an iterated data structure. |Lists have some convenient features. You can find the maximum and minimum values in a list with the _min()_ and _max()_ functions:
###Code
# minMaxFunctions.py
list1 = [20, 30, 40, 50]
max_list_value = max(list1)
min_list_value = min(list1)
print("maximum:", max_list_value, "minimum:", min_list_value)
###Output
maximum: 50 minimum: 20
###Markdown
We could have used a for loop to find these values. The program below performs the same task:
###Code
#minMaxFuntionsByHand.py
list1 = [20, 30, 40, 50]
# initial smallest value is infinite
# will be replaced if a value from the list is lower
min_list_val = float("inf")
# initial largest values is negative infinite
# will be replaced if a value from the list is higher
max_list_val = float("-inf")
for x in list1:
if x < min_list_val:
min_list_val = x
if x > max_list_val:
max_list_val = x
print("maximum:", max_list_val, "minimum:", min_list_val)
###Output
maximum: 50 minimum: 20
###Markdown
We chose to make the starting value of min_list_value large and positive and the starting value of *max_list_value* large and negative. The for loop cycles through these values and assigns the value, _x_, from the list to *min_list_value* if the value is less than the current value assigned to *min_list_value* and to *max_list_value* if the value is greater than the current value assigned to *max_list_value*. Earlier in the chapter, we constructed lists using list comprehension (i.e., the _list()_ function) and by generating lists and setting values with _.append()_ and _.insert()_. We may also use a generator to create a list. Generators are convenient as they provide a compact means of creating a list that is easier to interpret. They follow the same format as the _list()_ function.
###Code
#listFromGenerator.py
generator = (i for i in range(20))
print(generator)
list1 = list(generator)
print(list1)
list2 = [2 * i for i in range(20)]
print(list2)
###Output
<generator object <genexpr> at 0x0000023355A96228>
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38]
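###Markdown
A generator expression can also include a condition that filters which values enter the list. A brief optional sketch:
###Code
even_list = [i for i in range(20) if i % 2 == 0]
print(even_list)
###Output
_____no_output_____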
|
Week-1/CS6501_Lab_0_15.ipynb | ###Markdown
**Artificial Intelligence - MSc** CS6501 - MACHINE LEARNING AND APPLICATIONS. Instructor: Enrique Naredo. CS6501_Lab-0.15_sol. **Extra Numpy problems**
###Code
import numpy as np
###Output
_____no_output_____ |
quickstart.ipynb | ###Markdown
The training and evaluation of a cell-based NARVAL R2B4 model. This notebook quickly illustrates the process of training and evaluating a cell-based, neural-network-based cloud cover parameterization. We train and evaluate the neural network on coarse-grained and preprocessed NARVAL R2B4 data. In this notebook, we work with data that is already prepared for this purpose.
###Code
# Importing necessary packages
import sys
import time
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import os
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Nadam
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# Print options for pandas
pd.options.display.float_format = "{:,.2g}".format
# Reloading custom file to incorporate changes dynamically
import importlib
import my_classes
importlib.reload(my_classes)
# Data path
path = '/pf/b/b309170'
path_data = path + '/workspace_icon-ml/iconml_clc/extract_from_the_data/coarse-grained_and_preprocessed/narval_r2b4_cell_based'
# Stop training after how many minutes.
# We decreased the training time for this notebook to an hour.
from my_classes import TimeOut
t0 = time.time()
timeout = 60
# Random seed
NUM = 1
tf.random.set_seed(NUM)
# List GPUs
gpus = tf.config.experimental.list_physical_devices('GPU')
gpus
###Output
_____no_output_____
###Markdown
Loading the data

**Input-variables:**
- *fr_land*: Fraction of land
- *z_g*: Geometric height at full levels
- *q_v*: Specific water vapor content
- *q_i*: Specific cloud ice content
- *temp*: Temperature
- *pres*: Pressure

**Output-variables:**
- *clc*: Cloud cover (cloud volume fraction)
###Code
# Load coarse-grained and preprocessed data. I split up the data so that it fits into the 100MB file size limit on GitHub
input_train = [np.load(path_data + '/cloud_cover_all_days_input_train_%d.npy'%k) for k in range(14)]
input_valid = [np.load(path_data + '/cloud_cover_all_days_input_valid_%d.npy'%k) for k in range(2)]
input_test = [np.load(path_data + '/cloud_cover_all_days_input_test_%d.npy'%k) for k in range(4)]
output_train = [np.load(path_data + '/cloud_cover_all_days_output_train_%d.npy'%k) for k in range(3)]
input_train = np.concatenate(input_train, axis=0)
input_valid = np.concatenate(input_valid, axis=0)
input_test = np.concatenate(input_test, axis=0)
output_train = np.concatenate(output_train, axis=0)
output_valid = np.load(path_data + '/cloud_cover_all_days_output_valid.npy')
output_test = np.load(path_data + '/cloud_cover_all_days_output_test.npy')
print('The training data consists of %d training samples.'%input_train.shape[0])
print('The validation data consists of %d validation samples.'%input_valid.shape[0])
print('The test data consists of %d test samples.'%input_test.shape[0])
print('We have %d input features for the neural network.'%input_train.shape[1])
assert input_train.shape[1] == input_valid.shape[1] == input_test.shape[1]
###Output
The training data consists of 26482169 training samples.
The validation data consists of 2942463 validation samples.
The test data consists of 7356158 test samples.
We have 6 input features for the neural network.
###Markdown
Looking at the data

The corresponding means, standard deviations and features can be found in: /n1_cell_based_narval_r2b4/saved_models/model_grid_cell_based_v3_final_1.txt
###Code
features = ['qv', 'qi', 'temp', 'pres', 'zg', 'fr_land']
mean = np.array([5.37518440e-03, 4.65389731e-07, 2.59635412e+02, 5.52329389e+04, 6.79260772e+03, 2.58097095e-01])
std = np.array([6.01943993e-03, 3.95009930e-06, 3.55940285e+01, 3.26642242e+04, 6.20726361e+03, 4.28313535e-01])
# Looking at the data. It is already normalized.
pd.DataFrame(input_train, columns=features).describe()
# The validation/test data is normalized w.r.t. the training data, so e.g. the means are also close to zero
assert 1e-3 < np.sum(np.abs(input_valid.mean(axis=0))) < 1e-2
assert 1e-3 < np.sum(np.abs(input_test.mean(axis=0))) < 1e-2
# Reverse scaling to look at the original data
pd.DataFrame((input_train*std)+mean, columns=features).describe()
###Output
_____no_output_____
###Markdown
The neural network
###Code
# Set up the neural network
model = Sequential()
model.add(Dense(256, activation='relu', input_dim = 6))
model.add(Dense(256, activation='relu'))
model.add(Dense(1, activation='linear'))
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense (Dense) (None, 256) 1792
_________________________________________________________________
dense_1 (Dense) (None, 256) 65792
_________________________________________________________________
dense_2 (Dense) (None, 1) 257
=================================================================
Total params: 67,841
Trainable params: 67,841
Non-trainable params: 0
_________________________________________________________________
###Markdown
**Here, we train the neural network**
###Code
model.compile(loss='mse', optimizer=Nadam())
# Incorporates early stopping, i.e. the weights from the best concluded epoch are used
time_callback = TimeOut(t0, timeout)
# We increase the batch size from 32 (in the paper) to 512 to require much less time per epoch
history = model.fit(input_train, output_train, batch_size=512, epochs=70, verbose=1,
validation_data=(input_valid, output_valid), callbacks=[time_callback])
###Output
Starting training
Epoch 1/70
51723/51723 [==============================] - 243s 5ms/step - loss: 27.5949 - val_loss: 18.3962
Epoch 2/70
51723/51723 [==============================] - 238s 5ms/step - loss: 18.3481 - val_loss: 18.2304
Epoch 3/70
51723/51723 [==============================] - 241s 5ms/step - loss: 17.6708 - val_loss: 17.8188
Epoch 4/70
51723/51723 [==============================] - 248s 5ms/step - loss: 17.3494 - val_loss: 17.4409
Epoch 5/70
51723/51723 [==============================] - 237s 5ms/step - loss: 17.1172 - val_loss: 16.7895
Epoch 6/70
51723/51723 [==============================] - 242s 5ms/step - loss: 16.9198 - val_loss: 16.5442
Epoch 7/70
51723/51723 [==============================] - 241s 5ms/step - loss: 16.7319 - val_loss: 16.9072
Epoch 8/70
51723/51723 [==============================] - 243s 5ms/step - loss: 16.5933 - val_loss: 16.2358
Epoch 9/70
51723/51723 [==============================] - 241s 5ms/step - loss: 16.4655 - val_loss: 16.3494
Epoch 10/70
51723/51723 [==============================] - 233s 5ms/step - loss: 16.3324 - val_loss: 16.0755
Epoch 11/70
51723/51723 [==============================] - 247s 5ms/step - loss: 16.2916 - val_loss: 16.0633
Epoch 12/70
51723/51723 [==============================] - 242s 5ms/step - loss: 16.1737 - val_loss: 16.6643
Epoch 13/70
51723/51723 [==============================] - 231s 4ms/step - loss: 16.1244 - val_loss: 16.1331
Epoch 14/70
51723/51723 [==============================] - 231s 4ms/step - loss: 16.0196 - val_loss: 16.0688
Epoch 15/70
48524/51723 [===========================>..] - ETA: 13s - loss: 15.9902
Reached 60.000 minutes of training, stopping
51723/51723 [==============================] - 217s 4ms/step - loss: 15.9882 - val_loss: 16.1479
Restore model weights from the end of the best epoch
###Markdown
**Let's look at how the training proceeded**
###Code
#Plotting the training progress
if len(history.history['loss']) > len(history.history['val_loss']):
del history.history['loss'][-1]
pd.DataFrame(history.history).plot(figsize=(8,5))
plt.grid(True)
plt.ylabel('Mean Squared Error')
plt.xlabel('Number of epochs')
plt.show()
###Output
_____no_output_____
###Markdown
**We evaluate the mean-squared errors on the training/validation/test sets** In the paper, we have MSEs of around 15 for the cell-based NARVAL R2B4 model
###Code
train_loss = model.evaluate(input_train, output_train, verbose=2, batch_size=2000)
valid_loss = model.evaluate(input_valid, output_valid, verbose=2, batch_size=1000)
test_loss = model.evaluate(input_test, output_test, verbose=2, batch_size=1000)
print('\nTraining loss: %.4f'%(train_loss))
print('Validation loss: %.4f'%(valid_loss))
print('Test loss: %.4f'%(test_loss))
print('Training epochs: %d'%(len(history.history['val_loss'])))
###Output
13242/13242 - 23s - loss: 15.9891
2943/2943 - 4s - loss: 16.0633
7357/7357 - 11s - loss: 16.0345
Training loss: 15.9891
Validation loss: 16.0633
Test loss: 16.0345
Training epochs: 15
###Markdown
Evaluation of the neural network

In the data we have just worked with (input_train, input_valid, input_test), we do not know the vertical layer of the data samples, so we cannot use that data to evaluate the neural network on vertical profiles in this example. What we did instead was to load the NARVAL R2B4 data and store it as an npy file (which saves space) in /extract_from_the_data/narval_r2b4_data_for_the_quickstart.npy
###Code
# # We ran this cell once:
# from my_classes import load_data
# # Loads the NARVAL data into the data_dict dictionary
# order_of_vars=['qv', 'qi', 'temp', 'pres', 'zg', 'fr_land', 'clc']
# data_dict = load_data(source='narval', days='all', vert_interp=True, order_of_vars=order_of_vars)
# #Reshaping into nd-arrays of equaling shapes (have timesteps x vert x hor)
# data_dict['zg'] = np.repeat(np.expand_dims(data_dict['zg'], 0), data_dict['qv'].shape[0], axis=0)
# data_dict['fr_land'] = np.repeat(np.expand_dims(data_dict['fr_land'], 0), data_dict['qv'].shape[0], axis=0)
# data_dict['fr_land'] = np.repeat(np.expand_dims(data_dict['fr_land'], 1), data_dict['qv'].shape[1], axis=1)
# assert data_dict['fr_land'].shape == data_dict['qv'].shape == data_dict['zg'].shape
# # Convert data_dict into numpy file and save as npy. We cannot take more than 54 time steps due to size limits on GitHub.
# np_arr = np.array([data_dict[key][:54] for key in data_dict.keys()])
# np.save('./extract_from_the_data/narval_r2b4_data_for_the_quickstart.npy', np_arr)
###Output
_____no_output_____
###Markdown
**First evaluation method** Samples on x-axis and cloud cover on y-axis. This we can do without knowing which vertical layer corresponds to a data sample.
###Code
no_samples = 50
clc_pred = model.predict(input_test[:no_samples])[:,0]
clc_true = output_test[:no_samples]
a = np.linspace(1, no_samples, no_samples)
fig = plt.figure(figsize=(15,4))
ax = fig.add_subplot(111, title='NNs predictions on random samples from the test set',
xlabel='Sample', ylabel='Cloud Cover')
ax.plot(a, clc_pred, 'ro')
ax.plot(a, clc_true, 'bo')
ax.legend(['Predictions', 'Truth'])
###Output
_____no_output_____
###Markdown
**Second evaluation method** Mean profile of Cloud Cover on x-axis with Vertical Layer on y-axis. Here, I'm taking only the first 54 time steps of the NARVAL R2B4 data.
###Code
# Load and scale the data
np_arr = np.load('./extract_from_the_data/narval_r2b4_data_for_the_quickstart.npy')
scaled_data = np.array([(np_arr[i] - mean[i])/std[i] for i in range(len(features))])
scaled_data = np.append(scaled_data, np_arr[-1:], axis=0) # Add cloud cover
# Removes data above 21 km
scaled_data = scaled_data[:,:,4:,:]
scaled_data.shape # features x time steps x vertical layers x horizontal fields
# Predicted cloud cover mean per vertical layer
clc_pred_mean = []
for v_layer in range(21, 48):
pred = np.transpose(model.predict_on_batch(np.transpose(scaled_data[:-1,:,v_layer-21,:])))
pred_adj = np.minimum(np.maximum(pred, 0), 100)
clc_pred_mean.append(np.mean(pred_adj))
# Cloud cover mean in the data
clc_data_mean = np.mean(scaled_data[-1], axis=(0, 2))
# Plot mean cloud cover per vertical layer
a = np.linspace(21, 48, 27)
fig = plt.figure(figsize=(7,4))
ax = fig.add_subplot(111, xlabel='Mean Cloud Cover [%]', ylabel='Vertical layer')
ax.plot(clc_data_mean, a)
ax.plot(clc_pred_mean, a)
plt.gca().invert_yaxis()
ax.legend(['Ground truth', 'Prediction'])
zg_mean = np.mean(np_arr[np.where([features[i] == 'zg' for i in range(len(features))])[0][0], :, 4:, :], axis=(0,2))
# Plot mean cloud cover per averaged height
fig = plt.figure(figsize=(7,4))
ax = fig.add_subplot(111, xlabel='Mean Cloud Cover [%]', ylabel='z [km]')
ax.plot(clc_data_mean, zg_mean)
ax.plot(clc_pred_mean, zg_mean)
ax.legend(['Ground truth', 'Prediction'])
###Output
_____no_output_____
###Markdown
**Third visualization method** R2-value on x-axis with vertical layers on y-axis
###Code
# r2 = 1-MSE/VAR
r2 = []
for v_layer in range(23, 48):
pred = np.transpose(model.predict_on_batch(np.transpose(scaled_data[:-1,:,v_layer-21,:])))
pred_adj = np.minimum(np.maximum(pred, 0), 100)
mse = np.mean((pred_adj - scaled_data[-1, :, v_layer-21, :])**2)
var = np.var(scaled_data[-1, :, v_layer-21, :])
r2.append(1-mse/var)
# Plotting
fig = plt.figure()
ax = fig.add_subplot(111, xlim=(0, 1), title='R2-values of the cell-based model excluding the two upper-most layers',
xlabel='R2-value/Coefficient of determination', ylabel='z [km]')
ax.plot(r2, zg_mean[2:], 'bo', ls='--')
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
TensorFlow Recommenders: Quickstart

In this tutorial, we build a simple matrix factorization model using the [MovieLens 100K dataset](https://grouplens.org/datasets/movielens/100k/) with TFRS. We can use this model to recommend movies for a given user.

Import TFRS

First, install and import TFRS:
###Code
!pip install -q tensorflow-recommenders
!pip install -q --upgrade tensorflow-datasets
from typing import Dict, Text
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_recommenders as tfrs
###Output
_____no_output_____
###Markdown
Read the data
###Code
# Ratings data.
ratings = tfds.load('movielens/100k-ratings', split="train")
# Features of all the available movies.
movies = tfds.load('movielens/100k-movies', split="train")
# Select the basic features.
ratings = ratings.map(lambda x: {
"movie_title": x["movie_title"],
"user_id": x["user_id"]
})
movies = movies.map(lambda x: x["movie_title"])
###Output
_____no_output_____
###Markdown
Build vocabularies to convert user ids and movie titles into integer indices for embedding layers:
###Code
user_ids_vocabulary = tf.keras.layers.experimental.preprocessing.StringLookup(mask_token=None)
user_ids_vocabulary.adapt(ratings.map(lambda x: x["user_id"]))
movie_titles_vocabulary = tf.keras.layers.experimental.preprocessing.StringLookup(mask_token=None)
movie_titles_vocabulary.adapt(movies)
###Output
_____no_output_____
###Markdown
Define a model

We can define a TFRS model by inheriting from `tfrs.Model` and implementing the `compute_loss` method:
###Code
class MovieLensModel(tfrs.Model):
# We derive from a custom base class to help reduce boilerplate. Under the hood,
# these are still plain Keras Models.
def __init__(
self,
user_model: tf.keras.Model,
movie_model: tf.keras.Model,
task: tfrs.tasks.Retrieval):
super().__init__()
# Set up user and movie representations.
self.user_model = user_model
self.movie_model = movie_model
# Set up a retrieval task.
self.task = task
def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
# Define how the loss is computed.
user_embeddings = self.user_model(features["user_id"])
movie_embeddings = self.movie_model(features["movie_title"])
return self.task(user_embeddings, movie_embeddings)
###Output
_____no_output_____
###Markdown
Define the two models and the retrieval task.
###Code
# Define user and movie models.
user_model = tf.keras.Sequential([
user_ids_vocabulary,
tf.keras.layers.Embedding(user_ids_vocabulary.vocab_size(), 64)
])
movie_model = tf.keras.Sequential([
movie_titles_vocabulary,
tf.keras.layers.Embedding(movie_titles_vocabulary.vocab_size(), 64)
])
# Define your objectives.
task = tfrs.tasks.Retrieval(metrics=tfrs.metrics.FactorizedTopK(
movies.batch(128).map(movie_model)
)
)
###Output
_____no_output_____
###Markdown
Fit and evaluate it.

Create the model, train it, and generate predictions:
###Code
# Create a retrieval model.
model = MovieLensModel(user_model, movie_model, task)
model.compile(optimizer=tf.keras.optimizers.Adagrad(0.5))
# Train for 3 epochs.
model.fit(ratings.batch(4096), epochs=3)
# Use brute-force search to set up retrieval using the trained representations.
index = tfrs.layers.factorized_top_k.BruteForce(model.user_model)
index.index(movies.batch(100).map(model.movie_model), movies)
# Get some recommendations.
_, titles = index(np.array(["42"]))
print(f"Top 3 recommendations for user 42: {titles[0, :3]}")
###Output
_____no_output_____
###Markdown
A brief explanation of the revamped CBRAIN module

In this guide, we will go through the steps required to preprocess the raw climate model output, train a neural network and then analyze how good it is.
###Code
from cbrain.imports import *
from cbrain.cam_constants import *
from cbrain.utils import *
###Output
_____no_output_____
###Markdown
Preprocessing

Preprocessing works using the `preprocessing.py` script in the main directory of the repository along with a configuration file. Let's have a look at one such configuration file to see what is required. Here is `000_test.yml`:

```yaml
vars : [QBP, QCBP, QIBP, TBP, VBP, PS, SOLIN, SHFLX, LHFLX, PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND, QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN, Qdt_adiabatic, QCdt_adiabatic, QIdt_adiabatic, Tdt_adiabatic, Vdt_adiabatic]
in_dir : /local/S.Rasp/sp32fbp_andkua/
in_fns : AndKua_aqua_SPCAM3.0_sp_fbp32.cam2.h1.0000-01-0*-00000.nc
out_dir: /local/S.Rasp/preprocessed_data/
out_fn: 000_train.nc
val_in_fns: AndKua_aqua_SPCAM3.0_sp_fbp32.cam2.h1.0000-02-0*-00000.nc
val_out_fn: 000_valid.nc
norm_fn: 000_norm.nc
```

One new thing is that it is not necessary to specifically state the input and output variables for the preprocessing script. These will be chosen later in the data generator. This means that `vars` can contain as many variables as possible. This should reduce the number of times the preprocessing has to be run.

Some of the variables are saved directly in the model output, e.g. `PHQ`, while others, e.g. `QBP`, have to be derived. Currently, the following derived variables are available: `*dt_adiabatic`, `*BP` and `PRECST`. If you want to add others, you have to implement it in `convert_dataset.py`.

`in_dir` and `in_fns` describe the raw climate model file names. `out_dir` and `out_fn` denote the path to the preprocessed file. The training dataset will additionally be preshuffled.

If `val_*` is given, a separate validation dataset will be created.

If `norm_fn` is given, statistics will be computed from the training dataset, e.g. the mean and standard deviation. Note that for large training files this takes a very, very long time. Therefore, I recommend computing the normalization file on a small dataset and using it for the large training dataset.
###Code
!python preprocessing.py -c pp_config/000_test.yml
!ls /local/S.Rasp/preprocessed_data/000*
###Output
/local/S.Rasp/preprocessed_data/000_norm.nc
/local/S.Rasp/preprocessed_data/000_train.nc
/local/S.Rasp/preprocessed_data/000_train_shuffle.nc
/local/S.Rasp/preprocessed_data/000_valid.nc
###Markdown
Training the model step-by-step

Let's now go through the steps to train a neural network, starting with the new data generator.

Data generator

To read the preprocessed file and feed the data to the neural net, we will use the `DataGenerator` class in the cbrain module. At this stage we now define the variables we want in the input and output of the neural network.
###Code
in_vars = ['QBP', 'QCBP', 'QIBP', 'TBP', 'VBP',
'Qdt_adiabatic', 'QCdt_adiabatic', 'QIdt_adiabatic', 'Tdt_adiabatic', 'Vdt_adiabatic',
'PS', 'SOLIN', 'SHFLX', 'LHFLX']
out_vars = ['PHQ', 'PHCLDLIQ', 'PHCLDICE', 'TPHYSTND', 'QRL', 'QRS', 'DTVKE',
'FSNT', 'FSNS', 'FLNT', 'FLNS', 'PRECT', 'PRECTEND', 'PRECST', 'PRECSTEN']
###Output
_____no_output_____
###Markdown
Next we need to think about how we want to normalize/scale the inputs and outputs. For the inputs we will use a pretty standard normalization, which we will get to later.

As for the outputs, we would like to scale each variable physically. For this we will create a dictionary, which will contain a factor or a vector of factors for each variable.
###Code
scale_dict = {
'PHQ': L_V/G,
'PHCLDLIQ': L_V/G,
'PHCLDICE': L_V/G,
'TPHYSTND': C_P/G,
'QRL': C_P/G,
'QRS': C_P/G,
'DTVKE': C_P/G,
'FSNT': 1,
'FSNS': 1,
'FLNT': 1,
'FLNS': 1,
'PRECT': RHO_L*L_V,
'PRECTEND': 1e-3*RHO_L*L_V,
'PRECST': RHO_L*L_V,
'PRECSTEN': 1e-3*RHO_L*L_V
}
# Takes representative value for PS since purpose is normalization
PS = 1e5; P0 = 1e5;
P = P0*hyai+PS*hybi; # Total pressure [Pa]
dP = P[1:]-P[:-1]; # Differential pressure [Pa]
for v in ['PHQ', 'PHCLDLIQ', 'PHCLDICE', 'TPHYSTND', 'QRL', 'QRS']:
scale_dict[v] *= dP
scale_dict['DTVKE'] *= (dP/DT)
###Output
_____no_output_____
###Markdown
Now we will save this dictionary as a pickle file, so that we can later load it in the training script.
###Code
save_pickle('./nn_config/scale_dicts/001_toms_scaling.pkl', scale_dict)
###Output
_____no_output_____
###Markdown
With this dictionary, we can now create a DataGenerator instance.

A word about the normalization: it is handled by Normalizer classes, defined in `normalization.py`. By default, `DataGenerator` uses the `InputNormalizer` class for input normalization, while the outputs are scaled using the `DictNormalizer`. `DataGenerator` takes a tuple of strings for `input_transform`. This tuple describes which arrays from the normalization file (`norm_fn`) the input will be subtracted by and divided by. If you want to create your own fancy normalization, you have to create such a Normalizer, which must have a `transform` method.
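If you do want to roll your own normalization, a minimal sketch could look like the class below. This is only an illustration: the class name, constructor arguments and clipping behaviour are made up here, and the exact interface `DataGenerator` expects may differ; the only assumption is that it calls a `transform` method on the data.

```python
import numpy as np

class ClippedNormalizer:
    """Illustrative custom normalizer (not part of cbrain): standard
    subtract/divide scaling followed by clipping of extreme outliers."""
    def __init__(self, sub, div, clip=5.0):
        self.sub = np.asarray(sub)    # e.g. the 'mean' array from the norm file
        self.div = np.asarray(div)    # e.g. the 'maxrs' array from the norm file
        self.clip = clip              # clip normalized values to [-clip, clip]

    def transform(self, x):
        return np.clip((x - self.sub) / self.div, -self.clip, self.clip)
```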
###Code
from cbrain.data_generator import DataGenerator
train_gen = DataGenerator(
data_fn = '/local/S.Rasp/preprocessed_data/000_train_shuffle.nc',
input_vars = in_vars,
output_vars = out_vars,
norm_fn = '/local/S.Rasp/preprocessed_data/000_norm.nc',
input_transform = ('mean', 'maxrs'),
output_transform = scale_dict,
batch_size=1024,
shuffle=True
)
X, Y = train_gen[0]; X.shape, Y.shape
###Output
_____no_output_____
###Markdown
Create a model with conservation layers

Next we need to create a model. This is just basic Keras. I will show here how to use the conservation layers written by Tom. These layers require some additional input, in particular the normalization information.
###Code
from cbrain.layers import *
from tensorflow.keras.layers import *
inp_layer = Input(shape=(304,))
x = Dense(214, activation='elu')(inp_layer)
x = SurRadLayer(
inp_div=train_gen.input_transform.div,
inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ']
)([inp_layer, x])
x = MassConsLayer(
inp_div=train_gen.input_transform.div,
inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ']
)([inp_layer, x])
out_layer = EntConsLayer(
inp_div=train_gen.input_transform.div,
inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ']
)([inp_layer, x])
model = tf.keras.models.Model(inp_layer, out_layer)
model.summary()
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 304) 0
__________________________________________________________________________________________________
dense (Dense) (None, 214) 65270 input_1[0][0]
__________________________________________________________________________________________________
sur_rad_layer (SurRadLayer) (None, 216) 0 input_1[0][0]
dense[0][0]
__________________________________________________________________________________________________
mass_cons_layer (MassConsLayer) (None, 217) 0 input_1[0][0]
sur_rad_layer[0][0]
__________________________________________________________________________________________________
ent_cons_layer (EntConsLayer) (None, 218) 0 input_1[0][0]
mass_cons_layer[0][0]
==================================================================================================
Total params: 65,270
Trainable params: 65,270
Non-trainable params: 0
__________________________________________________________________________________________________
###Markdown
Train the model with the weakly constrained energy loss

Again, this is basic Keras. Tom, however, implemented the option to penalize the network for violating physical constraints, which we will do here.

I implemented the weakly constrained loss as a class in `losses.py`. When initializing this class, you need to pass the input tensor from the model and, again, the normalization information.

This loss computes three losses internally: the standard MSE, a loss for mass conservation and a loss for enthalpy conservation. You can weight the losses by changing the `alpha_*` parameters.
###Code
from cbrain.losses import *
weak_loss = WeakLoss(inp_layer, inp_div=train_gen.input_transform.div, inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ'])
mass_loss = WeakLoss(inp_layer, inp_div=train_gen.input_transform.div, inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ'], alpha_mass=1, alpha_ent=0, name='mass_loss')
ent_loss = WeakLoss(inp_layer, inp_div=train_gen.input_transform.div, inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ'], alpha_mass=0, alpha_ent=1, name='ent_loss')
model.compile(tf.keras.optimizers.Adam(lr=0.01), loss=weak_loss, metrics=[mass_loss, ent_loss, mse])
model.fit_generator(train_gen, epochs=5)
###Output
Epoch 1/5
3448/3448 [==============================] - 109s 32ms/step - loss: 740.1542 - mass_loss: 3.6446e-11 - ent_loss: 7.8155e-11 - mean_squared_error: 1480.3085
Epoch 2/5
3448/3448 [==============================] - 67s 20ms/step - loss: 540.9411 - mass_loss: 6.3662e-11 - ent_loss: 1.4316e-10 - mean_squared_error: 1081.8822
Epoch 3/5
3448/3448 [==============================] - 67s 19ms/step - loss: 419.6522 - mass_loss: 8.7082e-11 - ent_loss: 1.6894e-10 - mean_squared_error: 839.3044
Epoch 4/5
3448/3448 [==============================] - 66s 19ms/step - loss: 356.3085 - mass_loss: 1.0223e-10 - ent_loss: 1.9572e-10 - mean_squared_error: 712.6170
Epoch 5/5
3448/3448 [==============================] - 67s 19ms/step - loss: 322.0714 - mass_loss: 1.1291e-10 - ent_loss: 2.1134e-10 - mean_squared_error: 644.1428
###Markdown
Naturally, since we are using a conserving network, the conservation losses are basically zero. Puh...

Train the network using the train.py script

Doing the training in a notebook is good for experimentation, but for testing different configurations and using large training datasets, we need a command line script, which is called `train.py`. So, let's check out how to use it.

Again we will create a configuration file. This is `000_example.yml`:

```yaml
# Example training configuration file
exp_name: 000_example
data_dir: /local/S.Rasp/preprocessed_data/
train_fn: 000_train_shuffle.nc
valid_fn: 000_valid.nc
norm_fn: 000_norm.nc
inputs: [QBP, QCBP, QIBP, TBP, VBP, Qdt_adiabatic, QCdt_adiabatic, QIdt_adiabatic, Tdt_adiabatic, Vdt_adiabatic, PS, SOLIN, SHFLX, LHFLX]
outputs: [PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND, QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN]
input_sub: mean
input_div: maxrs
output_dict: /home/s/S.Rasp/repositories/CBRAIN-CAM/nn_config/scale_dicts/001_toms_scaling.pkl
hidden_layers: [128, 214]
epochs: 10
conservation_layer: True
loss: weak_loss
```

Most of the arguments are pretty self-explanatory (I hope). If you are confused, look at the definition at the bottom of `train.py`.

This script automatically uses learning rate decay. Additionally, and very importantly for the eventual implementation in CAM, it also saves the network and weights.
###Code
!python train.py -c nn_config/000_example.yml
###Output
2019-04-03 15:23:20.846557: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 AVX512F FMA
2019-04-03 15:23:20.982342: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1432] Found device 0 with properties:
name: GeForce GTX 1080 major: 6 minor: 1 memoryClockRate(GHz): 1.7335
pciBusID: 0000:b3:00.0
totalMemory: 7.93GiB freeMemory: 5.87GiB
2019-04-03 15:23:20.982378: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1511] Adding visible gpu devices: 0
2019-04-03 15:23:21.453378: I tensorflow/core/common_runtime/gpu/gpu_device.cc:982] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-04-03 15:23:21.453417: I tensorflow/core/common_runtime/gpu/gpu_device.cc:988] 0
2019-04-03 15:23:21.453425: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1001] 0: N
2019-04-03 15:23:21.453721: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 5642 MB memory) -> physical GPU (device: 0, name: GeForce GTX 1080, pci bus id: 0000:b3:00.0, compute capability: 6.1)
04/03/2019 03:23:21 PM Create training and validation data generators
04/03/2019 03:23:21 PM Build model
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 304) 0
__________________________________________________________________________________________________
dense (Dense) (None, 256) 78080 input_1[0][0]
__________________________________________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 256) 0 dense[0][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 256) 65792 leaky_re_lu[0][0]
__________________________________________________________________________________________________
leaky_re_lu_1 (LeakyReLU) (None, 256) 0 dense_1[0][0]
__________________________________________________________________________________________________
dense_2 (Dense) (None, 256) 65792 leaky_re_lu_1[0][0]
__________________________________________________________________________________________________
leaky_re_lu_2 (LeakyReLU) (None, 256) 0 dense_2[0][0]
__________________________________________________________________________________________________
dense_3 (Dense) (None, 214) 54998 leaky_re_lu_2[0][0]
__________________________________________________________________________________________________
leaky_re_lu_3 (LeakyReLU) (None, 214) 0 dense_3[0][0]
__________________________________________________________________________________________________
sur_rad_layer (SurRadLayer) (None, 216) 0 input_1[0][0]
leaky_re_lu_3[0][0]
__________________________________________________________________________________________________
mass_cons_layer (MassConsLayer) (None, 217) 0 input_1[0][0]
sur_rad_layer[0][0]
__________________________________________________________________________________________________
ent_cons_layer (EntConsLayer) (None, 218) 0 input_1[0][0]
mass_cons_layer[0][0]
==================================================================================================
Total params: 264,662
Trainable params: 264,662
Non-trainable params: 0
__________________________________________________________________________________________________
None
04/03/2019 03:23:21 PM Compile model
04/03/2019 03:23:22 PM Train model
2019-04-03 15:23:23.590239: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1511] Adding visible gpu devices: 0
2019-04-03 15:23:23.590287: I tensorflow/core/common_runtime/gpu/gpu_device.cc:982] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-04-03 15:23:23.590296: I tensorflow/core/common_runtime/gpu/gpu_device.cc:988] 0
2019-04-03 15:23:23.590305: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1001] 0: N
2019-04-03 15:23:23.590595: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 5642 MB memory) -> physical GPU (device: 0, name: GeForce GTX 1080, pci bus id: 0000:b3:00.0, compute capability: 6.1)
Learning rate = 0.01
Epoch 1/10
3448/3448 [==============================] - 89s 26ms/step - loss: 66.6859 - mean_squared_error: 133.3718 - mass_loss: 3.4461e-10 - ent_loss: 3.2758e-10 - val_loss: 51.0592 - val_mean_squared_error: 102.1183 - val_mass_loss: 3.8388e-10 - val_ent_loss: 1.9052e-10
Learning rate = 0.01
Epoch 2/10
3448/3448 [==============================] - 82s 24ms/step - loss: 50.0245 - mean_squared_error: 100.0490 - mass_loss: 3.7326e-10 - ent_loss: 3.3711e-10 - val_loss: 50.6125 - val_mean_squared_error: 101.2250 - val_mass_loss: 4.0006e-10 - val_ent_loss: 2.0463e-10
Learning rate = 0.002
Epoch 3/10
3448/3448 [==============================] - 82s 24ms/step - loss: 44.0968 - mean_squared_error: 88.1936 - mass_loss: 3.8555e-10 - ent_loss: 3.3528e-10 - val_loss: 45.2651 - val_mean_squared_error: 90.5302 - val_mass_loss: 3.9009e-10 - val_ent_loss: 1.8705e-10
Learning rate = 0.002
Epoch 4/10
3448/3448 [==============================] - 82s 24ms/step - loss: 43.1805 - mean_squared_error: 86.3610 - mass_loss: 3.9110e-10 - ent_loss: 3.3402e-10 - val_loss: 44.7589 - val_mean_squared_error: 89.5178 - val_mass_loss: 3.6946e-10 - val_ent_loss: 1.7980e-10
Learning rate = 0.0004000000000000001
Epoch 5/10
3448/3448 [==============================] - 83s 24ms/step - loss: 41.5774 - mean_squared_error: 83.1547 - mass_loss: 3.9188e-10 - ent_loss: 3.3378e-10 - val_loss: 43.3000 - val_mean_squared_error: 86.5999 - val_mass_loss: 4.0520e-10 - val_ent_loss: 1.9103e-10
Learning rate = 0.0004000000000000001
Epoch 6/10
3448/3448 [==============================] - 83s 24ms/step - loss: 41.2948 - mean_squared_error: 82.5897 - mass_loss: 3.9295e-10 - ent_loss: 3.3513e-10 - val_loss: 43.2729 - val_mean_squared_error: 86.5458 - val_mass_loss: 3.8670e-10 - val_ent_loss: 1.8113e-10
Learning rate = 8.000000000000002e-05
Epoch 7/10
3448/3448 [==============================] - 82s 24ms/step - loss: 40.8886 - mean_squared_error: 81.7773 - mass_loss: 3.9385e-10 - ent_loss: 3.3514e-10 - val_loss: 42.9063 - val_mean_squared_error: 85.8126 - val_mass_loss: 4.0482e-10 - val_ent_loss: 1.8694e-10
Learning rate = 8.000000000000002e-05
Epoch 8/10
3448/3448 [==============================] - 82s 24ms/step - loss: 40.8127 - mean_squared_error: 81.6254 - mass_loss: 3.9597e-10 - ent_loss: 3.3652e-10 - val_loss: 42.8711 - val_mean_squared_error: 85.7422 - val_mass_loss: 4.0361e-10 - val_ent_loss: 1.8883e-10
Learning rate = 1.6000000000000003e-05
Epoch 9/10
3448/3448 [==============================] - 83s 24ms/step - loss: 40.7166 - mean_squared_error: 81.4333 - mass_loss: 3.9348e-10 - ent_loss: 3.3688e-10 - val_loss: 42.8298 - val_mean_squared_error: 85.6596 - val_mass_loss: 4.0140e-10 - val_ent_loss: 1.8511e-10
Learning rate = 1.6000000000000003e-05
Epoch 10/10
3448/3448 [==============================] - 82s 24ms/step - loss: 40.7036 - mean_squared_error: 81.4072 - mass_loss: 3.9575e-10 - ent_loss: 3.3639e-10 - val_loss: 42.8211 - val_mean_squared_error: 85.6422 - val_mass_loss: 4.0147e-10 - val_ent_loss: 1.8437e-10
04/03/2019 03:37:14 PM Saving model as ./saved_models/000_example/model.h5
###Markdown
We see that we are overfitting quite a bit which is to be expected with such a small dataset.
###Code
!ls ./saved_models/000_example/
###Output
inp_div.txt layer2_bias.txt layer4_bias.txt weights.h5
inp_sub.txt layer2_kernel.txt layer4_kernel.txt
layer1_bias.txt layer3_bias.txt model.h5
layer1_kernel.txt layer3_kernel.txt out_scale.txt
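###Markdown
If you want to reuse the saved network later from Python, something along the following lines should work. This is a hedged sketch rather than part of the original workflow: because the model contains custom layers and was compiled with a custom loss, the custom classes have to be passed via `custom_objects`, and loading with `compile=False` sidesteps deserializing the `WeakLoss`; recompile afterwards if you want to continue training.

```python
from tensorflow.keras.models import load_model
from cbrain.layers import SurRadLayer, MassConsLayer, EntConsLayer

# compile=False avoids deserializing the custom WeakLoss object;
# call model.compile(...) again if further training is needed.
model = load_model(
    './saved_models/000_example/model.h5',
    custom_objects={'SurRadLayer': SurRadLayer,
                    'MassConsLayer': MassConsLayer,
                    'EntConsLayer': EntConsLayer},
    compile=False)
```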
###Markdown
Model diagnostics

Finally, we would like to know how well our model does in more detail than just looking at the loss. For this I wrote the `ModelDiagnostics` class. It is designed to be convenient. Let's see what it can do.

For basic usage it only needs two arguments: first, the configuration file used for neural network training and, second, the data file which is to be used for validation.
###Code
from cbrain.model_diagnostics import ModelDiagnostics
md = ModelDiagnostics('nn_config/000_example.yml', '/local/S.Rasp/preprocessed_data/000_valid.nc')
###Output
_____no_output_____
###Markdown
Plotting

The first thing we can do is plot the truth alongside the model prediction. These functions take all the regular matplotlib arguments.
###Code
md.plot_double_yz(itime=0, ilon=0, var='PHQ', vmin=-8e-8, vmax=8e-8, cmap='bwr');
md.plot_double_xy(0, 0, 'PRECT');
###Output
_____no_output_____
###Markdown
Compute statistics

Last but not least, we can compute statistics over the entire validation dataset.
###Code
md.compute_stats()
###Output
_____no_output_____
###Markdown
Now there is a stats dictionary containing many statistics. If you want to implement your own statistics, you will have to do so in the `compute_stats` method.
###Code
md.stats.keys()
###Output
_____no_output_____
###Markdown
One common statistic, for example, is the R2. So let's plot that. The R2 is averaged over time but all other dimensions are still available. Further, the vertical level is still stacked, so we will have to figure out the variable indices.
###Code
md.stats['r2'].shape
plt.matshow(md.stats['r2'][:, :, md.get_output_var_idx('PHQ')].mean(1).T, vmin=0, vmax=1, cmap='Spectral')
plt.colorbar(shrink=0.7)
###Output
_____no_output_____
###Markdown
We also like looking at the horizontally averaged R2 for each variable.
###Code
md.stats['hor_r2'].shape
md.valid_gen.output_vars[:7]
# Get the vertical coordinate in pressure levels
PS = 1e5; P0 = 1e5;
P = P0*hyai+PS*hybi; # Total pressure [Pa]
P = (P[1:] + P[:-1]) / 2 / 100
fig, ax = plt.subplots(figsize=(8, 8))
for v in md.valid_gen.output_vars[:7]:
ax.plot(md.stats['hor_r2'][md.get_output_var_idx(v)], P, label=v)
ax.set_xlim(0, 1)
ax.invert_yaxis()
plt.legend()
###Output
_____no_output_____
###Markdown
Acme: Quickstart

Guide to installing Acme and training your first D4PG agent.

Select your environment library

Note: `dm_control` requires a valid Mujoco license.
###Code
environment_library = 'gym' # @param ['dm_control', 'gym']
###Output
_____no_output_____
###Markdown
Add your Mujoco license here

Note: only required for `dm_control`.
###Code
mjkey = """
""".strip()
if not mjkey and environment_library == 'dm_control':
raise ValueError(
        'A Mujoco license is required for `dm_control`; if you do not have one, '
        'consider selecting `gym` from the dropdown menu in the cell above.')
###Output
_____no_output_____
###Markdown
Installation

Install Acme
###Code
!pip install dm-acme
!pip install dm-acme[reverb]
!pip install dm-acme[tf]
###Output
_____no_output_____
###Markdown
Install the environment library

Without a valid license you won't be able to use the `dm_control` environments but can still follow this colab using the `gym` environments.

If you have a personal Mujoco license (_not_ an institutional one), you may need to follow the instructions at https://research.google.com/colaboratory/local-runtimes.html to run a Jupyter kernel on your local machine. This will allow you to install `dm_control` by following the instructions in https://github.com/deepmind/dm_control and using a personal MuJoCo license.
###Code
#@test {"skip": true}
if environment_library == 'dm_control':
mujoco_dir = "$HOME/.mujoco"
# Install OpenGL dependencies
!apt-get update && apt-get install -y --no-install-recommends \
libgl1-mesa-glx libosmesa6 libglew2.0
# Get MuJoCo binaries
!wget -q https://www.roboti.us/download/mujoco200_linux.zip -O mujoco.zip
!unzip -o -q mujoco.zip -d "$mujoco_dir"
# Copy over MuJoCo license
!echo "$mjkey" > "$mujoco_dir/mjkey.txt"
# Install dm_control
!pip install dm_control
# Configure dm_control to use the OSMesa rendering backend
%env MUJOCO_GL=osmesa
# Check that the installation succeeded
try:
from dm_control import suite
env = suite.load('cartpole', 'swingup')
pixels = env.physics.render()
except Exception as e:
raise RuntimeError(
'Something went wrong during installation. Check the shell output above '
'for more information. If you do not have a valid Mujoco license, '
'consider selecting `gym` in the dropdown menu at the top of this '
'Colab.') from e
else:
del suite, env, pixels
elif environment_library == 'gym':
!pip install gym
###Output
Requirement already satisfied: gym in /home/templarares/acmeNew/lib/python3.7/site-packages (0.10.11)
Requirement already satisfied: requests>=2.0 in /home/templarares/acmeNew/lib/python3.7/site-packages (from gym) (2.26.0)
Requirement already satisfied: scipy in /home/templarares/acmeNew/lib/python3.7/site-packages (from gym) (1.7.3)
Requirement already satisfied: six in /home/templarares/acmeNew/lib/python3.7/site-packages (from gym) (1.16.0)
Requirement already satisfied: pyglet>=1.2.0 in /home/templarares/acmeNew/lib/python3.7/site-packages (from gym) (1.3.2)
Requirement already satisfied: numpy>=1.10.4 in /home/templarares/acmeNew/lib/python3.7/site-packages (from gym) (1.21.4)
Requirement already satisfied: future in /home/templarares/acmeNew/lib/python3.7/site-packages (from pyglet>=1.2.0->gym) (0.18.2)
Requirement already satisfied: idna<4,>=2.5 in /home/templarares/acmeNew/lib/python3.7/site-packages (from requests>=2.0->gym) (3.3)
Requirement already satisfied: certifi>=2017.4.17 in /home/templarares/acmeNew/lib/python3.7/site-packages (from requests>=2.0->gym) (2021.10.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /home/templarares/acmeNew/lib/python3.7/site-packages (from requests>=2.0->gym) (1.26.7)
Requirement already satisfied: charset-normalizer~=2.0.0 in /home/templarares/acmeNew/lib/python3.7/site-packages (from requests>=2.0->gym) (2.0.9)
###Markdown
Install visualization packages
###Code
!sudo apt-get install -y xvfb ffmpeg
!pip install imageio
!pip install PILLOW
!pip install pyvirtualdisplay
###Output
_____no_output_____
###Markdown
Import Modules
###Code
!unset http_proxy
!unset https_proxy
!echo $http_proxy
!echo $https_proxy
import IPython
from acme import environment_loop
from acme import specs
from acme import wrappers
from acme.agents.tf import d4pg
from acme.tf import networks
from acme.tf import utils as tf2_utils
from acme.utils import loggers
import numpy as np
import sonnet as snt
# Import the selected environment lib
if environment_library == 'dm_control':
from dm_control import suite
elif environment_library == 'gym':
import gym
# Imports required for visualization
import pyvirtualdisplay
import imageio
import base64
# Set up a virtual display for rendering.
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()
###Output
###Markdown
Load an environment

We can now load an environment. In what follows we'll create an environment and grab the environment's specifications.
###Code
if environment_library == 'dm_control':
environment = suite.load('cartpole', 'balance')
elif environment_library == 'gym':
environment = gym.make('MountainCarContinuous-v0')
environment = wrappers.GymWrapper(environment) # To dm_env interface.
else:
raise ValueError(
"Unknown environment library: {};".format(environment_library) +
"choose among ['dm_control', 'gym'].")
# Make sure the environment outputs single-precision floats.
environment = wrappers.SinglePrecisionWrapper(environment)
# Grab the spec of the environment.
environment_spec = specs.make_environment_spec(environment)
###Output
_____no_output_____
###Markdown
Create a D4PG agent
###Code
#@title Build agent networks
# Get total number of action dimensions from action spec.
num_dimensions = np.prod(environment_spec.actions.shape, dtype=int)
# Create the shared observation network; here simply a state-less operation.
observation_network = tf2_utils.batch_concat
# Create the deterministic policy network.
policy_network = snt.Sequential([
networks.LayerNormMLP((256, 256, 256), activate_final=True),
networks.NearZeroInitializedLinear(num_dimensions),
networks.TanhToSpec(environment_spec.actions),
])
# Create the distributional critic network.
critic_network = snt.Sequential([
# The multiplexer concatenates the observations/actions.
networks.CriticMultiplexer(),
networks.LayerNormMLP((512, 512, 256), activate_final=True),
networks.DiscreteValuedHead(vmin=-150., vmax=150., num_atoms=51),
])
# Create a logger for the agent and environment loop.
agent_logger = loggers.TerminalLogger(label='agent', time_delta=10.)
env_loop_logger = loggers.TerminalLogger(label='env_loop', time_delta=10.)
# Create the D4PG agent.
agent = d4pg.D4PG(
environment_spec=environment_spec,
policy_network=policy_network,
critic_network=critic_network,
observation_network=observation_network,
sigma=1.0,
logger=agent_logger,
checkpoint=False
)
# Create a loop connecting this agent to the environment created above.
env_loop = environment_loop.EnvironmentLoop(
environment, agent, logger=env_loop_logger)
###Output
[reverb/cc/platform/tfrecord_checkpointer.cc:150] Initializing TFRecordCheckpointer in /tmp/tmpfib77imy.
[reverb/cc/platform/tfrecord_checkpointer.cc:386] Loading latest checkpoint from /tmp/tmpfib77imy
[reverb/cc/platform/default/server.cc:71] Started replay server on port 24442
2021-12-14 15:20:40.245526: W tensorflow/python/util/util.cc:368] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.
###Markdown
Run a training loop
###Code
# Run a `num_episodes` training episodes.
# Rerun this cell until the agent has learned the given task.
env_loop.run(num_episodes=100)
###Output
WARNING:tensorflow:Calling GradientTape.gradient on a persistent tape inside its context is significantly less efficient than calling it outside the context (it causes the gradient ops to be recorded on the tape, leading to increased CPU and memory usage). Only call GradientTape.gradient inside the context if you actually want to trace the gradient in order to compute higher order derivatives.
###Markdown
Visualize an evaluation loop

Helper functions for rendering and visualization
###Code
# Create a simple helper function to render a frame from the current state of
# the environment.
if environment_library == 'dm_control':
def render(env):
return env.physics.render(camera_id=0)
elif environment_library == 'gym':
def render(env):
return env.environment.render(mode='rgb_array')
else:
raise ValueError(
"Unknown environment library: {};".format(environment_library) +
"choose among ['dm_control', 'gym'].")
def display_video(frames, filename='temp.mp4'):
"""Save and display video."""
# Write video
with imageio.get_writer(filename, fps=60) as video:
for frame in frames:
video.append_data(frame)
# Read video and display the video
video = open(filename, 'rb').read()
b64_video = base64.b64encode(video)
video_tag = ('<video width="320" height="240" controls alt="test" '
'src="data:video/mp4;base64,{0}">').format(b64_video.decode())
return IPython.display.HTML(video_tag)
###Output
_____no_output_____
###Markdown
Run and visualize the agent in the environment for an episode
###Code
timestep = environment.reset()
frames = [render(environment)]
while not timestep.last():
# Simple environment loop.
action = agent.select_action(timestep.observation)
timestep = environment.step(action)
# Render the scene and add it to the frame stack.
frames.append(render(environment))
# Save and display a video of the behaviour.
display_video(np.array(frames))
###Output
WARNING:imageio_ffmpeg:IMAGEIO FFMPEG_WRITER WARNING: input image is not divisible by macro_block_size=16, resizing from (600, 400) to (608, 400) to ensure video compatibility with most codecs and players. To prevent resizing, make your input image divisible by the macro_block_size or set the macro_block_size to 1 (risking incompatibility).
[swscaler @ 0x591c440] Warning: data is not aligned! This can lead to a speed loss
###Markdown
Quickstart: Using the Speech Service from Python
This sample shows how to use the Speech Service through the Speech SDK for Python. It illustrates how the SDK can be used to recognize speech from microphone input.
See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-python) on the SDK documentation page for step-by-step instructions.
Prerequisites
Before you get started, here's a list of prerequisites:
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
* Python 3.5 or later needs to be installed. Downloads are available [here](https://www.python.org/downloads/).
* The Python Speech SDK package is available for Windows (x64 or x86), Mac (macOS X version 10.12 or later), and Linux (x64; Ubuntu 16.04 or Ubuntu 18.04).
* On Ubuntu 16.04 or 18.04, run the following commands for the installation of required packages:
```sh
sudo apt-get update
sudo apt-get install libssl1.0.0 libasound2
```
* On Debian 9, run the following commands for the installation of required packages:
```sh
sudo apt-get update
sudo apt-get install libssl1.0.2 libasound2
```
* On Windows you need the [Microsoft Visual C++ Redistributable for Visual Studio 2017](https://support.microsoft.com/help/2977003/the-latest-supported-visual-c-downloads) for your platform.
Get the Speech SDK Python Package
**By downloading the Microsoft Cognitive Services Speech SDK, you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license201809).**
The Cognitive Services Speech SDK Python package can be installed from [pyPI](https://pypi.org/) using this command:
```sh
pip install azure-cognitiveservices-speech
```
First, set up some general items. Import the Speech SDK Python:
###Code
import azure.cognitiveservices.speech as speechsdk
###Output
_____no_output_____
###Markdown
Set up the subscription info for the Speech Service:
###Code
speech_key, service_region = "YourSubscriptionKey", "YourServiceRegion"
###Output
_____no_output_____
###Markdown
Create an instance of a speech config with specified subscription key and service region. Replace with your own subscription key and service region (e.g., "westus").
###Code
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
###Output
_____no_output_____
###Markdown
Create a recognizer with the given settings. Since no explicit audio config is specified, the default microphone will be used (make sure the audio settings are correct).
###Code
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)
###Output
_____no_output_____
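###Markdown
If you would rather recognize speech from an audio file than from the default microphone, you can pass an explicit audio configuration when creating the recognizer. A small sketch (the file name is a placeholder; a mono 16 kHz, 16-bit PCM WAV file is the safest input format):

```python
# "your_audio_file.wav" is a hypothetical placeholder path.
audio_config = speechsdk.audio.AudioConfig(filename="your_audio_file.wav")
file_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config,
                                             audio_config=audio_config)
```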
###Markdown
Starts speech recognition, and returns after a single utterance is recognized. The end of a single utterance is determined by listening for silence at the end or until a maximum of 15 seconds of audio is processed. The task returns the recognition text as result.

Note: Since `recognize_once()` returns only a single utterance, it is suitable only for single-shot recognition like command or query. For long-running multi-utterance recognition, use `start_continuous_recognition()` instead (see the sketch below).
###Code
result = speech_recognizer.recognize_once()
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
print("Recognized: {}".format(result.text))
elif result.reason == speechsdk.ResultReason.NoMatch:
print("No speech could be recognized: {}".format(result.no_match_details))
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
print("Speech Recognition canceled: {}".format(cancellation_details.reason))
if cancellation_details.reason == speechsdk.CancellationReason.Error:
print("Error details: {}".format(cancellation_details.error_details))
###Output
_____no_output_____
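###Markdown
For reference, a minimal sketch of the continuous-recognition pattern mentioned above could look like this: connect a callback to the `recognized` event, start recognition, wait, then stop. The 30-second sleep is an arbitrary choice for illustration.

```python
import time

def on_recognized(evt):
    # Called once for every recognized utterance.
    print("Recognized: {}".format(evt.result.text))

continuous_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)
continuous_recognizer.recognized.connect(on_recognized)

continuous_recognizer.start_continuous_recognition()
time.sleep(30)  # keep listening for 30 seconds
continuous_recognizer.stop_continuous_recognition()
```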
###Markdown
Quickstart guide

In this notebook we will go through all the steps from downloading the data and training a model to evaluating the results. Check out the `environment.yml` file for the required Python packages.
###Code
import xarray as xr
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Downloading the data

The data is hosted here. For this guide we will simply download the 500 hPa geopotential data (Z500).
###Code
# This might take a few minutes
!wget https://ndownloader.figshare.com/files/20482014 -O geopotential_500_5.625deg.zip
!mkdir -p geopotential_500
!unzip -d geopotential_500/ geopotential_500_5.625deg.zip
###Output
Archive: geopotential_500_5.625deg.zip
inflating: geopotential_500/geopotential_500hPa_1979_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1980_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1981_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1982_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1983_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1984_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1985_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1986_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1987_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1988_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1989_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1990_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1991_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1992_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1993_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1994_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1995_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1996_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1997_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1998_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1999_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2000_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2001_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2002_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2003_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2004_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2005_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2006_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2007_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2008_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2009_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2010_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2011_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2012_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2013_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2014_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2015_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2016_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2017_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2018_5.625deg.nc
###Markdown
Open the data
###Code
z500 = xr.open_mfdataset('geopotential_500/*.nc')
z500
# Plot an example
z500.z.isel(time=0).plot();
###Output
_____no_output_____
###Markdown
Create a simple climatological forecast

Remember that we are using the years 2017 and 2018 for testing/evaluation, so we are not allowed to use these years to train any data-driven model.

For more information on the climatology and persistence forecasts used in the paper, check out `notebooks/1-climatology-persistence.ipynb`.
###Code
# To speed things up we will just take the mean for 2016
climatology = z500.sel(time=slice('2016', '2016')).mean('time').load()
climatology.z.plot()
###Output
_____no_output_____
###Markdown
Evaluate the climatology

Please check the paper for details on the evaluation metric. Here we will use the functions from `src/score.py`. To make sure we are always using the same targets for testing, we also implemented a function to load the test data.
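For orientation, the latitude-weighted RMSE computed by `compute_weighted_rmse` looks roughly like the sketch below. This is an assumed re-implementation for illustration, not the exact code from `src/score.py`: grid cells are weighted by the cosine of latitude so that the small high-latitude cells do not count as much as the large tropical ones.

```python
import numpy as np

def weighted_rmse_sketch(forecast, truth):
    # Latitude-weighted RMSE between two xarray objects sharing 'lat'/'lon' coords.
    weights = np.cos(np.deg2rad(truth.lat))
    weights = weights / weights.mean()
    squared_error = (forecast - truth) ** 2
    return np.sqrt((squared_error * weights).mean(('time', 'lat', 'lon')))
```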
###Code
from src.score import *
z500_test = load_test_data('geopotential_500/', 'z')[::12] # Take data only every 12 hours to speed up computation on Binder
rmse_climatology = compute_weighted_rmse(climatology.z, z500_test).load()
rmse_climatology
###Output
_____no_output_____
###Markdown
So we get a climatological RMSE of 1080 m^2/s^2, which is very similar to the RMSE we get for the climatology over all training years.

Train a neural network

Now let's train a simple convolutional neural network. We are using several functions defined in `src/train_nn.py`. You can use and modify these or write your own functions for data loading etc. For more information on the networks check out `notebooks/3-cnn-example.ipynb`.
###Code
from src.train_nn import *
# This limits TF memory usage on the GPU
# limit_mem()
###Output
WARNING:tensorflow:From /home/rasp/repositories/weather-benchmark/src/train_nn.py:12: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.
WARNING:tensorflow:From /home/rasp/repositories/weather-benchmark/src/train_nn.py:14: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.
###Markdown
First, we need to create the data generators for training, validation and testing. The main reason why we are using data generators instead of just loading the data as Numpy arrays is that this would require loading the same data twice since the features and targets are the same fields, just offset in time.
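Conceptually, the generator pairs each atmospheric state with the state `lead_time` hours later. A rough sketch of that pairing on a plain NumPy array (a hypothetical helper, not the actual `DataGenerator` code, which avoids holding two copies in memory) could look like this:
```python
import numpy as np

def make_input_target_pairs(fields, lead_steps):
    """Pair every time step with the step `lead_steps` later.
    `fields` is assumed to have shape (time, lat, lon, channels)."""
    X = fields[:-lead_steps]   # states at time t
    y = fields[lead_steps:]    # the same fields at time t + lead_time
    return X, y
```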
###Code
bs = 32
lead_time = 5*24
var_dict = {'z': None}
# Use 2015 for training and 2016 for validation
dg_train = DataGenerator(
z500.sel(time=slice('2015', '2015')), var_dict, lead_time, batch_size=bs, load=True)
dg_valid = DataGenerator(
z500.sel(time=slice('2016', '2016')), var_dict, lead_time, batch_size=bs, mean=dg_train.mean, std=dg_train.std, shuffle=False)
# Now also a generator for testing. Important: Shuffle must be False!
dg_test = DataGenerator(z500.sel(time=slice('2017', '2018')).isel(time=slice(0, None, 12)), # Limiting the data for Binder
var_dict, lead_time, batch_size=bs, mean=dg_train.mean, std=dg_train.std, shuffle=False)
X, y = dg_train[0]
# Batches have dimensions [batch_size, lat, lon, channels]
X.shape, y.shape
###Output
_____no_output_____
###Markdown
Now let's build a simple fully convolutional network. We are using periodic convolutions in the longitude direction. These are defined in `train_nn.py`.
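As a rough illustration of the idea only (not the actual `PeriodicConv2D` implementation), a periodic convolution can be built by wrapping the field around in the longitude direction before applying an ordinary convolution:
```python
import tensorflow as tf
from tensorflow import keras

def periodic_conv_sketch(x, filters, kernel_size):
    """Sketch of a periodic convolution for inputs of shape
    (batch, lat, lon, channels) with an odd kernel_size."""
    pad = kernel_size // 2
    # Wrap-around padding along longitude (axis 2) so the filter sees
    # neighbouring values across the date line.
    x = tf.concat([x[:, :, -pad:, :], x, x[:, :, :pad, :]], axis=2)
    # Ordinary zero padding along latitude (axis 1) keeps the grid size
    # after a 'valid' convolution.
    x = tf.pad(x, [[0, 0], [pad, pad], [0, 0], [0, 0]])
    return keras.layers.Conv2D(filters, kernel_size, padding='valid')(x)
```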
###Code
cnn = keras.models.Sequential([
PeriodicConv2D(filters=32, kernel_size=5, activation='relu', input_shape=(32, 64, 1,)),
PeriodicConv2D(filters=1, kernel_size=5)
])
cnn.summary()
cnn.compile(keras.optimizers.Adam(1e-4), 'mse')
# Train a little bit ;)
cnn.fit_generator(dg_train, epochs=1, validation_data=dg_valid)
###Output
270/270 [==============================] - 6s 21ms/step - loss: 1.7766 - val_loss: 1.0282
###Markdown
Create a prediction and compute score
Now that we have a model (albeit a crappy one) we can create a prediction. For this we need to create a forecast for each forecast initialization time in the testing range (2017-2018) and unnormalize it. We then convert the forecasts to an Xarray dataset, which allows us to easily compute the RMSE. All of this is taken care of in the `create_predictions()` function.
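Roughly, `create_predictions()` does something along the lines of the sketch below; the coordinate handling and the exact way the generator stores its normalisation statistics are assumptions here, so treat it as an illustration rather than the real function.
```python
import numpy as np
import xarray as xr

def create_predictions_sketch(model, dg, init_times, lat, lon):
    """Hedged sketch: predict on the normalised test inputs, undo the
    normalisation and wrap the result as an xarray DataArray. init_times,
    lat and lon are passed in explicitly here for simplicity."""
    preds = model.predict_generator(dg)
    # Undo the (x - mean) / std scaling; dg.mean and dg.std are the
    # statistics handed to the generator above.
    preds = preds * np.asarray(dg.std) + np.asarray(dg.mean)
    return xr.DataArray(preds.squeeze(), dims=['time', 'lat', 'lon'],
                        coords={'time': init_times, 'lat': lat, 'lon': lon})
```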
###Code
preds = create_predictions(cnn, dg_test)
preds
compute_weighted_rmse(preds.z, z500_test).load()
time = '2017-03-02T00'
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,5))
z500_test.sel(time=time).plot(ax=ax1)
preds.sel(time=time).z.plot(ax=ax2);
###Output
_____no_output_____
###Markdown
Quickstart for MNIST-1D
Sam Greydanus | 2020
This notebook shows how to build the MNIST-1D dataset and train some baselines*.
_* If you're running this in Colab, it's best to use a GPU runtime._
###Code
# Run this if you're in a Colab
!git clone https://github.com/greydanus/mnist1d
import numpy as np
import torch
import random
import matplotlib.pyplot as plt
import sys ; sys.path.append('..') # useful if you're running locally
import mnist1d
from mnist1d.data import get_templates, get_dataset_args, get_dataset
from mnist1d.train import get_model_args, train_model
from mnist1d.models import ConvBase, GRUBase, MLPBase, LinearBase
from mnist1d.utils import set_seed, plot_signals, ObjectView, from_pickle
# Try attaching to GPU
DEVICE = str(torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
print('Using:', DEVICE)
###Output
Using: cpu
###Markdown
Step 1: Visualize templates
To construct MNIST-1D, we start with ten 1D signals, each of which resembles a digit in the range 0-9.
###Code
templates = get_templates()
print("Templates for the MNIST-1D dataset:")
x = templates['x']
t = templates['t']
y = templates['y']
fig = plot_signals(x, t, labels=y, ratio=1.33, dark_mode=True)
fig = plot_signals(x, t, labels=y, ratio=1.33, dark_mode=False)
###Output
_____no_output_____
###Markdown
Step 2: Transform
Now we are going to transform these signals in such a way that they become difficult to classify. Nonlinear representations and good spatial priors will become very important in order to classify them.
###Code
get_dataset_args(as_dict=True)
arg_dict = get_dataset_args(as_dict=True)
arg_dict['padding'] = [36, 60]
arg_dict['max_translation'] = 48
arg_dict['scale_coeff'] = 0
arg_dict['corr_noise_scale'] = 0
arg_dict['iid_noise_scale'] = 0
arg_dict['shear_scale'] = 0
args = ObjectView(arg_dict)
x = templates['x']
t = templates['t']
y = templates['y']
set_seed(args.seed)
fig = plot_signals(x, t, labels=y, args=args, ratio=2.2, do_transform=True)
args = get_dataset_args()
x = templates['x']
t = templates['t']
y = templates['y']
set_seed(args.seed)
fig = plot_signals(x, t, labels=y, args=args, ratio=2.2, do_transform=True)
###Output
_____no_output_____
###Markdown
Step 3: Make a dataset
Next we are going to construct a classification dataset, analogous to MNIST but much smaller, using these signals. Note that the dataset gets saved as a small pickle file, so you don't have to build it from scratch every time.
###Code
args = get_dataset_args()
data = get_dataset(args, path='./mnist1d_data.pkl', download=False, regenerate=True)
args = get_dataset_args()
data = get_dataset(args, path='./mnist1d_data.pkl', download=False, regenerate=False)
print("Examples in training set: {}".format(len(data['y'])))
print("Examples in test set: {}".format(len(data['y_test'])))
print("Length of each example: {}".format(data['x'].shape[-1]))
print("Number of classes: {}".format(len(data['templates']['y'])))
###Output
Successfully loaded data from ./mnist1d_data.pkl
Examples in training set: 4000
Examples in test set: 1000
Length of each example: 40
Number of classes: 10
###Markdown
Step 3b: Download the baseline dataset
It's always possible that the open-source libraries that we used to synthesize this dataset will change. Even if they change slightly, the dataset one obtains from running the code above may change slightly, making it hard for you and me to compare results.
If you want to use MNIST-1D as a baseline, we suggest that you load it using the pickle file provided in the repo. The dataset contained in that file will serve as the one-and-only, original MNIST1D dataset. The link is https://github.com/greydanus/mnist1d/raw/master/mnist1d_data.pkl. The `mnist1d.get_dataset` function loads this file when `download` is set to `True`:
###Code
args = get_dataset_args()
data = get_dataset(args, path='./mnist1d_data.pkl', download=True) # This is the default setting
###Output
Downloading MNIST1D dataset from https://github.com/greydanus/mnist1d/raw/master/mnist1d_data.pkl
Saving to ./mnist1d_data.pkl
Successfully loaded data from ./mnist1d_data.pkl
###Markdown
Step 4: Run baselines*
We'll see that:
* Nonlinear classifiers do better than linear classifiers.
* Classifiers with transformation invariance and local connectivity (CNNs) do better than ones without these properties (MLPs).

An additional note:
* Humans do just a bit better than the other baselines (human performance is 95.8% on 500 test set examples). This suggests that shortcut learning and memorization don't help much.

_* You may see slightly different numbers than the ones from the default run. This is due to the fact that you are using a GPU instead of CPU: PyTorch's random seeding function ``torch.cuda.manual_seed_all(seed)`` causes this difference. Your results will be reproducible between GPUs._
###Code
get_model_args(as_dict=True)
# Note: if you run on a GPU, the walltimes will be _much_ lower
args = get_model_args()
args.total_steps = 6000
args.device = DEVICE
set_seed(args.seed)
model = ConvBase(output_size=args.output_size)
results_cnn = train_model(data, model, args)
print()
set_seed(args.seed)
model = GRUBase(input_size=1, output_size=args.output_size)
results_gru = train_model(data, model, args)
print()
set_seed(args.seed)
model = MLPBase(args.input_size, args.output_size)
results_mlp = train_model(data, model, args)
print()
set_seed(args.seed)
model = LinearBase(args.input_size, args.output_size)
results_lin = train_model(data, model, args)
print()
fig = plt.figure(figsize=(4.1, 3), dpi=150)
plt.subplot(1,1,1)
t = range(0, args.total_steps+1, args.eval_every)
plt.plot(t, results_lin['test_acc'], 'r-', label='logistic')
plt.plot(t, results_mlp['test_acc'], 'g-', label='mlp')
plt.plot(t, results_cnn['test_acc'], 'b-', label='cnn')
plt.plot(t, results_gru['test_acc'], 'c-', label='gru')
plt.plot(t, [95.8]*len(t), 'k-', label='human')
plt.title('Test accuracy') ; plt.xlabel("Train step") ; plt.legend(fontsize=6.5, ncol=5, loc='lower right')
plt.xticks(fontsize=9) ; plt.yticks(fontsize=9)
plt.ylim(0,105)
plt.tight_layout() ; plt.show()
###Output
_____no_output_____
###Markdown
Jupyter Quick Start Tutorial by Jennifer Walker
_Any questions or comments? I'd love to hear from you! Please email me at [email protected]._
This tutorial is intended as a **minimal introduction** to quickly get you up and running with the basics of **JupyterLab** and **Jupyter notebooks**. To work through this tutorial as a live demo online, click the "launch binder" button below (it might take a minute or two to load). For more comprehensive and in-depth Jupyter resources, check out the [JupyterLab documentation](http://jupyterlab.readthedocs.io/en/stable/getting_started/overview.html), [Jupyter notebook documentation](http://jupyter-notebook.readthedocs.io/en/stable/), and this [tutorial from PyCon 2017](http://nbviewer.jupyter.org/github/ipython/ipython-in-depth/blob/master/Index.ipynb). _Note: The latter two resources show how to work on a notebook within the classic Jupyter notebook app, rather than the newer JupyterLab development environment that we're focusing on here, so the user interface will look a bit different, but the ideas and structure of the notebook are the same whether you use the classic notebook or JupyterLab._

JupyterLab
To install and run JupyterLab, follow [these instructions](https://jenfly.github.io/datajam-python/SETUP) and then launch JupyterLab from Anaconda Navigator (or type `jupyter lab` at the command line). When you launch JupyterLab, it will look similar to the screenshot below. If you're working through this tutorial locally on your computer, use the **Files Sidebar** to navigate to the folder you'd like to save your work in. (If you're doing the live demo online you can skip this step.) From here you can:
- Open an existing Jupyter notebook (`.ipynb` file such as this one) by double-clicking on it, or
- Create a new Jupyter notebook in the current working directory by clicking the Python icon listed under "Notebook" in the **Launcher**.

_Note: If you launch JupyterLab from Anaconda Navigator, it will initialize from your user directory (e.g., `C:\Users\jenfly`) and you will only be able to access sub-folders and files within this directory. To launch JupyterLab from other directories, use the command line (Anaconda Prompt on Windows, Terminal on Mac)._
Within the JupyterLab development environment, you can:
- View, edit, and run Jupyter notebooks
- Rename, move, and delete files in the Files Sidebar as you would in your computer's file manager
- View CSV files in the **CSV Viewer** — just double-click on the file in the Files Sidebar
- Create and edit .py, .md and other text files in the **Text Editor**
- View .md files with **Markdown Preview** — right-click on the file in the Files Sidebar and select Open $\rightarrow$ Markdown Preview
- View PDF documents — just double-click on the file in the Files Sidebar
- Work in the **IPython Console** for interactive coding sessions that you don't want to save in a notebook
- Work at the command line in **Terminal**
- and more!

The rest of this tutorial will focus on Jupyter notebooks.

Jupyter Notebooks
This tutorial document is a Jupyter notebook, consisting of a series of "cells" which can be **code cells**, where you execute snippets of code and display the output, or **Markdown cells**—like this one—with formatted text, images, equations, and more. The screenshot below shows an example notebook as it appears in JupyterLab.
**Let's create a new Jupyter notebook:** From the Launcher, click the Python icon under "Notebook".
A new notebook `Untitled.ipynb` will open and you'll also see the new file `Untitled.ipynb` appear in the Files Sidebar. You can **rename the notebook** by clicking in the tab at the top of the notebook or right-clicking on the file in the Files Sidebar. _Note: If you're in the live demo, this new notebook will only exist for as long as the demo is open and active._ Jupyter notebooks **auto-save** frequently, like documents in Google Docs. If you want to manually save a notebook, you can click the Save button on the **Notebook Toolbar** or press `Ctrl-S` (`Cmd-S` on a Mac).

Code Cells
Below is a Python code cell. **To run a code cell**, click in a cell and press `Shift-Enter` or click the play button in the Notebook Toolbar. This will run the code in the cell and advance to the next cell, creating a new one if none exists below. Any output from the code will be displayed below the cell.
###Code
animals = ['cat', 'dog', 'horse', 'rabbit', 'duck']
for animal in animals:
print(animal)
###Output
cat
dog
horse
rabbit
duck
###Markdown
**To edit a code cell**, simply click inside it and start editing the code. Try editing the code in the cell above—for example, add some more animals to the list—and then run the cell. _Note: If you edit a cell but don't run it, it will retain the output from before the edit, which can get confusing. Make sure to always run a cell right away when you edit it._
Similar to the standard Python console, if you run some code without assigning the output to a variable, it will simply display the output. This is handy for displaying the results of a calculation without needing to use `print`.
###Code
2 + 2
round(3.14159, 2)
###Output
_____no_output_____
###Markdown
With multi-line cells, such as below, each line of code is executed but the only unassigned output displayed is from the last line:
###Code
2 + 2
round(3.14159, 2)
###Output
_____no_output_____
###Markdown
Markdown Cells
In Markdown cells, you can write plain text or add formatting and other elements with [Markdown](https://www.markdownguide.org/getting-started). These include headers, **bold text**, _italic text_, hyperlinks, equations $A=\pi r^2$, inline code `print('Hello world!')`, bulleted lists, and more.
**To edit a Markdown cell**, you need to double-click inside it (unlike code cells, where you only need to single-click). Try double-clicking on this cell to see what the Markdown syntax looks like and make some edits. As with a code cell, you press `Shift+Enter` or click the play button in the Notebook Toolbar to run the cell and render the formatting. By default, new cells are always code cells. **To create a Markdown cell**, select an empty code cell and then click on the Notebook Toolbar where it says "Code"—this displays a dropdown menu where you can change the cell type to "Markdown".
For more Markdown syntax, check out the [Markdown cheatsheet](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet).

Auto-Complete
In Jupyter notebooks, auto-complete works for any object, including variable names, functions, attributes, and methods. It even works for file paths and keyword arguments! To try out the auto-complete features, first run the two cells below:
###Code
import math
today = 'Saturday'
tomorrow = 'Sunday'
###Output
_____no_output_____
###Markdown
Then, in the empty cells below, try the following:
- Type `to` and press `Tab` to select `today` or `tomorrow` from a list of auto-complete options.
- Type `ro` and press `Tab`. It automatically completes with the `round` function name (unless you've previously defined another object with a name starting `ro`).
- Type `today.` and press `Tab` (make sure to include the dot) to display available methods and attributes for the string variable `today`.
- Type `math.` and press `Tab` (make sure to include the dot) to see what's in the `math` library.
- Type `data_file = 'i` and press `Tab`—it will auto-complete to `'iris.csv'`.
  - _Note: If you're doing this tutorial locally on your computer and don't have an `iris.csv` file, you can select a different file to try this out._

Viewing Documentation
With any Python object, such as a function or method, you can type `?` after the object name to display its documentation (this is known as "object introspection").
###Code
math.sqrt?
###Output
_____no_output_____
###Markdown
You can also display documentation as you type a function call, by pressing `Shift-Tab` inside the parentheses.
If the source code of a function or method is short enough, you can display it with `??` after the object name.
###Code
def fibonacci_sequence(n):
"""Return the first n numbers of the Fibonacci sequence"""
sequence = []
a, b = 0, 1
for i in range(n):
a, b = b, a + b
sequence.append(a)
return sequence
fibonacci_sequence??
###Output
_____no_output_____
###Markdown
Organizing Cells
**To add a cell** in between two existing cells, click the upper cell to select it (a vertical blue line to the left indicates that the cell is selected) and then press the `+` button in the Notebook Toolbar. Try adding a few new cells below this one:
You can **move cells up or down**: First select the cell by clicking at the outside left of the cell. The cursor will change to a pointer with four arrows (if it doesn't, move the cursor up towards the top of the cell until it changes) and then you can drag the cell up or down. Try moving this Markdown cell up/down, or rearrange the new cells that you added above.
You can also copy/cut/paste/delete cells, split a cell into two, and merge multiple cells into one. See the commands listed under "Edit" in the Top Menu.
It's important to be aware that unlike a script, where the code in a file is executed from top to bottom, notebooks are **nonlinear**—cells can be executed out of order and multiple times each. This can cause errors or unexpected behaviour if you're not careful. We'll discuss this further in the next section.

Managing Code Execution and Output
When you open a Jupyter notebook, it launches a process in the background called a **kernel**, which executes code sent by the user and communicates the results back to the notebook interface in the browser. Python-based Jupyter notebooks use an [IPython kernel](https://ipython.org/), which provides many handy features for interactive coding, such as auto-complete, object introspection, magic commands, and more. Jupyter notebooks also support R, Julia, Perl, and over 100 other languages.
The input/output history labels to the left of each code cell (e.g., `In[3]`, `Out[3]`) give you information about the history of cell execution for your current kernel session. If there is no number inside the square brackets, the code cell has not yet been run. If there is a number inside the square brackets, it tells you the order of execution of this cell during the current kernel session (the kernel session has a counter that increments by one each time you execute a cell).
You can restart a kernel session, which clears out all the variables and imported libraries, while maintaining the output of all the code cells. In addition to running cells one at a time, you can select several cells and run them in order, or run all the notebook's cells, from start to end. These options are available in the Kernel menu and the Run menu.
From the **Top Menu**, try each of the following:
- Select Kernel $\rightarrow$ Restart Kernel and Clear All Outputs. You'll see all the code output disappears, the input/output history labels are empty (`In[]`, `Out[]`), and the previously defined variables and imported libraries are no longer available.
- Select Run $\rightarrow$ Run All Cells to run all the cells, from top to bottom.
- Select Kernel $\rightarrow$ Restart Kernel. You'll see that the code output remains, but previously defined variables and imported libraries are no longer available.
- Experiment with other options from the Run menu and the Kernel menu, such as running several selected cells, or running all cells above/below a selected cell.

Caveats
Since notebooks are nonlinear, as mentioned in the previous section, and cells can be run multiple times and out of order, you'll find that as you work on your notebook, the numbers in the `In[]`, `Out[]` labels are often not in order. Try running the cell below a few times in a row and see that the number increments by one each time.
Then try running the three cells below, in different orders, to see how the numbers change.
###Code
3 - 10
print('Hello world')
round(1/3, 2)
###Output
_____no_output_____
###Markdown
You don't need to worry about these numbers too much, but they can be a **warning sign that you've been running cells out of order** and you might get different output, errors, or unexpected behaviour next time you start fresh with a new kernel session.
As a silly example, run each of the following three cells, in order, then go back to the middle cell which contains `print('Hello ' + city)` and run it again, by itself. Now the output of that cell reads `Hello Toronto`, even though the cell immediately above shows a value of `'Vancouver'` being assigned to the variable `city`, making things very confusing.
###Code
city = 'Vancouver'
print('Hello ' + city)
city = 'Toronto'
###Output
_____no_output_____
###Markdown
As you're developing the code in your notebook, it's good practice to periodically restart the kernel and run all cells; this allows you to make sure that everything works as expected when you run the whole notebook from top to bottom. Whenever feasible, I also try to consolidate any changes to the value of a variable within a single cell or a few adjacent cells, so that it has a consistent value for the rest of the notebook.

Interrupting the Kernel
If the code in a cell takes a long time to run, you will see `In[*]` to the left of it. To see this in action, let's first import the `time` library:
###Code
import time
###Output
_____no_output_____
###Markdown
Now run the cell below and note the `In[*]`. When it eventually finishes executing, the `*` is replaced with a number. If a cell is taking too long to run, you can interrupt the kernel by going to the **Top Menu** and selecting Kernel $\rightarrow$ Interrupt Kernel. Try running the cell below and interrupting the kernel partway through.
###Code
for i in range(10):
print(i)
time.sleep(5)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
Once in a while, the kernel hangs and becomes unresponsive and you'll see `In[*]` next to cells that should complete execution almost instantaneously. In that case you can try interrupting the kernel, or if that doesn't solve the problem, restart the kernel.

Keyboard Shortcuts
For a complete list of keyboard shortcuts, click "Commands" on the Side Menu. Here is a quick overview of some of the most useful keyboard shortcuts. To use these shortcuts, click to the left of the cell(s) to select it in "command mode", rather than "edit mode" (which you enter by clicking inside a code cell or double-clicking inside a Markdown cell):
- `A` to insert a new cell above
- `B` to insert a new cell below
- `DD` to delete the selected cell(s)
- `C` to copy selected cell(s)
- `X` to cut selected cell(s)
- `V` to paste copied/cut cell(s)
- `Shift-M` to merge multiple selected cells into a single cell
- `M` to change the cell type to Markdown
- `Y` to change the cell type to code
- `Enter` to enter edit mode

Magic Commands
In addition to auto-complete and object introspection, the IPython kernel provides "magic commands", which are prefixed by the `%` character. From Jake VanderPlas' [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/01.03-magic-commands.html):
> These magic commands are designed to succinctly solve various common problems in standard data analysis. Magic commands come in two flavors: line magics, which are denoted by a single % prefix and operate on a single line of input, and cell magics, which are denoted by a double %% prefix and operate on multiple lines of input.

For a quick summary of some of the available magic commands (and other features of IPython), run the magic command `%quickref`. For a complete list, check out the [IPython magics documentation](https://ipython.readthedocs.io/en/stable/interactive/magics.html). A quick illustration of both flavors follows this cell.

Plots
One of the great features of Jupyter notebooks is the ability to display plots alongside the code that created them. The example below shows a `seaborn` plot.
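As promised under Magic Commands above, here are two tiny examples, each meant to be run in its own notebook cell (`%timeit` and `%%time` are standard IPython magics):
```python
# Line magic (single %): time a single statement
%timeit sum(range(1000))
```
```python
%%time
# Cell magic (double %%): time the whole cell; it must appear on the first line
total = sum(i**2 for i in range(10000))
```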
###Code
import seaborn as sns
# Load the example miles per gallon dataset
mpg = sns.load_dataset('mpg')
# Plot mpg vs. horsepower
sns.relplot(data=mpg, x='horsepower', y='mpg', hue='origin', size='weight',
sizes=(20, 200), alpha=0.5, palette="muted", height=4);
###Output
_____no_output_____
###Markdown
To run ITAM you need to provide:
+ a lookup table of the inverse CDF
+ a lookup table of the target power spectrum
+ the rescaling factor to match the field cell-density variance to the variance computed from the power spectrum

Examples of such files are in the folder 'data_itam/'. They were written using the lookup.py module.
The function *lookup_Pk* writes the lookup table of the target nonlinear power spectrum with Planck cosmology using CLASS (you can easily modify the cosmological parameters directly inside the function).
The function *lookup_ppf* writes the target inverse CDF (percentile point function) of the simulation, smoothed at the scale $R_{th}$. In my case density.npy is the binary file containing the density of the simulation (not included in data_itam/). It also saves the rescaling factor mentioned above.
In case you want to implement an analytical one-point mapping, take a look at the logITAM.py module, where Eq. 16 from the paper is implemented.
###Code
lookup.lookup_Pk(cosmology='planck',nonlinear=0) # linear power spectrum, useful for the initialization
lookup.lookup_Pk(cosmology='planck',nonlinear=1) # target power spectrum
lookup.lookup_ppf(nsamples=100000, boxsize=256.0, Rth=1.0,density='data_itam/density.npy', pathpk='data_itam/planck_pk.txt', saveto_ppf='data_itam/_ppf.txt',
saveto_rescale='data_itam/rescale_factor.txt')
itm = itam.ITAM(boxsize=256.,ng=256, beta=1.,Rth=1.,eps=0.001,Deps=0.001,pathto_linpk='data_itam/planck_pk_linear.txt',
pathto_pk='data_itam/planck_pk.txt',pathto_ppf = 'data_itam/_ppf.txt',saveto='data_itam/',
pathto_rescale='data_itam/rescale_factor.txt' , plotty=0)
plt.figure(figsize=(1.62*5.5,5.5))
with warnings.catch_warnings():
warnings.simplefilter( "ignore" , category = RuntimeWarning )
plt.semilogx(itm.k,(itm.pk-itm.pk_ng)/itm.pk,lw=2.)
plt.ylim([-0.01,0.01])
plt.xlim([0.02,1.5])
plt.xlabel('$k \ [h/Mpc]$',fontsize='x-large')
plt.ylabel(r'$\Delta P(k)/P(k)$',fontsize='x-large')
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
many realizations
###Code
from multiprocessing import cpu_count
print(cpu_count())
itm.make_covariance(nreal=100,cores=4)
kbins = N.loadtxt('data_itam/kbins.txt')
kk,pk = N.loadtxt('data_itam/planck_pk.txt')
psvals = N.loadtxt('data_itam/psvals_ng.txt').T
psvals_g = N.loadtxt('data_itam/psvals_g.txt').T
meanPk = N.mean(psvals,axis=1)
sigmaPk = N.std(psvals,axis=1)
meanPk_g = N.mean(psvals_g,axis=1)
sigmaPk_g = N.std(psvals_g,axis=1)
fig, ax = plt.subplots(figsize=(6*1.62,6))
ax.plot(kbins,meanPk,label='itam',c='b')
ax.plot(kbins,meanPk_g,label='pre-translation',c='darkorange')
ax.fill_between(kbins,meanPk_g+sigmaPk_g,meanPk_g-sigmaPk_g,alpha=0.4,color='darkorange')
ax.fill_between(kbins,meanPk+sigmaPk,meanPk-sigmaPk,alpha=0.4,color='b')
ax.plot(itm.k,itm.pk_g,'--g',label='expected pre-translation',lw=2.0)
ax.plot(itm.k,itm.pk,'--r',label='expected itam',lw=2.0)
ax.set_xlabel(r'$k$ (h/Mpc)',size='x-large')
ax.set_ylabel(r'$P(k)$ $(Mpc/h)^3$',size='large')
ax.set_yscale('log')
ax.set_xscale('log')
ax.legend(fontsize='xx-large')
ax.set_ylabel('$P(k) \ [Mpc/h]^3$',fontsize='xx-large')
ax.set_ylim([10.,5e+05])
ax.set_xlim([0.02,3.])
ax.grid()
###Output
_____no_output_____
###Markdown
covariance matrix
###Code
kb = N.loadtxt('data_itam/kbins.txt')
ps = N.loadtxt('data_itam/psvals_ng.txt').T
print(ps.shape)
pscorr = N.corrcoef(ps)
w = kb<0.8
pscorr = pscorr[:,w]
pscorr = pscorr[w,:]
kw = kb[w]
fig,ax1 = plt.subplots(figsize=(5,5))
ax1.imshow(pscorr,origin='lower')
ax1.contour(pscorr,colors='k')
ax1.set_xticks(N.arange(len(kw))[1::16])
ax1.set_yticks(N.arange(len(kw))[1::16])
ax1.set_xticklabels(labels=N.round(kw[1::16],decimals=2))
ax1.set_yticklabels(labels=N.round(kw[1::16],decimals=2))
ax1.set_xlabel(r'$k$ (h/Mpc)',size='x-large')
ax1.set_ylabel(r'$k$ (h/Mpc)',size='x-large')
plt.show()
###Output
_____no_output_____
###Markdown
Quickstart: Using the Storage Service from Python
This sample shows how to use the Blob Service through the Storage SDK for Python.
See the [accompanying article](https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-python) on the SDK documentation page for step-by-step instructions.
Prerequisites
Before you get started, here's a list of prerequisites:
* An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?ref=microsoft.com&utm_source=microsoft.com&utm_medium=docs&utm_campaign=visualstudio).
* An Azure Storage account. [Create a storage account](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-create).
* Python 2.7 or 3.6+.
Get the Blob Storage Python Package
The Azure Blob Storage Python package can be installed from [PyPI](https://pypi.org/) using this command:
```sh
pip install azure-storage-blob
```
First, set up some general items. Import the Blob Storage Python package:
###Code
import os, uuid
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient, __version__
###Output
_____no_output_____
###Markdown
Display the version of the Blob Storage Python package:
###Code
print("Azure Blob Storage v" + __version__ + " - Python quickstart sample")
###Output
_____no_output_____
###Markdown
Set the connection string for use with the application.
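The next cell hard-codes the string for simplicity. As a sketch of an alternative that keeps the secret out of the notebook, you could read it from an environment variable instead (only the standard library is used; `AZURE_STORAGE_CONNECTION_STRING` is the variable name used in the Azure samples, but any name works):
```python
import os

# Read the connection string from an environment variable instead of
# pasting it into the notebook.
connect_str = os.getenv('AZURE_STORAGE_CONNECTION_STRING')
```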
###Code
connect_str = 'your-connection-string'
###Output
_____no_output_____
###Markdown
Create an instance of the BlobServiceClient class by calling the from_connection_string method. Then, call the create_container method to actually create the container in your storage account.
###Code
try:
# Create the BlobServiceClient object which will be used to create a container client
blob_service_client = BlobServiceClient.from_connection_string(connect_str)
# Create a unique name for the container
container_name = str(uuid.uuid4())
# Create the container
container_client = blob_service_client.create_container(container_name)
except Exception as ex:
print(f'Exception:\n${ex}')
###Output
_____no_output_____
###Markdown
Creates a local directory to hold data files.
###Code
local_path = "./data"
os.mkdir(local_path)
###Output
_____no_output_____
###Markdown
Creates a text file in the local directory.
###Code
local_file_name = str(uuid.uuid4()) + ".txt"
upload_file_path = os.path.join(local_path, local_file_name)
file = open(upload_file_path, 'w')
file.write("Hello, World!")
file.close()
###Output
_____no_output_____
###Markdown
Gets a reference to a BlobClient object by calling the get_blob_client method on the BlobServiceClient from the Create a container section.
###Code
blob_client = blob_service_client.get_blob_client(container=container_name, blob=local_file_name)
###Output
_____no_output_____
###Markdown
Uploads the local text file to the blob by calling the upload_blob method.
###Code
print("\nUploading to Azure Storage as blob:\n\t" + local_file_name)
with open(upload_file_path, "rb") as data:
blob_client.upload_blob(data)
###Output
_____no_output_____
###Markdown
List the blobs in a container
###Code
print("\nListing blobs...")
blob_list = container_client.list_blobs()
for blob in blob_list:
print("\t" + blob.name)
###Output
_____no_output_____
###Markdown
Clean up the resources the app created by removing the entire container using the delete_container method.
###Code
container_client.delete_container()
###Output
_____no_output_____
###Markdown
**Setup**
###Code
from lib.submitter import *
jwt = '' # User-specified field.
address = '' # User-specified field.
pk = '' # User-specified field.
submitter = Submitter(jwt, address, pk)
###Output
_____no_output_____
###Markdown
View wallet address information.
###Code
print('MATIC balance for {}: {}'.format(submitter.address, submitter.get_matic_balance()))
print('MUSA balance for {}: {}'.format(submitter.address, submitter.get_musa_balance()))
print('Current stake for {}: {}'.format(submitter.address, submitter.get_stake()))
###Output
_____no_output_____
###Markdown
**Retrieve Dataset and run models.** Please follow the instructions and example found [here](https://github.com/rocketcapital-ai/competition_quickstart_dataloader). When completed, please place the final prediction csv file in the `file_to_submit` folder.

**Set stake.** (optional) Skip if no changes need to be made.
###Code
set_stake_amount = 100.00 # User-specified field.
transaction_success = submitter.set_stake(set_stake_amount)
assert transaction_success, 'Setting stake failed.'
###Output
_____no_output_____
###Markdown
**Submit predictions.** Note that the same `submit_prediction` method should be used for re-submitting predictions.
###Code
submission_file_name = 'my_submission_file.csv' # User-specified field.
transaction_success = submitter.submit_prediction(submission_file_name)
assert transaction_success, 'Submission failed.'
###Output
_____no_output_____
###Markdown
**Retrieve and double-check predictions.** (optional) This section retrieves your submitted files, decrypts them, and compares them to the original file in `file_to_submit`.
If the verification fails, please wait a few minutes and perform the verification again. If the problem persists, please re-submit your predictions.
###Code
verification_success = submitter.download_and_check(submission_file_name)
assert verification_success, 'Submission verification failed.'
print('Files are identical. Verification check passed.')
###Output
_____no_output_____
###Markdown
A brief explanation of the revamped CBRAIN module
In this guide, we will go through the steps required to preprocess the raw climate model output, train a neural network and then analyze how good it is.
###Code
from cbrain.imports import *
from cbrain.cam_constants import *
from cbrain.utils import *
###Output
_____no_output_____
###Markdown
Preprocessing
Preprocessing works using the `preprocessing.py` script in the main directory of the repository along with a configuration file. Let's have a look at one such configuration file to see what is required. Here is `000_test.yml`:
```yaml
vars : [QBP, QCBP, QIBP, TBP, VBP, PS, SOLIN, SHFLX, LHFLX,
        PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND, QRL, QRS, DTVKE,
        FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN,
        Qdt_adiabatic, QCdt_adiabatic, QIdt_adiabatic, Tdt_adiabatic, Vdt_adiabatic]
in_dir : /local/S.Rasp/sp32fbp_andkua/
in_fns : AndKua_aqua_SPCAM3.0_sp_fbp32.cam2.h1.0000-01-0*-00000.nc
out_dir: /local/S.Rasp/preprocessed_data/
out_fn: 000_train.nc
val_in_fns: AndKua_aqua_SPCAM3.0_sp_fbp32.cam2.h1.0000-02-0*-00000.nc
val_out_fn: 000_valid.nc
norm_fn: 000_norm.nc
```
One new thing is that it is not necessary to specifically state the input and output variables for the preprocessing script. These will be chosen later in the data generator. This means that `vars` can contain as many variables as possible, which should reduce the number of times the preprocessing has to be run.
Some of the variables are saved directly in the model output, e.g. `PHQ`, while others, e.g. `QBP`, have to be derived. Currently, the following derived variables are available: `*dt_adiabatic`, `*BP` and `PRECST`. If you want to add others, you have to implement them in `convert_dataset.py`.
`in_dir` and `in_fns` describe the raw climate model file names. `out_dir` and `out_fn` denote the path to the preprocessed file. The training dataset will additionally be preshuffled.
If `val_*` is given, a separate validation dataset will be created.
If `norm_fn` is given, statistics will be computed from the training dataset, e.g. the mean and standard deviation. Note that for large training files this takes a very, very long time. Therefore, I recommend computing the normalization file on a small dataset and using it for the large training dataset.
###Code
!python preprocessing.py -c pp_config/000_test.yml
!ls /local/S.Rasp/preprocessed_data/000*
###Output
/local/S.Rasp/preprocessed_data/000_norm.nc
/local/S.Rasp/preprocessed_data/000_train.nc
/local/S.Rasp/preprocessed_data/000_train_shuffle.nc
/local/S.Rasp/preprocessed_data/000_valid.nc
###Markdown
Training the model step-by-step
Let's now go through the steps to train a neural network, starting with the new data generator.

Data generator
To read the preprocessed file and feed the data to the neural net, we will use the `DataGenerator` class in the cbrain module. At this stage we will now define the variables we want in the input and output of the neural network.
###Code
in_vars = ['QBP', 'QCBP', 'QIBP', 'TBP', 'VBP',
'Qdt_adiabatic', 'QCdt_adiabatic', 'QIdt_adiabatic', 'Tdt_adiabatic', 'Vdt_adiabatic',
'PS', 'SOLIN', 'SHFLX', 'LHFLX']
out_vars = ['PHQ', 'PHCLDLIQ', 'PHCLDICE', 'TPHYSTND', 'QRL', 'QRS', 'DTVKE',
'FSNT', 'FSNS', 'FLNT', 'FLNS', 'PRECT', 'PRECTEND', 'PRECST', 'PRECSTEN']
###Output
_____no_output_____
###Markdown
Next we need to think about how we want to normalize/scale the inputs and outputs. For the inputs we will use a pretty standard normalization, which we will get to later.
As for the outputs, we would like to scale each variable physically. For this we will create a dictionary, which will contain a factor or a vector of factors for each variable.
###Code
scale_dict = {
'PHQ': L_V/G,
'PHCLDLIQ': L_V/G,
'PHCLDICE': L_V/G,
'TPHYSTND': C_P/G,
'QRL': C_P/G,
'QRS': C_P/G,
'DTVKE': C_P/G,
'FSNT': 1,
'FSNS': 1,
'FLNT': 1,
'FLNS': 1,
'PRECT': RHO_L*L_V,
'PRECTEND': 1e-3*RHO_L*L_V,
'PRECST': RHO_L*L_V,
'PRECSTEN': 1e-3*RHO_L*L_V
}
# Takes representative value for PS since purpose is normalization
PS = 1e5; P0 = 1e5;
P = P0*hyai+PS*hybi; # Total pressure [Pa]
dP = P[1:]-P[:-1]; # Differential pressure [Pa]
for v in ['PHQ', 'PHCLDLIQ', 'PHCLDICE', 'TPHYSTND', 'QRL', 'QRS']:
scale_dict[v] *= dP
scale_dict['DTVKE'] *= (dP/DT)
###Output
_____no_output_____
###Markdown
Now we will save this dictionary as a pickle file, so that we can later load it in the training script.
###Code
save_pickle('./nn_config/scale_dicts/001_toms_scaling.pkl', scale_dict)
###Output
_____no_output_____
###Markdown
With this dictionary, we can now create a DataGenerator instance.
A word about the normalization: the normalization is handled by Normalizer classes, defined in `normalization.py`. By default in `DataGenerator`, the `InputNormalizer` class is used for input normalization, while the outputs are scaled using the `DictNormalizer`. `DataGenerator` takes a tuple of strings for `input_transform`. This tuple describes which arrays from the normalization file (`norm_fn`) the input will be subtracted and divided by. If you want to create your own fancy normalization, you have to create such a Normalizer, which has to have a `transform` method.
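To illustrate that last point, here is a hypothetical custom normalizer, a minimal sketch rather than anything from `normalization.py`. Note that the conservation layers further down also read `.sub` and `.div` from `train_gen.input_transform`, so a drop-in replacement for the input normalizer would need those attributes as well.
```python
import numpy as np

class UnitRangeNormalizer:
    """Hypothetical normalizer: scales each input feature to [0, 1]
    using precomputed per-feature minima and maxima."""

    def __init__(self, x_min, x_max):
        self.sub = np.asarray(x_min)                      # what gets subtracted
        self.div = np.asarray(x_max) - np.asarray(x_min)  # what we divide by

    def transform(self, x):
        return (x - self.sub) / self.div
```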
###Code
from cbrain.data_generator import DataGenerator
train_gen = DataGenerator(
data_fn = '/local/S.Rasp/preprocessed_data/000_train_shuffle.nc',
input_vars = in_vars,
output_vars = out_vars,
norm_fn = '/local/S.Rasp/preprocessed_data/000_norm.nc',
input_transform = ('mean', 'maxrs'),
output_transform = scale_dict,
batch_size=1024,
shuffle=True
)
X, Y = train_gen[0]; X.shape, Y.shape
###Output
_____no_output_____
###Markdown
Create a model with conservation layers
Next we need to create a model. This is just basic Keras. I will show here how to use the conservation layers written by Tom. These layers require some additional input, in particular the normalization information.
###Code
from cbrain.layers import *
from tensorflow.keras.layers import *
inp_layer = Input(shape=(304,))
x = Dense(214, activation='elu')(inp_layer)
x = SurRadLayer(
inp_div=train_gen.input_transform.div,
inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ']
)([inp_layer, x])
x = MassConsLayer(
inp_div=train_gen.input_transform.div,
inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ']
)([inp_layer, x])
out_layer = EntConsLayer(
inp_div=train_gen.input_transform.div,
inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ']
)([inp_layer, x])
model = tf.keras.models.Model(inp_layer, out_layer)
model.summary()
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 304) 0
__________________________________________________________________________________________________
dense (Dense) (None, 214) 65270 input_1[0][0]
__________________________________________________________________________________________________
sur_rad_layer (SurRadLayer) (None, 216) 0 input_1[0][0]
dense[0][0]
__________________________________________________________________________________________________
mass_cons_layer (MassConsLayer) (None, 217) 0 input_1[0][0]
sur_rad_layer[0][0]
__________________________________________________________________________________________________
ent_cons_layer (EntConsLayer) (None, 218) 0 input_1[0][0]
mass_cons_layer[0][0]
==================================================================================================
Total params: 65,270
Trainable params: 65,270
Non-trainable params: 0
__________________________________________________________________________________________________
###Markdown
Train the model with the weakly constrained energy loss
Again, this is basic Keras. However, Tom also implemented the option to penalize the network for violating physical constraints, which we will do here.
I implemented the weakly constrained loss as a class in `losses.py`. When initializing this class, you need to pass the input tensor from the model and, again, the normalization information.
This loss computes three losses internally: the standard MSE, a loss for mass conservation and a loss for enthalpy conservation. You can weight the losses by changing the `alpha_*` parameters.
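Schematically (the exact weighting and the defaults are defined in `losses.py`, so take this as a sketch rather than the precise formula), the combined objective has the form

$$\mathcal{L} \;=\; w_{\mathrm{mse}}\,\mathrm{MSE} \;+\; \alpha_{\mathrm{mass}}\,\mathcal{L}_{\mathrm{mass}} \;+\; \alpha_{\mathrm{ent}}\,\mathcal{L}_{\mathrm{ent}},$$

so that, for example, setting `alpha_mass=1, alpha_ent=0` below isolates the mass-conservation term for use as a metric.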
###Code
from cbrain.losses import *
weak_loss = WeakLoss(inp_layer, inp_div=train_gen.input_transform.div, inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ'])
mass_loss = WeakLoss(inp_layer, inp_div=train_gen.input_transform.div, inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ'], alpha_mass=1, alpha_ent=0, name='mass_loss')
ent_loss = WeakLoss(inp_layer, inp_div=train_gen.input_transform.div, inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ'], alpha_mass=0, alpha_ent=1, name='ent_loss')
model.compile(tf.keras.optimizers.Adam(lr=0.01), loss=weak_loss, metrics=[mass_loss, ent_loss, mse])
model.fit_generator(train_gen, epochs=5)
###Output
Epoch 1/5
3448/3448 [==============================] - 109s 32ms/step - loss: 740.1542 - mass_loss: 3.6446e-11 - ent_loss: 7.8155e-11 - mean_squared_error: 1480.3085
Epoch 2/5
3448/3448 [==============================] - 67s 20ms/step - loss: 540.9411 - mass_loss: 6.3662e-11 - ent_loss: 1.4316e-10 - mean_squared_error: 1081.8822
Epoch 3/5
3448/3448 [==============================] - 67s 19ms/step - loss: 419.6522 - mass_loss: 8.7082e-11 - ent_loss: 1.6894e-10 - mean_squared_error: 839.3044
Epoch 4/5
3448/3448 [==============================] - 66s 19ms/step - loss: 356.3085 - mass_loss: 1.0223e-10 - ent_loss: 1.9572e-10 - mean_squared_error: 712.6170
Epoch 5/5
3448/3448 [==============================] - 67s 19ms/step - loss: 322.0714 - mass_loss: 1.1291e-10 - ent_loss: 2.1134e-10 - mean_squared_error: 644.1428
###Markdown
Naturally, since we are using a conserving network, the conservation losses are basically zero. Phew...

Train the network using the train.py script
Doing the training in a notebook is good for experimentation, but for testing different configurations and using large training datasets, we need a command line script, which is called `train.py`. So, let's check out how to use it.
Again we will create a configuration file. This is `000_example.yml`:
```
# Example training configuration file
exp_name: 000_example
data_dir: /local/S.Rasp/preprocessed_data/
train_fn: 000_train_shuffle.nc
valid_fn: 000_valid.nc
norm_fn: 000_norm.nc
inputs: [QBP, QCBP, QIBP, TBP, VBP, Qdt_adiabatic, QCdt_adiabatic, QIdt_adiabatic, Tdt_adiabatic, Vdt_adiabatic, PS, SOLIN, SHFLX, LHFLX]
outputs: [PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND, QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN]
input_sub: mean
input_div: maxrs
output_dict: /home/s/S.Rasp/repositories/CBRAIN-CAM/nn_config/scale_dicts/001_toms_scaling.pkl
hidden_layers: [128, 214]
epochs: 10
conservation_layer: True
loss: weak_loss
```
Most of the arguments are pretty self-explanatory (I hope). If you are confused, look at the definitions at the bottom of `train.py`.
This script automatically uses learning rate decay.
Additionally, and very importantly for the eventual implementation in CAM, it also saves the network and weights.
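The decay schedule visible in the log below drops the learning rate by a factor of 5 every two epochs (0.01, 0.002, 4e-4, ...). As a sketch of how such a schedule could be expressed with a standard Keras callback (this is not the code in `train.py`, just an illustration):
```python
import tensorflow as tf

def step_decay(epoch):
    """Start at 0.01 and multiply by 0.2 every two epochs, matching the
    schedule printed in the training log below."""
    base_lr, factor, step = 0.01, 0.2, 2
    return base_lr * factor ** (epoch // step)

lr_callback = tf.keras.callbacks.LearningRateScheduler(step_decay, verbose=1)
# model.fit_generator(train_gen, epochs=10, callbacks=[lr_callback])
```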
###Code
!python train.py -c nn_config/000_example.yml
###Output
2019-04-03 15:23:20.846557: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 AVX512F FMA
2019-04-03 15:23:20.982342: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1432] Found device 0 with properties:
name: GeForce GTX 1080 major: 6 minor: 1 memoryClockRate(GHz): 1.7335
pciBusID: 0000:b3:00.0
totalMemory: 7.93GiB freeMemory: 5.87GiB
2019-04-03 15:23:20.982378: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1511] Adding visible gpu devices: 0
2019-04-03 15:23:21.453378: I tensorflow/core/common_runtime/gpu/gpu_device.cc:982] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-04-03 15:23:21.453417: I tensorflow/core/common_runtime/gpu/gpu_device.cc:988] 0
2019-04-03 15:23:21.453425: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1001] 0: N
2019-04-03 15:23:21.453721: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 5642 MB memory) -> physical GPU (device: 0, name: GeForce GTX 1080, pci bus id: 0000:b3:00.0, compute capability: 6.1)
04/03/2019 03:23:21 PM Create training and validation data generators
04/03/2019 03:23:21 PM Build model
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 304) 0
__________________________________________________________________________________________________
dense (Dense) (None, 256) 78080 input_1[0][0]
__________________________________________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 256) 0 dense[0][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 256) 65792 leaky_re_lu[0][0]
__________________________________________________________________________________________________
leaky_re_lu_1 (LeakyReLU) (None, 256) 0 dense_1[0][0]
__________________________________________________________________________________________________
dense_2 (Dense) (None, 256) 65792 leaky_re_lu_1[0][0]
__________________________________________________________________________________________________
leaky_re_lu_2 (LeakyReLU) (None, 256) 0 dense_2[0][0]
__________________________________________________________________________________________________
dense_3 (Dense) (None, 214) 54998 leaky_re_lu_2[0][0]
__________________________________________________________________________________________________
leaky_re_lu_3 (LeakyReLU) (None, 214) 0 dense_3[0][0]
__________________________________________________________________________________________________
sur_rad_layer (SurRadLayer) (None, 216) 0 input_1[0][0]
leaky_re_lu_3[0][0]
__________________________________________________________________________________________________
mass_cons_layer (MassConsLayer) (None, 217) 0 input_1[0][0]
sur_rad_layer[0][0]
__________________________________________________________________________________________________
ent_cons_layer (EntConsLayer) (None, 218) 0 input_1[0][0]
mass_cons_layer[0][0]
==================================================================================================
Total params: 264,662
Trainable params: 264,662
Non-trainable params: 0
__________________________________________________________________________________________________
None
04/03/2019 03:23:21 PM Compile model
04/03/2019 03:23:22 PM Train model
2019-04-03 15:23:23.590239: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1511] Adding visible gpu devices: 0
2019-04-03 15:23:23.590287: I tensorflow/core/common_runtime/gpu/gpu_device.cc:982] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-04-03 15:23:23.590296: I tensorflow/core/common_runtime/gpu/gpu_device.cc:988] 0
2019-04-03 15:23:23.590305: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1001] 0: N
2019-04-03 15:23:23.590595: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 5642 MB memory) -> physical GPU (device: 0, name: GeForce GTX 1080, pci bus id: 0000:b3:00.0, compute capability: 6.1)
Learning rate = 0.01
Epoch 1/10
3448/3448 [==============================] - 89s 26ms/step - loss: 66.6859 - mean_squared_error: 133.3718 - mass_loss: 3.4461e-10 - ent_loss: 3.2758e-10 - val_loss: 51.0592 - val_mean_squared_error: 102.1183 - val_mass_loss: 3.8388e-10 - val_ent_loss: 1.9052e-10
Learning rate = 0.01
Epoch 2/10
3448/3448 [==============================] - 82s 24ms/step - loss: 50.0245 - mean_squared_error: 100.0490 - mass_loss: 3.7326e-10 - ent_loss: 3.3711e-10 - val_loss: 50.6125 - val_mean_squared_error: 101.2250 - val_mass_loss: 4.0006e-10 - val_ent_loss: 2.0463e-10
Learning rate = 0.002
Epoch 3/10
3448/3448 [==============================] - 82s 24ms/step - loss: 44.0968 - mean_squared_error: 88.1936 - mass_loss: 3.8555e-10 - ent_loss: 3.3528e-10 - val_loss: 45.2651 - val_mean_squared_error: 90.5302 - val_mass_loss: 3.9009e-10 - val_ent_loss: 1.8705e-10
Learning rate = 0.002
Epoch 4/10
3448/3448 [==============================] - 82s 24ms/step - loss: 43.1805 - mean_squared_error: 86.3610 - mass_loss: 3.9110e-10 - ent_loss: 3.3402e-10 - val_loss: 44.7589 - val_mean_squared_error: 89.5178 - val_mass_loss: 3.6946e-10 - val_ent_loss: 1.7980e-10
Learning rate = 0.0004000000000000001
Epoch 5/10
3448/3448 [==============================] - 83s 24ms/step - loss: 41.5774 - mean_squared_error: 83.1547 - mass_loss: 3.9188e-10 - ent_loss: 3.3378e-10 - val_loss: 43.3000 - val_mean_squared_error: 86.5999 - val_mass_loss: 4.0520e-10 - val_ent_loss: 1.9103e-10
Learning rate = 0.0004000000000000001
Epoch 6/10
3448/3448 [==============================] - 83s 24ms/step - loss: 41.2948 - mean_squared_error: 82.5897 - mass_loss: 3.9295e-10 - ent_loss: 3.3513e-10 - val_loss: 43.2729 - val_mean_squared_error: 86.5458 - val_mass_loss: 3.8670e-10 - val_ent_loss: 1.8113e-10
Learning rate = 8.000000000000002e-05
Epoch 7/10
3448/3448 [==============================] - 82s 24ms/step - loss: 40.8886 - mean_squared_error: 81.7773 - mass_loss: 3.9385e-10 - ent_loss: 3.3514e-10 - val_loss: 42.9063 - val_mean_squared_error: 85.8126 - val_mass_loss: 4.0482e-10 - val_ent_loss: 1.8694e-10
Learning rate = 8.000000000000002e-05
Epoch 8/10
3448/3448 [==============================] - 82s 24ms/step - loss: 40.8127 - mean_squared_error: 81.6254 - mass_loss: 3.9597e-10 - ent_loss: 3.3652e-10 - val_loss: 42.8711 - val_mean_squared_error: 85.7422 - val_mass_loss: 4.0361e-10 - val_ent_loss: 1.8883e-10
Learning rate = 1.6000000000000003e-05
Epoch 9/10
3448/3448 [==============================] - 83s 24ms/step - loss: 40.7166 - mean_squared_error: 81.4333 - mass_loss: 3.9348e-10 - ent_loss: 3.3688e-10 - val_loss: 42.8298 - val_mean_squared_error: 85.6596 - val_mass_loss: 4.0140e-10 - val_ent_loss: 1.8511e-10
Learning rate = 1.6000000000000003e-05
Epoch 10/10
3448/3448 [==============================] - 82s 24ms/step - loss: 40.7036 - mean_squared_error: 81.4072 - mass_loss: 3.9575e-10 - ent_loss: 3.3639e-10 - val_loss: 42.8211 - val_mean_squared_error: 85.6422 - val_mass_loss: 4.0147e-10 - val_ent_loss: 1.8437e-10
04/03/2019 03:37:14 PM Saving model as ./saved_models/000_example/model.h5
###Markdown
We see that we are overfitting quite a bit which is to be expected with such a small dataset.
###Code
!ls ./saved_models/000_example/
###Output
inp_div.txt layer2_bias.txt layer4_bias.txt weights.h5
inp_sub.txt layer2_kernel.txt layer4_kernel.txt
layer1_bias.txt layer3_bias.txt model.h5
layer1_kernel.txt layer3_kernel.txt out_scale.txt
###Markdown
Model diagnostics
Finally, we would like to know how well our model does in more detail than just looking at the loss.
For this I wrote the `ModelDiagnostics` class. It is designed to be convenient. Let's see what it can do.
For basic usage it only needs two arguments: first, the configuration file used for neural network training, and second, the data file which is to be used for validation.
###Code
from cbrain.model_diagnostics.model_diagnostics import ModelDiagnostics
md = ModelDiagnostics('nn_config/000_example.yml', '/local/S.Rasp/preprocessed_data/000_valid.nc')
###Output
_____no_output_____
###Markdown
Plotting
The first thing we can do is plot the truth alongside the model prediction. These functions take all the regular matplotlib arguments.
###Code
md.plot_double_yz(itime=0, ilon=0, var='PHQ', vmin=-8e-8, vmax=8e-8, cmap='bwr');
md.plot_double_xy(0, 0, 'PRECT');
###Output
_____no_output_____
###Markdown
Compute statistics
Last but not least, we can compute statistics over the entire validation dataset.
###Code
md.compute_stats()
###Output
_____no_output_____
###Markdown
Now there is a stats dictionary containing many statistics. If you want to implement your own statistics, you will have to do so in the `compute_stats` method.
###Code
md.stats.keys()
###Output
_____no_output_____
###Markdown
One common statistic, for example, is the R2. So let's plot that. The R2 is averaged over time but all other dimensions are still available. Further, the vertical level is still stacked, so we will have to figure out the variable indices.
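For reference, the R2 here is presumably the usual coefficient of determination, computed over time for each output and grid column:

$$R^2 \;=\; 1 - \frac{\sum_t \left(y_t - \hat{y}_t\right)^2}{\sum_t \left(y_t - \bar{y}\right)^2},$$

where $\hat{y}_t$ is the prediction, $y_t$ the truth and $\bar{y}$ its time mean; check `compute_stats` for the exact definition used.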
###Code
md.stats['r2'].shape
plt.matshow(md.stats['r2'][:, :, md.get_output_var_idx('PHQ')].mean(1).T, vmin=0, vmax=1, cmap='Spectral')
plt.colorbar(shrink=0.7)
###Output
_____no_output_____
###Markdown
We also like looking at the horizontally averaged R2 for each variable.
###Code
md.stats['hor_r2'].shape
md.valid_gen.output_vars[:7]
# Get the vertical coordinate in pressure levels
PS = 1e5; P0 = 1e5;
P = P0*hyai+PS*hybi; # Total pressure [Pa]
P = (P[1:] + P[:-1]) / 2 / 100
fig, ax = plt.subplots(figsize=(8, 8))
for v in md.valid_gen.output_vars[:7]:
ax.plot(md.stats['hor_r2'][md.get_output_var_idx(v)], P, label=v)
ax.set_xlim(0, 1)
ax.invert_yaxis()
plt.legend()
###Output
_____no_output_____
###Markdown
A brief explanation of the revamped CBRAIN moduleIn this guide, we will go through the steps required to preprocess the raw climate model output, train a neural network and then analyze how good it is.
###Code
from cbrain.imports import *
from cbrain.cam_constants import *
from cbrain.utils import *
###Output
_____no_output_____
###Markdown
PreprocessingPreprocessing works using the `preprocessing.py` script in the main directory of the repository along with a configuration file. Let's have a look at one such configuration file to see what is required. Here is `000_test.yml`:```yamlvars : [QBP, QCBP, QIBP, TBP, VBP, PS, SOLIN, SHFLX, LHFLX, PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND, QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN, Qdt_adiabatic, QCdt_adiabatic, QIdt_adiabatic, Tdt_adiabatic, Vdt_adiabatic]in_dir : /local/S.Rasp/sp32fbp_andkua/in_fns : AndKua_aqua_SPCAM3.0_sp_fbp32.cam2.h1.0000-01-0*-00000.ncout_dir: /local/S.Rasp/preprocessed_data/out_fn: 000_train.ncval_in_fns: AndKua_aqua_SPCAM3.0_sp_fbp32.cam2.h1.0000-02-0*-00000.ncval_out_fn: 000_valid.ncnorm_fn: 000_norm.nc```One new thing is that it is not necessary to specifically state the input and output variables at the preprocessing script. These will be chosen later in the data generator. This mean that `vars` can contain as many variables as possible. This should reduce the number of times the preprocessing has to be run.Some of the variables are saved directly in the model output, e.g. `PHQ`, while others, e.g. `QBP` have to be derived. Currently, the following derived variables are available: `*dt_adiabatic`, `*BP` and `PRECST`. If you want to add others, you have to implement it in `convert_dataset.py`.`in_dir` and `in_fns` describe the raw climate model file names. `out_dir` and `out_fn` denote the path to the preprocessed file. The training dataset will additionally be preshuffled.If `val_*` is given, a separate validation dataset will be created.If `norm_fn` is given, statistics will be computed from the training dataset, e.g. the mean and standard deviation. Note that for large training files this takes a very, very long time. Therefore, I recommend computing the normalization file on a small dataset and use it for the large training dataset.
###Code
!python preprocessing.py -c pp_config/000_test.yml
!ls /local/S.Rasp/preprocessed_data/000*
###Output
/local/S.Rasp/preprocessed_data/000_norm.nc
/local/S.Rasp/preprocessed_data/000_train.nc
/local/S.Rasp/preprocessed_data/000_train_shuffle.nc
/local/S.Rasp/preprocessed_data/000_valid.nc
###Markdown
Training the model step-by-step
Let's now go through the steps to train a neural network, starting with the new data generator.

Data generator
To read the preprocessed file and feed the data to the neural net, we will use the `DataGenerator` class in the cbrain module. At this stage we now define the variables we want in the input and output of the neural network.
###Code
in_vars = ['QBP', 'QCBP', 'QIBP', 'TBP', 'VBP',
'Qdt_adiabatic', 'QCdt_adiabatic', 'QIdt_adiabatic', 'Tdt_adiabatic', 'Vdt_adiabatic',
'PS', 'SOLIN', 'SHFLX', 'LHFLX']
out_vars = ['PHQ', 'PHCLDLIQ', 'PHCLDICE', 'TPHYSTND', 'QRL', 'QRS', 'DTVKE',
'FSNT', 'FSNS', 'FLNT', 'FLNS', 'PRECT', 'PRECTEND', 'PRECST', 'PRECSTEN']
###Output
_____no_output_____
###Markdown
Next we need to think about how we want to normalize/scale the inputs and outputs. For the inputs we will use a pretty standard normalization, which we will get to later. As for the outputs, we would like to scale each variable physically. For this we will create a dictionary, which will contain a factor or a vector of factors for each variable.
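A quick unit check on this scaling (my own back-of-the-envelope note, not taken from the repository): scaling a moisture tendency by $L_v/g$ and by the layer thickness $\Delta p_k$ turns it into an energy flux per unit area,

$$\frac{L_v}{g}\,\frac{\partial q_k}{\partial t}\,\Delta p_k \;\;[\mathrm{W\,m^{-2}}],$$

which is why the profile variables get the extra `dP` factor (and `DTVKE` an additional `1/DT`) in the cell below.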
###Code
scale_dict = {
'PHQ': L_V/G,
'PHCLDLIQ': L_V/G,
'PHCLDICE': L_V/G,
'TPHYSTND': C_P/G,
'QRL': C_P/G,
'QRS': C_P/G,
'DTVKE': C_P/G,
'FSNT': 1,
'FSNS': 1,
'FLNT': 1,
'FLNS': 1,
'PRECT': RHO_L*L_V,
'PRECTEND': 1e-3*RHO_L*L_V,
'PRECST': RHO_L*L_V,
'PRECSTEN': 1e-3*RHO_L*L_V
}
# Takes representative value for PS since purpose is normalization
PS = 1e5; P0 = 1e5;
P = P0*hyai+PS*hybi; # Total pressure [Pa]
dP = P[1:]-P[:-1]; # Differential pressure [Pa]
for v in ['PHQ', 'PHCLDLIQ', 'PHCLDICE', 'TPHYSTND', 'QRL', 'QRS']:
scale_dict[v] *= dP
scale_dict['DTVKE'] *= (dP/DT)
###Output
_____no_output_____
###Markdown
Now we will save this dictionary as a pickle file, so that we can later load it in the training script.
###Code
save_pickle('./nn_config/scale_dicts/001_toms_scaling.pkl', scale_dict)
###Output
_____no_output_____
###Markdown
With this dictionary, we can now create a `DataGenerator` instance.

A word about the normalization: it is handled by Normalizer classes, defined in `normalization.py`. By default in `DataGenerator`, the `InputNormalizer` class is used for the inputs, while the outputs are scaled using the `DictNormalizer`. `DataGenerator` takes a tuple of strings for `input_transform`; this tuple describes which arrays from the normalization file (`norm_fn`) the input will be subtracted and divided by. If you want to create your own fancy normalization, you have to create such a Normalizer, which has to have a `transform` method.
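As a minimal sketch of what such a class might look like (illustrative only; the real classes live in `normalization.py`, and the `sub`/`div` attributes here simply mirror what the conservation layers read from `train_gen.input_transform` further down):

```python
import numpy as np

class MyNormalizer:
    """Illustrative custom normalizer: subtract `sub`, divide by `div`."""
    def __init__(self, sub, div):
        self.sub = np.asarray(sub)  # array subtracted from the raw inputs
        self.div = np.asarray(div)  # array the shifted inputs are divided by

    def transform(self, x):
        # The only method a normalizer is required to provide
        return (x - self.sub) / self.div
```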
###Code
from cbrain.data_generator import DataGenerator
train_gen = DataGenerator(
data_fn = '/local/S.Rasp/preprocessed_data/000_train_shuffle.nc',
input_vars = in_vars,
output_vars = out_vars,
norm_fn = '/local/S.Rasp/preprocessed_data/000_norm.nc',
input_transform = ('mean', 'maxrs'),
output_transform = scale_dict,
batch_size=1024,
shuffle=True
)
X, Y = train_gen[0]; X.shape, Y.shape
###Output
_____no_output_____
###Markdown
Create a model with conservation layers
Next we need to create a model. This is just basic Keras. I will show here how to use the conservation layers written by Tom. These layers require some additional input, in particular the normalization information.
###Code
from cbrain.layers import *
from tensorflow.keras.layers import *
inp_layer = Input(shape=(304,))
x = Dense(214, activation='elu')(inp_layer)
x = SurRadLayer(
inp_div=train_gen.input_transform.div,
inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ']
)([inp_layer, x])
x = MassConsLayer(
inp_div=train_gen.input_transform.div,
inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ']
)([inp_layer, x])
out_layer = EntConsLayer(
inp_div=train_gen.input_transform.div,
inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ']
)([inp_layer, x])
model = tf.keras.models.Model(inp_layer, out_layer)
model.summary()
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 304) 0
__________________________________________________________________________________________________
dense (Dense) (None, 214) 65270 input_1[0][0]
__________________________________________________________________________________________________
sur_rad_layer (SurRadLayer) (None, 216) 0 input_1[0][0]
dense[0][0]
__________________________________________________________________________________________________
mass_cons_layer (MassConsLayer) (None, 217) 0 input_1[0][0]
sur_rad_layer[0][0]
__________________________________________________________________________________________________
ent_cons_layer (EntConsLayer) (None, 218) 0 input_1[0][0]
mass_cons_layer[0][0]
==================================================================================================
Total params: 65,270
Trainable params: 65,270
Non-trainable params: 0
__________________________________________________________________________________________________
###Markdown
Train the model with the weakly constrained energy loss
Again, this is basic Keras. However, Tom implemented the option to penalize the network for violating physical constraints, which we will do here.
I implemented the weakly constrained loss as a class in `losses.py`. When initializing this class, you need to pass the input tensor from the model and, again, the normalization information.
This loss computes three losses internally: the standard MSE, a loss for mass conservation and a loss for enthalpy conservation. You can weight the losses by changing the `alpha_*` parameters.
###Code
from cbrain.losses import *
weak_loss = WeakLoss(inp_layer, inp_div=train_gen.input_transform.div, inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ'])
mass_loss = WeakLoss(inp_layer, inp_div=train_gen.input_transform.div, inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ'], alpha_mass=1, alpha_ent=0, name='mass_loss')
ent_loss = WeakLoss(inp_layer, inp_div=train_gen.input_transform.div, inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ'], alpha_mass=0, alpha_ent=1, name='ent_loss')
model.compile(tf.keras.optimizers.Adam(lr=0.01), loss=weak_loss, metrics=[mass_loss, ent_loss, mse])
model.fit_generator(train_gen, epochs=5)
###Output
Epoch 1/5
3448/3448 [==============================] - 109s 32ms/step - loss: 740.1542 - mass_loss: 3.6446e-11 - ent_loss: 7.8155e-11 - mean_squared_error: 1480.3085
Epoch 2/5
3448/3448 [==============================] - 67s 20ms/step - loss: 540.9411 - mass_loss: 6.3662e-11 - ent_loss: 1.4316e-10 - mean_squared_error: 1081.8822
Epoch 3/5
3448/3448 [==============================] - 67s 19ms/step - loss: 419.6522 - mass_loss: 8.7082e-11 - ent_loss: 1.6894e-10 - mean_squared_error: 839.3044
Epoch 4/5
3448/3448 [==============================] - 66s 19ms/step - loss: 356.3085 - mass_loss: 1.0223e-10 - ent_loss: 1.9572e-10 - mean_squared_error: 712.6170
Epoch 5/5
3448/3448 [==============================] - 67s 19ms/step - loss: 322.0714 - mass_loss: 1.1291e-10 - ent_loss: 2.1134e-10 - mean_squared_error: 644.1428
###Markdown
Naturally, since we are using a conserving network, the conservation losses are basically zero. Puh...

Train the network using the train.py script
Doing the training in a notebook is good for experimentation, but for testing different configurations and using large training datasets we need a command line script, which is called `train.py`. So, let's check out how to use it.
Again we will create a configuration file. This is `000_example.yml`:
```yaml
# Example training configuration file
exp_name: 000_example
data_dir: /local/S.Rasp/preprocessed_data/
train_fn: 000_train_shuffle.nc
valid_fn: 000_valid.nc
norm_fn: 000_norm.nc
inputs: [QBP, QCBP, QIBP, TBP, VBP, Qdt_adiabatic, QCdt_adiabatic, QIdt_adiabatic, Tdt_adiabatic, Vdt_adiabatic, PS, SOLIN, SHFLX, LHFLX]
outputs: [PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND, QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN]
input_sub: mean
input_div: maxrs
output_dict: /home/s/S.Rasp/repositories/CBRAIN-CAM/nn_config/scale_dicts/001_toms_scaling.pkl
hidden_layers: [128, 214]
epochs: 10
conservation_layer: True
loss: weak_loss
```
Most of the arguments are pretty self-explanatory (I hope). If you are confused, look at the definitions at the bottom of `train.py`.
This script automatically uses learning rate decay. Additionally, and very importantly for the eventual implementation in CAM, it also saves the network and weights.
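The exact decay schedule lives in `train.py`; judging from the learning rates printed in the log below, it behaves like a step decay that divides the rate by 5 every two epochs. A minimal Keras sketch that would reproduce that pattern (illustrative, not copied from the script):

```python
import tensorflow as tf

def step_decay(epoch, lr):
    # Divide the current learning rate by 5 every two epochs (epochs are 0-indexed).
    return lr * 0.2 if epoch > 0 and epoch % 2 == 0 else lr

lr_callback = tf.keras.callbacks.LearningRateScheduler(step_decay, verbose=1)
# model.fit_generator(train_gen, epochs=10, callbacks=[lr_callback])
```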
###Code
!python train.py -c nn_config/000_example.yml
###Output
2019-04-03 15:23:20.846557: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 AVX512F FMA
2019-04-03 15:23:20.982342: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1432] Found device 0 with properties:
name: GeForce GTX 1080 major: 6 minor: 1 memoryClockRate(GHz): 1.7335
pciBusID: 0000:b3:00.0
totalMemory: 7.93GiB freeMemory: 5.87GiB
2019-04-03 15:23:20.982378: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1511] Adding visible gpu devices: 0
2019-04-03 15:23:21.453378: I tensorflow/core/common_runtime/gpu/gpu_device.cc:982] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-04-03 15:23:21.453417: I tensorflow/core/common_runtime/gpu/gpu_device.cc:988] 0
2019-04-03 15:23:21.453425: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1001] 0: N
2019-04-03 15:23:21.453721: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 5642 MB memory) -> physical GPU (device: 0, name: GeForce GTX 1080, pci bus id: 0000:b3:00.0, compute capability: 6.1)
04/03/2019 03:23:21 PM Create training and validation data generators
04/03/2019 03:23:21 PM Build model
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 304) 0
__________________________________________________________________________________________________
dense (Dense) (None, 256) 78080 input_1[0][0]
__________________________________________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 256) 0 dense[0][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 256) 65792 leaky_re_lu[0][0]
__________________________________________________________________________________________________
leaky_re_lu_1 (LeakyReLU) (None, 256) 0 dense_1[0][0]
__________________________________________________________________________________________________
dense_2 (Dense) (None, 256) 65792 leaky_re_lu_1[0][0]
__________________________________________________________________________________________________
leaky_re_lu_2 (LeakyReLU) (None, 256) 0 dense_2[0][0]
__________________________________________________________________________________________________
dense_3 (Dense) (None, 214) 54998 leaky_re_lu_2[0][0]
__________________________________________________________________________________________________
leaky_re_lu_3 (LeakyReLU) (None, 214) 0 dense_3[0][0]
__________________________________________________________________________________________________
sur_rad_layer (SurRadLayer) (None, 216) 0 input_1[0][0]
leaky_re_lu_3[0][0]
__________________________________________________________________________________________________
mass_cons_layer (MassConsLayer) (None, 217) 0 input_1[0][0]
sur_rad_layer[0][0]
__________________________________________________________________________________________________
ent_cons_layer (EntConsLayer) (None, 218) 0 input_1[0][0]
mass_cons_layer[0][0]
==================================================================================================
Total params: 264,662
Trainable params: 264,662
Non-trainable params: 0
__________________________________________________________________________________________________
None
04/03/2019 03:23:21 PM Compile model
04/03/2019 03:23:22 PM Train model
2019-04-03 15:23:23.590239: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1511] Adding visible gpu devices: 0
2019-04-03 15:23:23.590287: I tensorflow/core/common_runtime/gpu/gpu_device.cc:982] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-04-03 15:23:23.590296: I tensorflow/core/common_runtime/gpu/gpu_device.cc:988] 0
2019-04-03 15:23:23.590305: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1001] 0: N
2019-04-03 15:23:23.590595: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 5642 MB memory) -> physical GPU (device: 0, name: GeForce GTX 1080, pci bus id: 0000:b3:00.0, compute capability: 6.1)
Learning rate = 0.01
Epoch 1/10
3448/3448 [==============================] - 89s 26ms/step - loss: 66.6859 - mean_squared_error: 133.3718 - mass_loss: 3.4461e-10 - ent_loss: 3.2758e-10 - val_loss: 51.0592 - val_mean_squared_error: 102.1183 - val_mass_loss: 3.8388e-10 - val_ent_loss: 1.9052e-10
Learning rate = 0.01
Epoch 2/10
3448/3448 [==============================] - 82s 24ms/step - loss: 50.0245 - mean_squared_error: 100.0490 - mass_loss: 3.7326e-10 - ent_loss: 3.3711e-10 - val_loss: 50.6125 - val_mean_squared_error: 101.2250 - val_mass_loss: 4.0006e-10 - val_ent_loss: 2.0463e-10
Learning rate = 0.002
Epoch 3/10
3448/3448 [==============================] - 82s 24ms/step - loss: 44.0968 - mean_squared_error: 88.1936 - mass_loss: 3.8555e-10 - ent_loss: 3.3528e-10 - val_loss: 45.2651 - val_mean_squared_error: 90.5302 - val_mass_loss: 3.9009e-10 - val_ent_loss: 1.8705e-10
Learning rate = 0.002
Epoch 4/10
3448/3448 [==============================] - 82s 24ms/step - loss: 43.1805 - mean_squared_error: 86.3610 - mass_loss: 3.9110e-10 - ent_loss: 3.3402e-10 - val_loss: 44.7589 - val_mean_squared_error: 89.5178 - val_mass_loss: 3.6946e-10 - val_ent_loss: 1.7980e-10
Learning rate = 0.0004000000000000001
Epoch 5/10
3448/3448 [==============================] - 83s 24ms/step - loss: 41.5774 - mean_squared_error: 83.1547 - mass_loss: 3.9188e-10 - ent_loss: 3.3378e-10 - val_loss: 43.3000 - val_mean_squared_error: 86.5999 - val_mass_loss: 4.0520e-10 - val_ent_loss: 1.9103e-10
Learning rate = 0.0004000000000000001
Epoch 6/10
3448/3448 [==============================] - 83s 24ms/step - loss: 41.2948 - mean_squared_error: 82.5897 - mass_loss: 3.9295e-10 - ent_loss: 3.3513e-10 - val_loss: 43.2729 - val_mean_squared_error: 86.5458 - val_mass_loss: 3.8670e-10 - val_ent_loss: 1.8113e-10
Learning rate = 8.000000000000002e-05
Epoch 7/10
3448/3448 [==============================] - 82s 24ms/step - loss: 40.8886 - mean_squared_error: 81.7773 - mass_loss: 3.9385e-10 - ent_loss: 3.3514e-10 - val_loss: 42.9063 - val_mean_squared_error: 85.8126 - val_mass_loss: 4.0482e-10 - val_ent_loss: 1.8694e-10
Learning rate = 8.000000000000002e-05
Epoch 8/10
3448/3448 [==============================] - 82s 24ms/step - loss: 40.8127 - mean_squared_error: 81.6254 - mass_loss: 3.9597e-10 - ent_loss: 3.3652e-10 - val_loss: 42.8711 - val_mean_squared_error: 85.7422 - val_mass_loss: 4.0361e-10 - val_ent_loss: 1.8883e-10
Learning rate = 1.6000000000000003e-05
Epoch 9/10
3448/3448 [==============================] - 83s 24ms/step - loss: 40.7166 - mean_squared_error: 81.4333 - mass_loss: 3.9348e-10 - ent_loss: 3.3688e-10 - val_loss: 42.8298 - val_mean_squared_error: 85.6596 - val_mass_loss: 4.0140e-10 - val_ent_loss: 1.8511e-10
Learning rate = 1.6000000000000003e-05
Epoch 10/10
3448/3448 [==============================] - 82s 24ms/step - loss: 40.7036 - mean_squared_error: 81.4072 - mass_loss: 3.9575e-10 - ent_loss: 3.3639e-10 - val_loss: 42.8211 - val_mean_squared_error: 85.6422 - val_mass_loss: 4.0147e-10 - val_ent_loss: 1.8437e-10
04/03/2019 03:37:14 PM Saving model as ./saved_models/000_example/model.h5
###Markdown
We see that we are overfitting quite a bit, which is to be expected with such a small dataset.
###Code
!ls ./saved_models/000_example/
###Output
inp_div.txt layer2_bias.txt layer4_bias.txt weights.h5
inp_sub.txt layer2_kernel.txt layer4_kernel.txt
layer1_bias.txt layer3_bias.txt model.h5
layer1_kernel.txt layer3_kernel.txt out_scale.txt
###Markdown
Model diagnostics
Finally, we would like to know how well our model does in more detail than just looking at the loss. For this I wrote the `ModelDiagnostics` class. It is designed to be convenient. Let's see what it can do.
For basic usage it only needs two arguments: first, the configuration file used for neural network training, and second, the data file to be used for validation.
###Code
from cbrain.model_diagnostics.model_diagnostics import ModelDiagnostics
md = ModelDiagnostics('nn_config/000_example.yml', '/local/S.Rasp/preprocessed_data/000_valid.nc')
###Output
_____no_output_____
###Markdown
Plotting
The first thing we can do is plot the truth alongside the model prediction. These functions take all the regular matplotlib arguments.
###Code
md.plot_double_yz(itime=0, ilon=0, var='PHQ', vmin=-8e-8, vmax=8e-8, cmap='bwr');
md.plot_double_xy(0, 0, 'PRECT');
###Output
_____no_output_____
###Markdown
Compute statistics
Last but not least, we can compute statistics over the entire validation dataset.
###Code
md.compute_stats()
###Output
_____no_output_____
###Markdown
Now there is a stats dictionary containing many statistics. If you want to implement your own statistics, you will have to do so in the `compute_stats` method.
###Code
md.stats.keys()
###Output
_____no_output_____
###Markdown
One common statistic, for example, is the R2. So let's plot that. The R2 is averaged over time but all other dimensions are still available. Further, the vertical level is still stacked, so we will have to figure out the variable indices.
###Code
md.stats['r2'].shape
plt.matshow(md.stats['r2'][:, :, md.get_output_var_idx('PHQ')].mean(1).T, vmin=0, vmax=1, cmap='Spectral')
plt.colorbar(shrink=0.7)
###Output
_____no_output_____
###Markdown
We also like looking at the horizontally averaged R2 for each variable.
###Code
md.stats['hor_r2'].shape
md.valid_gen.output_vars[:7]
# Get the vertical coordinate in pressure levels
PS = 1e5; P0 = 1e5;
P = P0*hyai+PS*hybi; # Total pressure [Pa]
P = (P[1:] + P[:-1]) / 2 / 100
fig, ax = plt.subplots(figsize=(8, 8))
for v in md.valid_gen.output_vars[:7]:
ax.plot(md.stats['hor_r2'][md.get_output_var_idx(v)], P, label=v)
ax.set_xlim(0, 1)
ax.invert_yaxis()
plt.legend()
###Output
_____no_output_____
###Markdown
a.itemsize
ndarray.itemsize: the size in bytes of each element of the array. For example, an array of elements of type float64 has itemsize 8 (=64/8), while one of type complex64 has itemsize 8 (two 32-bit floats). It is equivalent to ndarray.dtype.itemsize.
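A quick illustrative check of a few dtypes:

```python
import numpy as np

np.array([1.0], dtype=np.float64).itemsize       # 8 bytes
np.array([1], dtype=np.int32).itemsize           # 4 bytes
np.array([1 + 0j], dtype=np.complex64).itemsize  # 8 bytes (two 32-bit floats)
```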
###Code
a.ndim
a.data
###Output
_____no_output_____
###Markdown
ndarray.data: the buffer containing the actual elements of the array. Normally, we won’t need to use this attribute because we will access the elements in an array using indexing facilities.
###Code
type(a)
###Output
_____no_output_____
###Markdown
array creation
###Code
import numpy as np
s = np.array([2,3,4,3,22,34,56])
print(s)
type(s)
st = np.array((1,2,3,5,66,75,44))
st
type(st)
st.dtype
ss = np.arange(20, dtype=np.float32)
ss
ss.dtype #by default the numpy float is float64
ss.reshape(2,2,5)
ss.dtype
d = np.array([[3.4,44.5],[55.66,7.7]], dtype = complex)
d
d.imag
d.real
type(d)
d.dtype # by default the numpy complex is complex 128
d.shape
d.itemsize
d.data
d
d.T
d.shape
d.T.shape
t = np.array(((2,3,4,5),(44,56,77,88)), dtype = complex)
t
tt = np.array(((2,3,4,5),(44,56,77,88)), dtype = float)
tt
tt.dtype
import numpy as np
np.zeros((3,4), dtype = int)
np.eye(5,5,dtype=int)
np.ones((3,3),dtype=float)
np.empty((3,3), dtype = int)
np.arange(20)
f= np.arange(30,40,.2, dtype=float).reshape((10,5))
f.size
f
np.linspace(2,10,25, dtype= float).reshape((5,5))
import numpy as np
import matplotlib.pyplot as plt
a = np.linspace(0,20,200)
b = np.sin(a)
bb = np.exp(a)
plt.title("sine and exponential plot")
plt.plot(b,bb)
np.random.rand(3,3)
np.random.random((3,4))
np.random.randn(5,3)
np.random.randint(44,54)
np.random.randint((44,54))
np.random.randint(44)
f = np.random.normal()
f
np.random.normal(22)
np.random.normal((22,30))
np.random.normal(22,30)
type(f)
np.arange(2999)
import sys
np.set_printoptions(threshold=sys.maxsize)
###Output
_____no_output_____
###Markdown
Basic operations
###Code
import numpy as np
a = np.arange(4)
b= np.array([33,44,55,66])
c= b-a
c
b**3
10*np.sin(b)
a<33
a = np.array( [[1,1],
[0,1]] )
b = np.array( [[2,0],
[3,4]] )
a*b
a**b
a.dot(b)
a@b
a.dtype.name
ddd = np.random.rand(3,3)
ddd
ddd.dtype
ddd.dtype.name
ddd.sum()
ddd.min()
ddd.max()
ddd.mean()
ddd.std()
ddd.var()
cs = ddd.cumsum()
cs
plt.plot(cs,ddd.ravel(),c="r")
plt.title('Cumsum and original flatten data plot')
plt.xlabel("Cumulative sum")
plt.ylabel("Flattened array")
ml = np.array([[[2,22,33,43,3],[44,54,5,6,77]],
[[4,33,22,11,123],[6,77,56,4,37]]
])
ml
ml.ndim
ml.shape
type(ml)
ml.dtype
ml.sum(axis=0)
ml.sum(axis=2)
ml.sum(axis=1)
ml.min(axis=2)
ml.min(axis=1)
ml.max(axis=2)
ml.max(axis=1)
ml.cumsum(axis=2)
ml.cumsum(axis=1)
ml.mean(axis=2)
ml.mean(axis=1)
a= np.arange(3)
a
np.exp(a)
np.sqrt(a)
np.add(a,np.exp(a))
np.subtract(a,np.sqrt(a))
np.multiply(a,np.sum(a))
np.divide(a,np.exp(a))
w = np.arange(10)*2
w
w[:5]
w[::2]
w[:7:2]=-100
w
w
w[::-1]
for i in w:
print(i*(2/3), end ="\n")
def f(x,y):
return 10*x+y
b= np.fromfunction(f,(5,5),dtype=np.int)
b
b[2,4]
b[:3]
b[3:4]
b[:5,2]
b[:,2]
b[-1]
b[3]
b
for i in b.flat:
print(i)
###Output
0
1
2
3
4
10
11
12
13
14
20
21
22
23
24
30
31
32
33
34
40
41
42
43
44
###Markdown
column_stack == hstack (only for 2-D arrays). On the other hand, the function row_stack is equivalent to vstack for any input arrays. In fact, row_stack is an alias for vstack:
###Code
np.column_stack is np.hstack
np.row_stack is np.vstack
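# Quick illustration of the caveat above: for 1-D inputs the two functions differ.
# column_stack treats 1-D arrays as columns, while hstack simply concatenates them.
np.column_stack(([1, 2], [3, 4]))  # -> array([[1, 3],
                                   #           [2, 4]])
np.hstack(([1, 2], [3, 4]))        # -> array([1, 2, 3, 4])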
import numpy as np
import matplotlib.pyplot as plt
# Build a vector of 10000 normal deviates with variance 0.5^2 and mean 2
mu, sigma = 2, 0.5
v = np.random.normal(mu,sigma,2000)
#print(v)
# Plot a normalized histogram with 50 bins
plt.hist(v, bins=50, density=0) # matplotlib version (plot)
plt.show()
np.r_[1:4,0,4]
id(a)
b = np.random.random((2,3))
a *= 3
print(b)
a
b += a
b
a
b
a += b # b is not automatically converted to integer type
d=[]
for i in b:
for j in i:
d.append(j)
d
dd=[]
for i in d:
dd.append(np.floor(i))
dd
a+=dd
a
p = np.exp(a*1j)
p
p.dtype.name
def f(x,y):
return 10*x+y
b = np.fromfunction(f,(5,4),dtype=int)
b
b[:,3]
b[-1]
for i in range(10):
for j in range(i+1):
print("*", end='')
print()
for k in range(10,0,-1):
for jj in range(k):
print("&",end="")
print()
a= np.arange(10)
d=a.copy()
d.flags.owndata
d.base is a
d is a
d.shape =2,5
d.shape
d
a.shape
d[:1]=-100
d
a
a = np.arange(1e8)
d = a[:100].copy()
print(d)
del a
###Output
[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17.
18. 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31. 32. 33. 34. 35.
36. 37. 38. 39. 40. 41. 42. 43. 44. 45. 46. 47. 48. 49. 50. 51. 52. 53.
54. 55. 56. 57. 58. 59. 60. 61. 62. 63. 64. 65. 66. 67. 68. 69. 70. 71.
72. 73. 74. 75. 76. 77. 78. 79. 80. 81. 82. 83. 84. 85. 86. 87. 88. 89.
90. 91. 92. 93. 94. 95. 96. 97. 98. 99.]
###Markdown
If b = a[:100] is used instead, a is referenced by b and will persist in memory even if del a is executed.
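A minimal illustration of that difference (mirroring the cell above):

```python
import numpy as np

a = np.arange(1e8)
b = a[:100]          # a view: b.base is a, so the full ~800 MB buffer stays referenced
del a                # the buffer survives as long as `b` exists

a2 = np.arange(1e8)
c = a2[:100].copy()  # an independent copy of just 100 elements
del a2               # now the large buffer can actually be freed
```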
###Code
a = np.arange(12)**2 # the first 12 square numbers
i = np.array( [ 1,1,3,8,5 ] ) # an array of indices
a[i] # the elements of a at the positions i
j = np.array( [ [ 3, 4], [ 9, 7 ] ] ) # a bidimensional array of indices
a[j]
palette = np.array( [ [0,0,0], # black
[255,0,0], # red
[0,255,0], # green
[0,0,255], # blue
[255,255,255] ] ) # white
image = np.array( [ [ 0, 1, 2, 0 ], # each value corresponds to a color in the palette
[ 0, 3, 4, 0 ] ] )
palette[image] # the (2,4,3) color image
a = np.arange(12).reshape(3,4)
print(a)
i = np.array( [ [0,1],[1,2] ] ) # indices for the first dim of a
j = np.array( [ [2,1],[3,3] ] ) # indices for the second dim
a[i,j] # i and j must have equal shape
a[i,2]
a[:,j] # i.e., a[ : , j] ### Very important
###Output
_____no_output_____
###Markdown
Imports:
###Code
from samalg import SAM
from samalg.gui import SAMGUI #this is the interactive GUI
###Output
_____no_output_____
###Markdown
Initialize SAM
###Code
sam=SAM()
###Output
_____no_output_____
###Markdown
Load data from a file
###Code
sam.load_data('../example_data/schisto2.5_tpm.csv.gz')
###Output
_____no_output_____
###Markdown
Preprocess the data
###Code
sam.preprocess_data()
###Output
_____no_output_____
###Markdown
Run SAM
###Code
sam.run()
###Output
RUNNING SAM
Iteration: 0, Convergence: 0.4561882341279265
Iteration: 1, Convergence: 0.133654926038098
Iteration: 2, Convergence: 0.06908019207454287
Iteration: 3, Convergence: 0.020474718040976458
Iteration: 4, Convergence: 0.00790964979091267
Computing the UMAP embedding...
Elapsed time: 6.633909225463867 seconds
###Markdown
Look at the 2D UMAP projection
###Code
sam.scatter()
###Output
_____no_output_____
###Markdown
Launch the interactive GUI:
###Code
gui_obj = SAMGUI(sam)
gui_obj.SamPlot
###Output
_____no_output_____
###Markdown
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" style="display: block; margin-left: auto; margin-right: auto; width: 60%;"/> Install Dependencies
###Code
!pip install underthesea==1.3.1
!pip install torch==1.8.1
!pip install transformers==4.6.1
!pip install gdown==3.13.0
!git clone https://github.com/mtb-hust/Poem-Generator.git
cd Poem-Generator
###Output
_____no_output_____
###Markdown
Inference
Generate poem with some words input
We have 3 models:

| Model | Model ID |
|-----------------------|----------|
| Word Level GPT2 | 0 |
| Syllable Level GPT2 | 1 |
| Our Custom Loss Model | 2 |
###Code
from ailamtho import PoemGenerator
# Please specify model_id
generator = PoemGenerator(model_id=0)
context_input = 'ai ơi'
poem = generator.generate_poem(context=context_input, n_stanzas=2)
print(poem)
###Output
_____no_output_____
###Markdown
Generate poem with some words input and desired topic
There are 5 poem topics in total: Gia Dinh, Tinh Yeu, Dich Benh, Que Huong, Le Tet.

| Topic | Topic ID |
|-----------|----------|
| Gia Dinh | 0 |
| Tinh Yeu | 1 |
| Dich Benh | 2 |
| Que Huong | 3 |
| Le Tet | 4 |

You can only choose one topic each time you generate a poem. However, you can choose another topic on the next generation without having to recreate the ```generator``` object.
###Code
from ailamtho import ControlledPoemGenerator
generator = ControlledPoemGenerator()
context_input = 'cha mẹ'
poem = generator.generate_poem(context=context_input, topic_id=0)
print(poem)
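# The same generator can be reused with a different topic without being recreated,
# e.g. for the 'Tinh Yeu' topic (the context string below is just an illustration):
# poem_love = generator.generate_poem(context='em ơi', topic_id=1)
# print(poem_love)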
###Output
_____no_output_____
###Markdown
Quickstart Guide: This Quickstart Guide presents a simple example of an **ocean data challenge** for mapping the Sea Surface Height (SSH) from sparse observations. The methodology is based on an Observing System Simulation Experiment (OSSE). The input data represent altimeter observations extracted from a realistic high-resolution ocean model simulation (NATL60). A simple mapping algorithm (Optimal Interpolation) is used to produce the reconstructed SSH field from the sparse observations. Finally, a comparison between the reconstructed and the reference SSH fields is done to quantify the reconstruction scores.

Three experiments are carried out:
> A) **Experiment 1**: demo. of reconstruction with **1 nadir altimeter**
> B) **Experiment 2**: demo. of reconstruction with **4 nadir altimeters**
> C) **Experiment 3**: demo. of reconstruction with **1 SWOT altimeter**

The notebook is structured as follows:
1) downloading the data
2) setup configuration of the interpolation
3) run the experiments
4) plot the reconstruction scores for each experiment

This quickstart guide takes approx. 30 min to run on a PC.
###Code
import xarray as xr
import numpy
import hvplot.xarray
import pyinterp
import dask
import warnings
import xrft
import logging
import pandas as pd
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
libraries versions
###Code
print('xarray', xr.__version__)
print('numpy', numpy.__version__)
print('hvplot', hvplot.__version__)
print('pyinterp', pyinterp.__version__)
print('dask', dask.__version__)
# print('logging', logging.__version__)  # the stdlib logging module does not expose __version__ in Python 3
print('xrft', xrft.__version__)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
cluster = dask.distributed.LocalCluster()
client = dask.distributed.Client(cluster)
client
from src.mod_oi import *
from src.mod_inout import *
from src.mod_regrid import *
from src.mod_eval import *
from src.mod_plot import *
###Output
_____no_output_____
###Markdown
1- DOWNLOADING DATA
Nature run SSH for mapping evaluation
###Code
%time
import gcsfs
fs = gcsfs.GCSFileSystem('pangeo-181919', requester_pays=True)
mapfilesref = fs.get_mapper('pangeo-meom/data-challenge-test/dc_ref')
dc_ref = xr.open_zarr(mapfilesref)
dc_ref
###Output
_____no_output_____
###Markdown
Synthetic SSH observation for OI mapping
###Code
!wget https://ige-meom-opendap.univ-grenoble-alpes.fr/thredds/fileServer/meomopendap/extract/ocean-data-challenges/dc_data1/dc_obs.tar.gz
!tar -xvf dc_obs.tar.gz
###Output
_____no_output_____
###Markdown
2- SETUP CONFIGURATION
###Code
# OI Grid
lon_min = -65.
lon_max = -55.
lat_min = 33.
lat_max = 43.
time_min = numpy.datetime64('2012-10-22')
time_max = numpy.datetime64('2012-12-02')
dx = 0.2 # zonal grid spatial step (in degree)
dy = 0.2 # meridional grid spatial step (in degree)
dt = numpy.timedelta64(1, 'D') # temporal grid step
simu_start_date = '2012-10-01T00:00:00' # Nature run initial date
glon = numpy.arange(lon_min, lon_max + dx, dx)
glat = numpy.arange(lat_min, lat_max + dy, dy)
gtime = numpy.arange(time_min, time_max + dt, dt)
# OI parameters
Lx = 1. # Zonal decorrelation scale (in degree)
Ly = 1. # Meridional decorrelation scale (in degree)
Lt = 7. # Temporal decorrelation scale (in days)
noise = 0.05 # Noise level (5%)
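# Note (illustrative, not taken from src.mod_oi): in optimal interpolation these
# scales typically parameterize a Gaussian-like space-time covariance, e.g.
#   C(dx, dy, dt) ~ exp(-(dx/Lx)**2 - (dy/Ly)**2 - (dt/Lt)**2),
# while `noise` sets the assumed observation-error level relative to the signal.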
###Output
_____no_output_____
###Markdown
3- RUN EXPERIMENTS
Select dc_ref sample
###Code
dc_ref_sample = dc_ref.sel(time=slice(time_min, time_max)).resample(time='1D').mean()
del dc_ref
dc_ref_sample
###Output
_____no_output_____
###Markdown
Define input data observations for each experiment
###Code
one_nadir = ['./dc_obs/2020a_SSH_mapping_NATL60_jason1.nc']
four_nadirs = ['./dc_obs/2020a_SSH_mapping_NATL60_envisat.nc',
'./dc_obs/2020a_SSH_mapping_NATL60_geosat2.nc',
'./dc_obs/2020a_SSH_mapping_NATL60_topex-poseidon_interleaved.nc',
'./dc_obs/2020a_SSH_mapping_NATL60_jason1.nc']
one_swot = ['./dc_obs/2020a_SSH_mapping_NATL60_karin_swot.nc', './dc_obs/2020a_SSH_mapping_NATL60_nadir_swot.nc']
###Output
_____no_output_____
###Markdown
- EXP. 1: Demo. OI 1 nadir
###Code
%%time
# set OI param & grid
ds_oi1_param = oi_param(Lx, Ly, Lt, noise)
ds_oi1_grid = oi_grid(glon, glat, gtime, simu_start_date)
# Read input obs + discard a bit...
coarsening = {'time': 5}
ds_oi1_obs = read_obs(one_nadir, ds_oi1_grid, ds_oi1_param, simu_start_date, coarsening)
# Run OI
for it in range(len(gtime)):
oi_core(it, ds_oi1_grid, ds_oi1_param, ds_oi1_obs)
# Regrid
ds_oi1_regrid = oi_regrid(ds_oi1_grid, dc_ref_sample)
# Eval
rmse_t_oi1, rmse_xy_oi1, leaderboard_nrmse, leaderboard_nrmse_std = rmse_based_scores(ds_oi1_regrid, dc_ref_sample)
psd_oi1, leaderboard_psds_score, leaderboard_psdt_score = psd_based_scores(ds_oi1_regrid, dc_ref_sample)
# Print leaderboard
data = [['OI 1 nadir',
leaderboard_nrmse,
leaderboard_nrmse_std,
leaderboard_psds_score,
leaderboard_psdt_score,
'Covariances not optimized',
'quickstart.ipynb']]
Leaderboard = pd.DataFrame(data,
columns=['Method',
r"$\overline{RMSE_{S}}$",
r"$\sigma(RMSE_{S})$",
r'$\lambda_{x}$ (degree)',
r'$\lambda_{t}$ (days)',
'Notes',
'Reference'])
print("Summary of the leaderboard metrics:")
Leaderboard
print(Leaderboard.to_markdown())
###Output
_____no_output_____
###Markdown
- EXP. 2: Demo. OI 4 nadirs
###Code
%%time
# set OI param & grid
ds_oi2_param = oi_param(Lx, Ly, Lt, noise)
ds_oi2_grid = oi_grid(glon, glat, gtime, simu_start_date)
# Read input obs + discard a bit...
coarsening = {'time': 5}
ds_oi2_obs = read_obs(four_nadirs, ds_oi2_grid, ds_oi2_param, simu_start_date, coarsening)
# Run OI
for it in range(len(gtime)):
oi_core(it, ds_oi2_grid, ds_oi2_param, ds_oi2_obs)
# Regrid
ds_oi2_regrid = oi_regrid(ds_oi2_grid, dc_ref_sample)
# Eval
rmse_t_oi2, rmse_xy_oi2, leaderboard_nrmse, leaderboard_nrmse_std = rmse_based_scores(ds_oi2_regrid, dc_ref_sample)
psd_oi2, leaderboard_psds_score, leaderboard_psdt_score = psd_based_scores(ds_oi2_regrid, dc_ref_sample)
# Print leaderboard
data = [['OI 4 nadirs',
leaderboard_nrmse,
leaderboard_nrmse_std,
leaderboard_psds_score,
leaderboard_psdt_score,
'Covariances not optimized',
'quickstart.ipynb']]
Leaderboard = pd.DataFrame(data,
columns=['Method',
r"$\overline{RMSE_{S}}$",
r"$\sigma(RMSE_{S})$",
r'$\lambda_{x}$ (degree)',
r'$\lambda_{t}$ (days)',
'Notes',
'Reference'])
print("Summary of the leaderboard metrics:")
Leaderboard
print(Leaderboard.to_markdown())
###Output
_____no_output_____
###Markdown
- EXP. 3: Demo. OI 1 swot
###Code
%%time
# set OI param & grid
ds_oi3_param = oi_param(Lx, Ly, Lt, noise)
ds_oi3_grid = oi_grid(glon, glat, gtime, simu_start_date)
# Read input obs + discard a bit...
coarsening = {'time': 15, 'nC': 6}
ds_oi3_obs = read_obs(one_swot[0], ds_oi3_grid, ds_oi3_param, simu_start_date, coarsening)
# Important line: vectorize for SWOT like data:
ds_oi3_obs = ds_oi3_obs.stack(z=('nC', 'time')).dropna(dim='z')
# Run OI
for it in range(len(gtime)):
oi_core(it, ds_oi3_grid, ds_oi3_param, ds_oi3_obs)
# Regrid
ds_oi3_regrid = oi_regrid(ds_oi3_grid, dc_ref_sample)
# Eval
rmse_t_oi3, rmse_xy_oi3, leaderboard_nrmse, leaderboard_nrmse_std = rmse_based_scores(ds_oi3_regrid, dc_ref_sample)
psd_oi3, leaderboard_psds_score, leaderboard_psdt_score = psd_based_scores(ds_oi3_regrid, dc_ref_sample)
# Print leaderboard
data = [['OI 1 swot',
leaderboard_nrmse,
leaderboard_nrmse_std,
leaderboard_psds_score,
leaderboard_psdt_score,
'Covariances not optimized',
'quickstart.ipynb']]
Leaderboard = pd.DataFrame(data,
columns=['Method',
r"$\overline{RMSE_{S}}$",
r"$\sigma(RMSE_{S})$",
r'$\lambda_{x}$ (degree)',
r'$\lambda_{t}$ (days)',
'Notes',
'Reference'])
print("Summary of the leaderboard metrics:")
Leaderboard
print(Leaderboard.to_markdown())
###Output
_____no_output_____
###Markdown
- PLOT EVALUATION SCORES
###Code
rmse_concat = xr.concat((rmse_t_oi1, rmse_t_oi2, rmse_t_oi3), dim='experiment')
rmse_concat['experiment'] = ["1 nadir", "4 nadirs", "1 SWOT"]
rmse_concat.hvplot.line(x='time', y='rmse_t', by='experiment', ylim=(0, 1), cmap=['royalblue', 'orange', 'lightcoral'], title='RMSE-based scores')
###Output
_____no_output_____
###Markdown
The figure above represents the RMSE-based score time series for the SSH reconstruction with 1 nadir, with 4 nadirs and with 1 SWOT. Several conclusions can be drawn: a) a better score is found in the reconstruction with 4 nadirs than with 1 nadir, b) the reconstructions with 1 SWOT and with 4 nadirs are relatively equivalent, c) the variability of the SWOT score is higher than with 4 nadirs, which is directly linked to the spatio-temporal sampling of the observations (see the figure below for the number of observations per day in the OI).
###Code
nobs_concat = xr.concat((ds_oi1_grid.nobs, ds_oi2_grid.nobs, ds_oi3_grid.nobs), dim='experiment')
nobs_concat['experiment'] = ["1 nadir", "4 nadirs", "1 SWOT"]
nobs_concat.hvplot.bar(x='time', y='nobs', by='experiment', alpha=0.7, stacked=True, cmap=['orange', 'royalblue', 'lightcoral'], title='# obs in OI')
rmse_xy_concat = xr.concat((rmse_xy_oi1, rmse_xy_oi2, rmse_xy_oi3), dim='experiment')
rmse_xy_concat['experiment'] = ["1 nadir", "4 nadirs", "1 SWOT"]
rmse_xy_concat.hvplot.contourf(x='lon', y='lat', levels=list(numpy.arange(0.,0.75, 0.05)), height=300, width=400, cmap='Reds', subplots=True, by='experiment', clabel='RMSE[m]')
psd_concat = xr.concat((psd_oi1, psd_oi2, psd_oi3), dim='experiment')
psd_concat['experiment'] = ["1 nadir", "4 nadirs", "1 SWOT"]
plot_psd_score_v0(psd_concat)
###Output
_____no_output_____
###Markdown
Fairness Sandbox Skeleton - QUICKSTART
Easy to Run/Compile Version of the Skeleton Notebook

Setup
Please run the code block below to import (and, if needed, install) the necessary packages.
###Code
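# If any of these packages are missing, they can be installed first, e.g.:
# !pip install pandas numpy matplotlib seaborn scikit-learn fairlearn aif360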
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
from collections import Counter
import fairlearn
from fairlearn.metrics import *
import aif360
import copy
###Output
_____no_output_____
###Markdown
Data
###Code
class Dataset:
def __init__(self, short_name = '', path = '', cat_cols = [], num_cols = []):
self.short_name = short_name
self.path = path
self.cat_cols = cat_cols
self.num_cols = num_cols
self.df = pd.read_csv(path, sep = ';')
# each dataset is a dictionary where keys = short name, values = Dataset object
datasets = dict()
def add_dataset(dataset):
if not isinstance(dataset, Dataset):
print("Error! Please enter a valid Dataset object")
else:
if dataset.short_name not in datasets.keys():
datasets[dataset.short_name] = dataset
# example - adding a dataset
path_adult_income = 'Datasets/adult.csv'
cat_cols = ['workclass', 'education','marital-status', 'occupation', 'relationship', 'race',
'gender', 'native-country','income']
num_cols = ['age', 'fnlwgt', 'educational-num', 'capital-gain', 'capital-loss', 'hours-per-week']
adult_income = Dataset('adult_income', path_adult_income, cat_cols, num_cols)
add_dataset(adult_income)
cat = ['school', 'sex', 'address','famsize','Pstatus','Mjob','Fjob','reason',
'guardian','schoolsup','famsup','paid', 'activities','nursery','higher', 'internet','romantic']
num = ['age', 'Medu', 'Fedu','traveltime','studytime','failures', 'famrel',
'freetime','goout','Dalc','Walc','health','absences','G1', 'G2', 'G3']
add_dataset(Dataset("student_mat", path='Datasets/student-mat.csv', cat_cols=cat, num_cols=num))
add_dataset(Dataset("student_por", path='Datasets/student-por.csv', cat_cols=cat, num_cols=num))
df_por = datasets['student_por'].df
###Output
_____no_output_____
###Markdown
EDA
###Code
def plot_counts(df, attr):
if attr in df.columns:
df[attr].value_counts(normalize=True).plot.barh()
else:
print("Error! Please enter a valid feature.")
# example
# plot_counts(df_por, 'sex')
def group_by_plot(df, attr1, attr2):
for val in list(df[attr1].unique()):
print(val)
temp = df[df[attr1] == val]
sns.displot(temp[attr2])
# example
# group_by_plot(df_por, 'sex', 'G3')
###Output
_____no_output_____
###Markdown
Problem Formulation
###Code
def threshold(df, g_1=0.3, g_2=0.3, g_3=0.4, threshold=11):
"""
Added "pass/fail" to make problem binary classification
"""
assert g_1 + g_2 + g_3 == 1, "The sum of the percentages should be 1"
assert 0 < threshold < 20, "Threshold needs to be between 0 and 20"
df['pass'] = df.apply(lambda row: 1
if g_1*row['G1'] + g_2*row['G2'] + g_3*row['G3'] >= threshold
else 0, axis=1)
threshold(df_por, threshold=14)
sens_attrs = [df_por['sex'], df_por['address']]
###Output
_____no_output_____
###Markdown
Data Preprocessing
###Code
# format data
X = df_por.iloc[:, :-2].values
y = df_por.iloc[:, -1].values
# OHE categorical features (prompt for user's choice here?)
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
# get indices of categorical columns
def get_cat_cols(dataset):
df = dataset.df
res = []
for col in dataset.cat_cols:
res.append(df.columns.get_loc(col))
return res
cat_cols = get_cat_cols(datasets['student_por'])
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), cat_cols)], remainder='passthrough')
X = np.array(ct.fit_transform(X))
X_true = X
y_true = df_por['pass']
count = pd.value_counts(df_por['address'], sort = True)
count.plot(kind = 'bar', rot = 0)
###Output
_____no_output_____
###Markdown
Under-Sampling Process
###Code
# separate based on protected attribute
sens_values = sens_attrs[1].unique()
# TODO - add prompt for user to specify which value is favored and which is unfavored
df_favored = df_por[df_por['address'] == 'U']
df_unfavored = df_por[df_por['address'] == 'R']
# under-sampling process
df_undersampled = df_unfavored.sample(n=190, random_state=42)
#print(df_favored.shape, df_unfavored.shape, df_undersampled.shape)
# combine undersampled and original favored class to create dataset
df_concat = pd.concat([df_favored,df_undersampled])
df_concat.shape
# for fairness measures later
df_sens = df_concat['address']
# format data
X_bias = df_concat.iloc[:, :-2].values
#print(X_undersampled.shape)
y_bias = df_concat.iloc[:, -1].values
# OHE
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), cat_cols)], remainder='passthrough')
X_bias_true = np.array(ct.fit_transform(X_bias))
y_bias_true = df_concat['pass']
#print(X_true.shape, X_bias_true.shape)
#print(y_true.value_counts(), "\n\n", y_bias_true.value_counts())
favored = len(df_favored)
true_unfavored = len(df_por[df_por['address'] == 'R'])
bias_unfavored = len(df_undersampled)
x_vals = ['Favored', "Unfavored"]
y_vals_true = [favored, true_unfavored]
y_vals_bias = [favored, bias_unfavored]
plt.subplot(1,2,1)
plt.bar(x_vals, y_vals_true)
plt.title("Ground Truth")
plt.ylabel("Count")
plt.subplot(1,2,2)
plt.bar(x_vals, y_vals_bias)
plt.title("Under-Sampling")
plt.ylim([0,500])
plt.show()
###Output
_____no_output_____
###Markdown
Model Selection + Training
###Code
# modularize and add data struct of different ml techniques
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(min_samples_leaf = 10, max_depth = 4)
classifier_true = classifier.fit(X_true, y_true)
y_pred_truth = classifier_true.predict(X_true)
classifier_bias = classifier.fit(X_bias_true, y_bias_true)
y_pred_bias = classifier_bias.predict(X_bias_true)
y_pred_bias_on_true = classifier_bias.predict(X_true)
print("Accuracy of Ground Truth Model on Ground Truth Data: ", accuracy_score(y_pred_truth, y_true))
print("Accuracy of Biased Model on Biased Data: ", accuracy_score(y_pred_bias, y_bias_true))
print("Accuracy of Biased Model on Ground Truth Data: ", accuracy_score(y_pred_bias_on_true, y_true))
###Output
Accuracy of Ground Truth Model on Ground Truth Data: 0.9845916795069337
Accuracy of Biased Model on Biased Data: 0.9844236760124611
Accuracy of Biased Model on Ground Truth Data: 0.9830508474576272
###Markdown
Thus, we can see that under-sampling decreases model performance on the ground truth data.
###Code
# Ground Truth Model on Ground Truth Data
gm_true = MetricFrame(accuracy_score, y_true, y_pred_truth, sensitive_features = sens_attrs[1])
print("Overall Accuracy: ", gm_true.overall)
print("Group Accuracy : ", gm_true.by_group)
print("\n")
sr_true = MetricFrame(selection_rate, y_true, y_pred_truth, sensitive_features = sens_attrs[1])
print("Overall Selection Rate: ", sr_true.overall)
print("Group Selection Rate : ", sr_true.by_group)
###Output
Overall Accuracy: 0.9845916795069337
Group Accuracy : address
R 0.979695
U 0.986726
Name: accuracy_score, dtype: object
Overall Selection Rate: 0.2110939907550077
Group Selection Rate : address
R 0.177665
U 0.225664
Name: selection_rate, dtype: object
###Markdown
Thus, we can see that there is a **5% discrepancy** in the selection rate for Rural vs Urban students (with Urban students being preferred)
###Code
# Biased Model on Biased Data
gm_bias = MetricFrame(accuracy_score, y_bias_true, y_pred_bias, sensitive_features = df_sens)
print("Overall Accuracy: ", gm_bias.overall)
print("Group Accuracy : ", gm_bias.by_group)
print("\n")
sr_bias = MetricFrame(selection_rate, y_bias_true, y_pred_bias, sensitive_features = df_sens)
print("Overall Selection Rate: ", sr_bias.overall)
print("Group Selection Rate : ", sr_bias.by_group)
###Output
Overall Accuracy: 0.9844236760124611
Group Accuracy : address
R 0.978947
U 0.986726
Name: accuracy_score, dtype: object
Overall Selection Rate: 0.21495327102803738
Group Selection Rate : address
R 0.168421
U 0.234513
Name: selection_rate, dtype: object
###Markdown
Thus, we can see that there is now a **7% discrepancy due to under-sampling** in the selection rate for Rural vs Urban students (with Urban students being preferred) on biased data
###Code
# Biased Model on Ground Truth Data
gm_bias_on_true = MetricFrame(accuracy_score, y_true,
y_pred_bias_on_true, sensitive_features = sens_attrs[1])
print("Overall Accuracy: ", gm_bias_on_true.overall)
print("Group Accuracy : ", gm_bias_on_true.by_group)
print("\n")
sr_bias_on_true = MetricFrame(selection_rate, y_true,
y_pred_bias_on_true, sensitive_features = sens_attrs[1])
print("Overall Selection Rate: ", sr_bias_on_true.overall)
print("Group Selection Rate : ", sr_bias_on_true.by_group)
###Output
Overall Accuracy: 0.9830508474576272
Group Accuracy : address
R 0.974619
U 0.986726
Name: accuracy_score, dtype: object
Overall Selection Rate: 0.21571648690292758
Group Selection Rate : address
R 0.172589
U 0.234513
Name: selection_rate, dtype: object
###Markdown
Thus, we see that the biased model has lower overall and subgroup accuracy on the ground truth data. Also, we can observe that the selection rate disparity increased ever so slightly with the biased model.
###Code
print("Ground Truth Model on Ground Truth Data")
print("EOD:", equalized_odds_difference(y_true=y_true, y_pred = y_pred_truth, sensitive_features=sens_attrs[1]))
print("DPD:", demographic_parity_difference(y_true=y_true, y_pred = y_pred_truth, sensitive_features=sens_attrs[1]))
print("EOR:", equalized_odds_ratio(y_true=y_true, y_pred = y_pred_truth, sensitive_features=sens_attrs[1]))
print("DPR:", demographic_parity_ratio(y_true=y_true, y_pred = y_pred_truth, sensitive_features=sens_attrs[1]))
print("\n")
print("Biased Model on Ground Truth Data")
print("EOD:", equalized_odds_difference(y_true=y_true, y_pred = y_pred_bias_on_true, sensitive_features=sens_attrs[1]))
print("DPD:", demographic_parity_difference(y_true=y_true, y_pred = y_pred_bias_on_true, sensitive_features=sens_attrs[1]))
print("EOR:", equalized_odds_ratio(y_true=y_true, y_pred = y_pred_bias_on_true, sensitive_features=sens_attrs[1]))
print("DPR:", demographic_parity_ratio(y_true=y_true, y_pred = y_pred_bias_on_true, sensitive_features=sens_attrs[1]))
###Output
Ground Truth Model on Ground Truth Data
EOD: 0.047169811320754707
DPD: 0.04799874219486994
EOR: 0.11994219653179189
DPR: 0.7872996914501842
Biased Model on Ground Truth Data
EOD: 0.015425865310954804
DPD: 0.061924441848973555
EOR: 0.35982658959537567
DPR: 0.7359448328704147
###Markdown
Fairness Visualization (Keep this?)
###Code
from fairlearn.widget import FairlearnDashboard
FairlearnDashboard(sensitive_features = df_por['sex'],
sensitive_feature_names = ['sex'],
y_true = y_true,
y_pred = {"initial model" : y_pred_truth})
from fairlearn.widget import FairlearnDashboard
FairlearnDashboard(sensitive_features = df_por['address'],
sensitive_feature_names = ['address'],
y_true = y_true,
y_pred = {"initial model" : y_pred_bias_on_true})
###Output
_____no_output_____
###Markdown
Fairness Intervention
###Code
from fairlearn.reductions import ExponentiatedGradient, DemographicParity
np.random.seed(0)
constraint = DemographicParity()
mitigator_true = ExponentiatedGradient(classifier_true, constraint)
mitigator_true.fit(X_true, y_true, sensitive_features = sens_attrs[1])
y_pred_mitigated_true = mitigator_true.predict(X_true)
constraint = DemographicParity()
mitigator_bias = ExponentiatedGradient(classifier_bias, constraint)
mitigator_bias.fit(X_bias_true, y_bias_true, sensitive_features = df_sens)
y_pred_mitigated_bias = mitigator_bias.predict(X_bias_true)
y_pred_mitigated_bias_on_true = mitigator_bias.predict(X_true)
###Output
_____no_output_____
###Markdown
Evaluation
###Code
print("Accuracy of Ground Truth Model + Fairness Intervention on Ground Truth Data: ",
accuracy_score(y_pred_mitigated_true, y_true))
print("Accuracy of Biased Model + Fairness Intervention on Ground Truth Data: ",
accuracy_score(y_pred_mitigated_bias_on_true, y_true))
# Ground Truth Model + Fairness Intervention on Ground Truth Data
gm_mitigated = MetricFrame(accuracy_score, y_true, y_pred_mitigated_true, sensitive_features = sens_attrs[1])
print("Overall Accuracy: ", gm_mitigated.overall)
print("Group Accuracy : ", gm_mitigated.by_group)
print("\n")
sr_mitigated = MetricFrame(selection_rate, y_true, y_pred_mitigated_true, sensitive_features = sens_attrs[1])
print("Overall Selection Rate: ", sr_mitigated.overall)
print("Group Selection Rate : ", sr_mitigated.by_group)
# Biased Model + Fairness Intervention on Ground Truth Data
gm_mitigated_bias_on_true = MetricFrame(accuracy_score, y_true, y_pred_mitigated_bias_on_true, sensitive_features = sens_attrs[1])
print("Overall Accuracy: ", gm_mitigated_bias_on_true.overall)
print("Group Accuracy : ", gm_mitigated_bias_on_true.by_group)
print("\n")
sr_mitigated_bias_on_true = MetricFrame(selection_rate, y_true, y_pred_mitigated_bias_on_true, sensitive_features = sens_attrs[1])
print("Overall Selection Rate: ", sr_mitigated_bias_on_true.overall)
print("Group Selection Rate : ", sr_mitigated_bias_on_true.by_group)
###Output
Overall Accuracy: 0.975346687211094
Group Accuracy : address
R 0.949239
U 0.986726
Name: accuracy_score, dtype: object
Overall Selection Rate: 0.22033898305084745
Group Selection Rate : address
R 0.208122
U 0.225664
Name: selection_rate, dtype: object
###Markdown
Hence, compared with the unmitigated biased model, the fairness intervention trades a small decrease in overall and disadvantaged-subgroup accuracy for an increase in the disadvantaged subgroup's selection rate, i.e. a much smaller selection-rate disparity.
###Code
FairlearnDashboard(sensitive_features = sens_attrs[1],
sensitive_feature_names = ['address'],
y_true = y_true,
y_pred = {"initial model" : y_pred_truth, "mitigated model": y_pred_mitigated_true})
###Output
_____no_output_____
###Markdown
Bias vs Accuracy vs Fairness Trade-Off
###Code
# if verbose, shows "Finished iteration: ... "
# if apply_fairness, uses fairness intervention
def tradeoff_visualization(classifier, apply_fairness = False, verbose = False):
bias_amts = list(range(0,200,10))
accuracy_on_true = []
accuracy_on_biased = []
eod_on_true = []
eod_on_biased = []
dataset_size_true = np.full(shape=len(bias_amts), fill_value= X_true.shape[0]).tolist()
dataset_size_bias = []
table = []
classifier_true = classifier.fit(X_true, y_true)
y_pred_truth = classifier_true.predict(X_true)
df_undersampled = df_unfavored.sample(n=len(df_unfavored), random_state=42)
for i in range(20):
# under-sampling process
if i == 0:
df_undersampled = df_undersampled.sample(n=len(df_undersampled), random_state=42)
else:
df_undersampled = df_undersampled.sample(n=len(df_undersampled)-10, random_state=42)
# combine undersampled and original favored class to create dataset
df_concat = pd.concat([df_favored,df_undersampled])
df_concat.shape
df_sens = df_concat['address']
# format data
X_bias = df_concat.iloc[:, :-2].values
y_bias = df_concat.iloc[:, -1].values
# OHE
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), cat_cols)], remainder='passthrough')
X_bias_true = np.array(ct.fit_transform(X_bias))
y_bias_true = df_concat['pass']
dataset_size_bias.append(X_bias_true.shape[0])
classifier_bias = classifier.fit(X_bias_true, y_bias_true)
if apply_fairness:
constraint = DemographicParity()
mitigator_bias = ExponentiatedGradient(classifier_bias, constraint)
mitigator_bias.fit(X_bias_true, y_bias_true, sensitive_features = df_sens)
y_pred_bias = classifier_bias.predict(X_bias_true)
y_pred_bias_on_true = mitigator_bias.predict(X_true)
else:
y_pred_bias = classifier_bias.predict(X_bias_true)
y_pred_bias_on_true = classifier_bias.predict(X_true)
# model performance
acc_bias = accuracy_score(y_pred=y_pred_bias, y_true=y_bias_true)
accuracy_on_biased.append(acc_bias)
acc_bias_on_true = accuracy_score(y_pred=y_pred_bias_on_true, y_true=y_true)
accuracy_on_true.append(acc_bias_on_true)
# fairness performance
eod_true = equalized_odds_difference(y_true=y_bias_true, y_pred = y_pred_bias, sensitive_features=df_sens)
eod_on_true.append(eod_true)
eod_bias_on_true = equalized_odds_difference(y_true=y_true, y_pred = y_pred_bias_on_true, sensitive_features=sens_attrs[1])
eod_on_biased.append(eod_bias_on_true)
# table visualization
table_elem = [i*10, acc_bias, acc_bias_on_true]
table.append(table_elem)
if verbose:
print("Finished Iteration: ", len(df_concat))
return bias_amts, dataset_size_true, dataset_size_bias, accuracy_on_biased, accuracy_on_true, eod_on_biased, eod_on_true
def visualizations(bias_amts, dataset_size_true, dataset_size_bias, accuracy_on_biased, accuracy_on_true,
eod_on_true, eod_on_biased, fairness = False):
if not fairness:
plt.figure(figsize=(17,7))
plt.subplot(1,2,1)
plt.plot(bias_amts, accuracy_on_true, label = 'Ground Truth')
plt.plot(bias_amts, accuracy_on_biased, label = 'Biased Data')
plt.xlabel("Amount of Bias (number of minority samples removed)")
plt.ylabel("Accuracy Score")
plt.axhline(y=accuracy_score(y_pred_truth, y_true), color = "green", label = "Ground Truth Model Accuracy", alpha = 0.5)
plt.title("Biased Model Accuracy")
plt.ylim(0.92, 0.99)
plt.legend()
plt.subplot(1,2,2)
plt.plot(bias_amts, dataset_size_true, label = 'Ground Truth')
plt.plot(bias_amts, dataset_size_bias, label = 'Biased Data')
plt.xlabel("Amount of Bias (number of minority samples removed)")
plt.ylabel("Dataset Size")
plt.legend()
plt.show()
else:
plt.plot(bias_amts, eod_on_true, label = 'Ground Truth')
plt.plot(bias_amts, eod_on_biased, label = 'Biased Data')
plt.xlabel("Amount of Bias (number of minority samples removed)")
plt.ylabel("Equalized Odds Difference")
plt.axhline(y=equalized_odds_difference(y_true=y_true, y_pred = y_pred_truth, sensitive_features=sens_attrs[1]), color = "green",
label = "Ground Truth EOD", alpha = 0.5)
plt.legend()
plt.title("Biased Model Equalized Odds Difference")
plt.show()
#plt.savefig("bias1.png")
bias_amts, dataset_size_true, dataset_size_bias, accuracy_on_biased, accuracy_on_true, eod_on_biased, eod_on_true = tradeoff_visualization(classifier, False, False)
visualizations(bias_amts, dataset_size_true, dataset_size_bias, accuracy_on_biased, accuracy_on_true, eod_on_true, eod_on_biased, False)
visualizations(bias_amts, dataset_size_true, dataset_size_bias, accuracy_on_biased, accuracy_on_true, eod_on_true, eod_on_biased, True)
bias_amts, dataset_size_true, dataset_size_bias, accuracy_on_biased, accuracy_on_true, eod_on_biased, eod_on_true = tradeoff_visualization(classifier, True, False)
visualizations(bias_amts, dataset_size_true, dataset_size_bias, accuracy_on_biased, accuracy_on_true, eod_on_true, eod_on_biased, False)
visualizations(bias_amts, dataset_size_true, dataset_size_bias, accuracy_on_biased, accuracy_on_true, eod_on_true, eod_on_biased, True)
###Output
_____no_output_____
###Markdown
Special Visualization
###Code
bias_amts = list(range(0,200,10))
accuracy_on_true = []
accuracy_on_biased = []
accuracy_on_true_mitigated = []
eod_on_true = []
eod_on_biased = []
eod_on_true_mitigated = []
dataset_size_true = np.full(shape=len(bias_amts), fill_value= X_true.shape[0]).tolist()
dataset_size_bias = []
table = []
classifier_true = classifier.fit(X_true, y_true)
y_pred_truth = classifier_true.predict(X_true)
df_undersampled = df_unfavored.sample(n=len(df_unfavored), random_state=42)
for i in range(20):
# under-sampling process
if i == 0:
df_undersampled = df_undersampled.sample(n=len(df_undersampled), random_state=42)
else:
df_undersampled = df_undersampled.sample(n=len(df_undersampled)-10, random_state=42)
# combine undersampled and original favored class to create dataset
df_concat = pd.concat([df_favored,df_undersampled])
df_concat.shape
df_sens = df_concat['address']
# format data
X_bias = df_concat.iloc[:, :-2].values
y_bias = df_concat.iloc[:, -1].values
# OHE
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), cat_cols)], remainder='passthrough')
X_bias_true = np.array(ct.fit_transform(X_bias))
y_bias_true = df_concat['pass']
dataset_size_bias.append(X_bias_true.shape[0])
classifier_bias = classifier.fit(X_bias_true, y_bias_true)
constraint = DemographicParity()
mitigator_bias = ExponentiatedGradient(classifier_bias, constraint)
mitigator_bias.fit(X_bias_true, y_bias_true, sensitive_features = df_sens)
y_pred_mitigated_bias = classifier_bias.predict(X_bias_true)
y_pred_mitigated_bias_on_true = mitigator_bias.predict(X_true)
y_pred_bias = classifier_bias.predict(X_bias_true)
y_pred_bias_on_true = classifier_bias.predict(X_true)
# model performance
acc_bias = accuracy_score(y_pred=y_pred_bias, y_true=y_bias_true)
accuracy_on_biased.append(acc_bias)
acc_bias_on_true = accuracy_score(y_pred=y_pred_bias_on_true, y_true=y_true)
accuracy_on_true.append(acc_bias_on_true)
acc_bias_mitigated_on_true = accuracy_score(y_pred=y_pred_mitigated_bias_on_true, y_true=y_true)
accuracy_on_true_mitigated.append(acc_bias_mitigated_on_true)
# fairness performance
eod_bias = equalized_odds_difference(y_true=y_bias_true, y_pred = y_pred_bias, sensitive_features=df_sens)  # EOD of the biased model on its own (biased) data
eod_on_biased.append(eod_bias)
eod_bias_on_true = equalized_odds_difference(y_true=y_true, y_pred = y_pred_bias_on_true, sensitive_features=sens_attrs[1])  # EOD of the biased model on ground truth
eod_on_true.append(eod_bias_on_true)
eod_bias_true_mitigated = equalized_odds_difference(y_true=y_true, y_pred = y_pred_mitigated_bias_on_true, sensitive_features=sens_attrs[1])
eod_on_true_mitigated.append(eod_bias_true_mitigated)
plt.figure(figsize=(17,7))
plt.subplot(1,2,1)
plt.plot(bias_amts, accuracy_on_biased, label = 'On Biased Data + No Fairness Intervention', color = "red")
plt.plot(bias_amts, accuracy_on_true, label = 'On Ground Truth + No Fairness Intervention', color = "blue")
plt.plot(bias_amts, accuracy_on_true_mitigated, label = 'On Ground Truth + Fairness Intervention', color = "purple")
plt.xlabel("Amount of Bias (number of minority samples removed)")
plt.ylabel("Accuracy Score")
plt.axhline(y=accuracy_score(y_pred_truth, y_true), color = "green", label = "Ground Truth Model On Ground Truth Data", alpha = 0.5)
plt.title("Accuracy of Biased Model (trained on biased data) \n\nNote: ground truth model trained on ground truth data means \na barebones DT classifier trained on unbiased data")
plt.ylim(0.92, 0.99)
plt.legend()
plt.subplot(1,2,2)
plt.plot(bias_amts, dataset_size_true, label = 'Ground Truth')
plt.plot(bias_amts, dataset_size_bias, label = 'Biased Data')
plt.xlabel("Amount of Bias (number of minority samples removed)")
plt.ylabel("Dataset Size")
plt.legend()
plt.show()
plt.plot(bias_amts, eod_on_biased, label = 'On Biased Data + No Fairness Intervention', color = "red")
plt.plot(bias_amts, eod_on_true, label = 'On Ground Truth + No Fairness Intervention', color = "blue")
plt.plot(bias_amts, eod_on_true_mitigated, label = 'On Ground Truth + Fairness Intervention', color = "purple")
plt.xlabel("Amount of Bias (number of minority samples removed)")
plt.ylabel("Equalized Odds Difference")
plt.axhline(y=equalized_odds_difference(y_true=y_true, y_pred = y_pred_truth, sensitive_features=sens_attrs[1]), color = "green",
label = "Ground Truth EOD", alpha = 0.5)
plt.legend()
plt.title("Biased Model Equalized Odds Difference")
plt.show()
#plt.savefig("bias1.png")
###Output
_____no_output_____
###Markdown
We can also save the positions as Gadget-format binaries by setting return_pos to False
###Code
ng = 256
boxsize = 256.
twolpt = muscle.muscle(scheme='2lpt',z_pk=50,boxsize=boxsize,ng=ng,redshift=0.,return_pos=False)
pos2lpt = twolpt.generate()
###Output
using 2lpt
creating the folder sims/
creating the folder sims/bx256_ng256_z0.0_Om0.30/
creating the folder sims/bx256_ng256_z0.0_Om0.30/2lpt/
written binaries in sims/bx256_ng256_z0.0_Om0.30/2lpt/z0__0.dat
###Markdown
GTFSUtils Quickstart Table of Contents- Download Vienna GTFS- Show gtfsutils Help- Show Information about GTFS File- Load GTFS File- Visualize Routes- Visualize Shapes- Filter by Geometry (Within)- Filter by Geometry (Intersects)- Filter by Agency- Save Filtered GTFS File Download Vienna GTFS- Open Data Austria - [Wiener Linien – Fahrplandaten GTFS Wien](https://www.data.gv.at/katalog/dataset/ab4a73b6-1c2d-42e1-b4d9-049e04889cf0)
###Code
%%bash
mkdir -p data
if [ ! -f "data/vienna.gtfs.zip" ]; then
wget http://www.wienerlinien.at/ogd_realtime/doku/ogd/gtfs/gtfs.zip \
-qO data/vienna.gtfs.zip
fi
###Output
_____no_output_____
###Markdown
Show gtfsutils Help
###Code
%%bash
gtfsutils --help
###Output
usage: gtfsutils [-h] [-i SRC] [-o DST] [--bounds BOUNDS] [-f OPERATION]
[--overwrite] [-v]
method
GTFS Utilities
positional arguments:
method GTFS method: filter, bounds, info, merge
optional arguments:
-h, --help show this help message and exit
-i SRC, --input SRC Input filepath
-o DST, --output DST Output filepath
--bounds BOUNDS Filter boundary
-f OPERATION, --filter-operation OPERATION
Filter operation (within, intersects)
--overwrite Overwrite if exists
-v, --verbose Verbose output
###Markdown
Show Information about GTFS File
###Code
%%bash
gtfsutils info \
--input data/vienna.gtfs.zip
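# Hedged aside (left commented out): the --help output above also lists a "bounds"
# method, which should accept the same --input flag, e.g.
#   gtfsutils bounds --input data/vienna.gtfs.zip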
###Output
GTFS files:
agency.txt 2 rows
calendar.txt 247 rows
calendar_dates.txt 11,431 rows
routes.txt 613 rows
shapes.txt 476,277 rows
stop_times.txt 4,947,449 rows
stops.txt 4,496 rows
trips.txt 268,377 rows
Calender date range:
13.12.2020 - 11.12.2021
Bounding box:
[16.1977025532707, 47.9995020902886, 16.5494019702052, 48.3011051975429]
###Markdown
Load GTFS File
###Code
import shapely.geometry
import geopandas as gpd
import matplotlib.pyplot as plt
import gtfsutils
import gtfsutils.filter
import gtfsutils.routes
print(f"gtfsutils {gtfsutils.__version__}")
%%time
bounds = [16.398134, 48.1352198, 16.5775132, 48.2935324]
gdf_bounds = gpd.GeoSeries(
shapely.geometry.box(*bounds),
crs='EPSG:4326').boundary
filepath = "data/vienna.gtfs.zip"
df_dict = gtfsutils.load_gtfs(filepath)
# Show available dataframes
df_dict.keys()
gtfsutils.print_info(df_dict)
###Output
GTFS files:
agency.txt 2 rows
calendar.txt 247 rows
calendar_dates.txt 11,431 rows
routes.txt 613 rows
shapes.txt 476,277 rows
stop_times.txt 4,947,449 rows
stops.txt 4,496 rows
trips.txt 268,377 rows
Calender date range:
13.12.2020 - 11.12.2021
Bounding box:
[16.1977025532707, 47.9995020902886, 16.5494019702052, 48.3011051975429]
###Markdown
Visualize Routes
###Code
%%time
gdf_routes = gtfsutils.routes.load_routes_counts(df_dict)
fig, ax = plt.subplots(figsize=(8, 8))
gdf_plot = gdf_routes.to_crs(epsg=3857)
gdf_plot.plot(
ax=ax, linewidth=1, alpha=1,
column='counts', cmap='viridis', scheme='quantiles',
legend=True, legend_kwds={'loc': 'lower right'})
ax.set_title("GTFS Route Counts");
###Output
_____no_output_____
###Markdown
Visualize Shapes
###Code
%%time
gdf = gtfsutils.load_shapes(df_dict)
fig, ax = plt.subplots(figsize=(12, 8))
gdf.to_crs(epsg=3857).plot(ax=ax, linewidth=0.1, alpha=0.5)
gdf_bounds.to_crs(epsg=3857).plot(ax=ax, color='r')
ax.set_title("GTFS Shapes");
###Output
_____no_output_____
###Markdown
Filter by Geometry (Within)
###Code
%%time
gtfsutils.filter.filter_by_geometry(
df_dict, bounds, operation='within')
%%time
gdf = gtfsutils.load_shapes(df_dict)
fig, ax = plt.subplots(figsize=(12, 8))
gdf.to_crs(epsg=3857).plot(ax=ax, linewidth=0.1, alpha=0.5)
gdf_bounds.to_crs(epsg=3857).plot(ax=ax, color='r')
ax.set_title("GTFS Within Bounds");
###Output
_____no_output_____
###Markdown
Filter by Geometry (Intersects)
###Code
%%time
filepath = "data/vienna.gtfs.zip"
df_dict = gtfsutils.load_gtfs(filepath)
%%time
gtfsutils.filter.filter_by_geometry(
df_dict, bounds, operation='intersects')
%%time
gdf = gtfsutils.load_shapes(df_dict)
fig, ax = plt.subplots(figsize=(12, 8))
gdf.to_crs(epsg=3857).plot(ax=ax, linewidth=0.1, alpha=0.5)
gdf_bounds.to_crs(epsg=3857).plot(ax=ax, color='r')
ax.set_title("GTFS Intersects Bounds");
###Output
_____no_output_____
###Markdown
Filter by Agency
###Code
%%time
filepath = "data/vienna.gtfs.zip"
df_dict = gtfsutils.load_gtfs(filepath)
df_dict['agency']
%%time
gtfsutils.filter.filter_by_agency_ids(df_dict, [4])
%%time
gdf = gtfsutils.load_shapes(df_dict)
fig, ax = plt.subplots(figsize=(10, 6))
gdf.to_crs(epsg=3857).plot(ax=ax, linewidth=0.1, alpha=0.5)
ax.set_title("GTFS Filtered by Agency");
###Output
_____no_output_____
###Markdown
Save Filtered GTFS File
###Code
%%time
filepath = "data/vienna-filtered.gtfs.zip"
gtfsutils.save_gtfs(df_dict, filepath, ignore_required=True, overwrite=True)
###Output
CPU times: user 19.8 s, sys: 1.07 s, total: 20.9 s
Wall time: 47.2 s
###Markdown
Quickstart guideIn this notebook we will go through all the steps from downloading the data and training a model to evaluating the results. Check out the `environment.yml` file for the required Python packages.
###Code
import xarray as xr
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Downloading the dataThe data is hosted here. For this guide we will simply download the 500 hPa geopotential data (Z500).
###Code
# This might take a few minutes
!wget "https://dataserv.ub.tum.de/s/m1524895/download?path=%2F5.625deg%2Fgeopotential_500&files=geopotential_500_5.625deg.zip" -O geopotential_500_5.625deg.zip
!mkdir -p geopotential_500
!unzip -d geopotential_500/ geopotential_500_5.625deg.zip
###Output
Archive: geopotential_500_5.625deg.zip
inflating: geopotential_500/geopotential_500hPa_1979_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1980_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1981_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1982_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1983_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1984_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1985_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1986_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1987_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1988_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1989_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1990_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1991_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1992_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1993_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1994_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1995_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1996_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1997_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1998_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1999_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2000_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2001_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2002_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2003_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2004_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2005_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2006_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2007_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2008_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2009_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2010_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2011_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2012_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2013_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2014_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2015_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2016_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2017_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2018_5.625deg.nc
###Markdown
Open the data
###Code
z500 = xr.open_mfdataset('geopotential_500/*.nc', combine='by_coords')
z500
# Plot an example
z500.z.isel(time=0).plot();
###Output
_____no_output_____
###Markdown
Create a simple climatological forecastRemember that we are using the years 2017 and 2018 for testing/evaluation, so we are not allowed to use these years to train any data-driven model.For more information on the climatology and persistence forecasts used in the paper check out `notebooks/1-climatology-persistence.ipynb`.
###Code
# To speed things up we will just take the mean for 2016
climatology = z500.sel(time=slice('2016', '2016')).mean('time').load()
climatology.z.plot()
###Output
_____no_output_____
###Markdown
Evaluate the climatologyPlease check the paper for details on the evaluation metric. Here we will use the functions from `src/score.py`. To make sure we are always using the same targets for testing, we also implemented a function to load the test data.
###Code
from src.score import *
z500_test = load_test_data('geopotential_500/', 'z')[::12] # Take data only every 12 hours to speed up computation on Binder
rmse_climatology = compute_weighted_rmse(climatology.z, z500_test).load()
rmse_climatology
###Output
_____no_output_____
###Markdown
So we get a climatological RMSE of 1080 m^2/s^2 which is very similar to the RMSE we get for the climatology for all training years. Train a neural networkNow let's train a simple convolutional neural network. We are using several functions defined in `src/train_nn.py`. You can use and modify these or write your own function for data loading etc. For more information on the networks check out `notebooks/3-cnn-example.ipynb`.
###Code
from src.train_nn import *
# This limits TF memory usage on the GPU
# limit_mem()
###Output
_____no_output_____
###Markdown
First, we need to create the data generators for training, validation and testing. The main reason why we are using data generators instead of just loading the data as Numpy arrays is that this would require loading the same data twice since the features and targets are the same fields, just offset in time.
###Code
bs = 32
lead_time = 5*24
var_dict = {'z': None}
# Use 2015 for training and 2016 for validation
dg_train = DataGenerator(
z500.sel(time=slice('2015', '2015')), var_dict, lead_time, batch_size=bs, load=True)
dg_valid = DataGenerator(
z500.sel(time=slice('2016', '2016')), var_dict, lead_time, batch_size=bs, mean=dg_train.mean, std=dg_train.std, shuffle=False)
# Now also a generator for testing. Important: Shuffle must be False!
dg_test = DataGenerator(z500.sel(time=slice('2017', '2018')).isel(time=slice(0, None, 12)), # Limiting the data for Binder
var_dict, lead_time, batch_size=bs, mean=dg_train.mean, std=dg_train.std, shuffle=False)
X, y = dg_train[0]
# Batches have dimensions [batch_size, lat, lon, channels]
X.shape, y.shape
###Output
_____no_output_____
###Markdown
Now let's build a simple fully convolutional network. We are using periodic convolutions in the longitude direction. These are defined in `train_nn.py`.
###Code
cnn = keras.models.Sequential([
PeriodicConv2D(filters=32, kernel_size=5, conv_kwargs={'activation':'relu'}, input_shape=(32, 64, 1,)),
PeriodicConv2D(filters=1, kernel_size=5)
])
cnn.summary()
cnn.compile(keras.optimizers.Adam(1e-4), 'mse')
# Train a little bit ;)
cnn.fit(dg_train, epochs=1, validation_data=dg_valid)
###Output
270/270 [==============================] - 6s 21ms/step - loss: 1.7766 - val_loss: 1.0282
###Markdown
Create a prediction and compute scoreNow that we have a model (albeit a crappy one) we can create a prediction. For this we need to create a forecast for each forecast initialization time in the testing range (2017-2018) and unnormalize it. We then convert the forecasts to a Xarray dataset which allows us to easily compute the RMSE. All of this is taken care of in the `create_predictions()` function.
###Code
preds = create_predictions(cnn, dg_test)
preds
compute_weighted_rmse(preds.z, z500_test).load()
time = '2017-03-02T00'
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,5))
z500_test.sel(time=time).plot(ax=ax1)
preds.sel(time=time).z.plot(ax=ax2);
###Output
_____no_output_____
###Markdown
A brief explanation of the revamped CBRAIN moduleIn this guide, we will go through the steps required to preprocess the raw climate model output, train a neural network and then analyze how good it is.
###Code
from cbrain.imports import *
from cbrain.cam_constants import *
from cbrain.utils import *
###Output
_____no_output_____
###Markdown
PreprocessingPreprocessing works using the `preprocessing.py` script in the main directory of the repository along with a configuration file. Let's have a look at one such configuration file to see what is required. Here is `000_test.yml`:```yamlvars : [QBP, QCBP, QIBP, TBP, VBP, PS, SOLIN, SHFLX, LHFLX, PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND, QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN, Qdt_adiabatic, QCdt_adiabatic, QIdt_adiabatic, Tdt_adiabatic, Vdt_adiabatic]in_dir : /local/S.Rasp/sp32fbp_andkua/in_fns : AndKua_aqua_SPCAM3.0_sp_fbp32.cam2.h1.0000-01-0*-00000.ncout_dir: /local/S.Rasp/preprocessed_data/out_fn: 000_train.ncval_in_fns: AndKua_aqua_SPCAM3.0_sp_fbp32.cam2.h1.0000-02-0*-00000.ncval_out_fn: 000_valid.ncnorm_fn: 000_norm.nc```One new thing is that it is not necessary to specifically state the input and output variables in the preprocessing script. These will be chosen later in the data generator. This means that `vars` can contain as many variables as possible. This should reduce the number of times the preprocessing has to be run.Some of the variables are saved directly in the model output, e.g. `PHQ`, while others, e.g. `QBP`, have to be derived. Currently, the following derived variables are available: `*dt_adiabatic`, `*BP` and `PRECST`. If you want to add others, you have to implement them in `convert_dataset.py`.`in_dir` and `in_fns` describe the raw climate model file names. `out_dir` and `out_fn` denote the path to the preprocessed file. The training dataset will additionally be preshuffled.If `val_*` is given, a separate validation dataset will be created.If `norm_fn` is given, statistics will be computed from the training dataset, e.g. the mean and standard deviation. Note that for large training files this takes a very, very long time. Therefore, I recommend computing the normalization file on a small dataset and using it for the large training dataset.
###Code
!python preprocessing.py -c pp_config/000_test.yml
!ls /local/S.Rasp/preprocessed_data/000*
###Output
/local/S.Rasp/preprocessed_data/000_norm.nc
/local/S.Rasp/preprocessed_data/000_train.nc
/local/S.Rasp/preprocessed_data/000_train_shuffle.nc
/local/S.Rasp/preprocessed_data/000_valid.nc
###Markdown
Training the model step-by-stepLet's now go through the steps to train a neural network, starting with the new data generator. Data generatorTo read the preprocessed file and feed the data to the neural net, we will use the `DataGenerator` class in the cbrain module. At this stage we will now define the variables we want in the input and output of the neural network.
###Code
in_vars = ['QBP', 'QCBP', 'QIBP', 'TBP', 'VBP',
'Qdt_adiabatic', 'QCdt_adiabatic', 'QIdt_adiabatic', 'Tdt_adiabatic', 'Vdt_adiabatic',
'PS', 'SOLIN', 'SHFLX', 'LHFLX']
out_vars = ['PHQ', 'PHCLDLIQ', 'PHCLDICE', 'TPHYSTND', 'QRL', 'QRS', 'DTVKE',
'FSNT', 'FSNS', 'FLNT', 'FLNS', 'PRECT', 'PRECTEND', 'PRECST', 'PRECSTEN']
###Output
_____no_output_____
###Markdown
Next we need to think about how we want to normalize/scale the inputs and outputs. For the inputs we will use a pretty standard normalization, which we will get to later.As for the outputs, we would like to scale each variable physically. For this we will create a dictionary, which will contain a factor or a vector of factors for each variable.
###Code
scale_dict = {
'PHQ': L_V/G,
'PHCLDLIQ': L_V/G,
'PHCLDICE': L_V/G,
'TPHYSTND': C_P/G,
'QRL': C_P/G,
'QRS': C_P/G,
'DTVKE': C_P/G,
'FSNT': 1,
'FSNS': 1,
'FLNT': 1,
'FLNS': 1,
'PRECT': RHO_L*L_V,
'PRECTEND': 1e-3*RHO_L*L_V,
'PRECST': RHO_L*L_V,
'PRECSTEN': 1e-3*RHO_L*L_V
}
# Takes representative value for PS since purpose is normalization
PS = 1e5; P0 = 1e5;
P = P0*hyai+PS*hybi; # Total pressure [Pa]
dP = P[1:]-P[:-1]; # Differential pressure [Pa]
for v in ['PHQ', 'PHCLDLIQ', 'PHCLDICE', 'TPHYSTND', 'QRL', 'QRS']:
scale_dict[v] *= dP
scale_dict['DTVKE'] *= (dP/DT)
###Output
_____no_output_____
###Markdown
Now we will save this dictionary as a pickle file, so that we can later load it in the training script.
###Code
save_pickle('./nn_config/scale_dicts/001_toms_scaling.pkl', scale_dict)
###Output
_____no_output_____
###Markdown
With this dictionary, we can now create a DataGenerator instance.A word about the normalization. The normalization is handled by Normalizer classes, defined in `normalization.py`. By default in `DataGenerator`, the `InputNormalizer` class is used for input normalization, while the outputs are scaled using the `DictNormalizer`. `DataGenerator` takes a tuple of strings for `input_transform`. This tuple describes which arrays from the normalization file (`norm_fn`) the input will be subtracted and divided by. If you want to create your own fancy normalization, you have to create such a Normalizer, which has to have a `transform` method.
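If you do write your own normalizer, a minimal sketch could look like the following (a hypothetical class for illustration only: the one requirement stated above is a `transform` method, and the constructor arguments are assumptions):
```python
# Minimal sketch of a custom normalizer (hypothetical; only transform() is required)
class MyFancyNormalizer:
    def __init__(self, sub, div):
        self.sub = sub   # e.g. an array taken from the norm_fn statistics file
        self.div = div

    def transform(self, x):
        return (x - self.sub) / self.div
```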
###Code
from cbrain.data_generator import DataGenerator
train_gen = DataGenerator(
data_fn = '/local/S.Rasp/preprocessed_data/000_train_shuffle.nc',
input_vars = in_vars,
output_vars = out_vars,
norm_fn = '/local/S.Rasp/preprocessed_data/000_norm.nc',
input_transform = ('mean', 'maxrs'),
output_transform = scale_dict,
batch_size=1024,
shuffle=True
)
X, Y = train_gen[0]; X.shape, Y.shape
###Output
_____no_output_____
###Markdown
Create a model with conservation layersNext we need to create a model. This is just basic Keras. I will show here how to use the conservation layers written by Tom. These layers require some additional input, in particular the normalization information.
###Code
from cbrain.layers import *
from tensorflow.keras.layers import *
inp_layer = Input(shape=(304,))
x = Dense(214, activation='elu')(inp_layer)
x = SurRadLayer(
inp_div=train_gen.input_transform.div,
inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ']
)([inp_layer, x])
x = MassConsLayer(
inp_div=train_gen.input_transform.div,
inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ']
)([inp_layer, x])
out_layer = EntConsLayer(
inp_div=train_gen.input_transform.div,
inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ']
)([inp_layer, x])
model = tf.keras.models.Model(inp_layer, out_layer)
model.summary()
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 304) 0
__________________________________________________________________________________________________
dense (Dense) (None, 214) 65270 input_1[0][0]
__________________________________________________________________________________________________
sur_rad_layer (SurRadLayer) (None, 216) 0 input_1[0][0]
dense[0][0]
__________________________________________________________________________________________________
mass_cons_layer (MassConsLayer) (None, 217) 0 input_1[0][0]
sur_rad_layer[0][0]
__________________________________________________________________________________________________
ent_cons_layer (EntConsLayer) (None, 218) 0 input_1[0][0]
mass_cons_layer[0][0]
==================================================================================================
Total params: 65,270
Trainable params: 65,270
Non-trainable params: 0
__________________________________________________________________________________________________
###Markdown
Train the model with the weakly constrained energy lossAgain, this is basic Keras; however, Tom implemented the option to penalize the network for violating physical constraints, which we will do here.I implemented the weakly constrained loss as a class in `losses.py`. When initializing this class, you need to pass the input_tensor from the model and, again, the normalization information.This loss computes three losses internally: the standard MSE, a loss for mass conservation and a loss for enthalpy conservation. You can weight the losses by changing the `alpha_*` parameters.
###Code
from cbrain.losses import *
weak_loss = WeakLoss(inp_layer, inp_div=train_gen.input_transform.div, inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ'])
mass_loss = WeakLoss(inp_layer, inp_div=train_gen.input_transform.div, inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ'], alpha_mass=1, alpha_ent=0, name='mass_loss')
ent_loss = WeakLoss(inp_layer, inp_div=train_gen.input_transform.div, inp_sub=train_gen.input_transform.sub,
norm_q=scale_dict['PHQ'], alpha_mass=0, alpha_ent=1, name='ent_loss')
model.compile(tf.keras.optimizers.Adam(lr=0.01), loss=weak_loss, metrics=[mass_loss, ent_loss, mse])
model.fit_generator(train_gen, epochs=5)
###Output
Epoch 1/5
3448/3448 [==============================] - 109s 32ms/step - loss: 740.1542 - mass_loss: 3.6446e-11 - ent_loss: 7.8155e-11 - mean_squared_error: 1480.3085
Epoch 2/5
3448/3448 [==============================] - 67s 20ms/step - loss: 540.9411 - mass_loss: 6.3662e-11 - ent_loss: 1.4316e-10 - mean_squared_error: 1081.8822
Epoch 3/5
3448/3448 [==============================] - 67s 19ms/step - loss: 419.6522 - mass_loss: 8.7082e-11 - ent_loss: 1.6894e-10 - mean_squared_error: 839.3044
Epoch 4/5
3448/3448 [==============================] - 66s 19ms/step - loss: 356.3085 - mass_loss: 1.0223e-10 - ent_loss: 1.9572e-10 - mean_squared_error: 712.6170
Epoch 5/5
3448/3448 [==============================] - 67s 19ms/step - loss: 322.0714 - mass_loss: 1.1291e-10 - ent_loss: 2.1134e-10 - mean_squared_error: 644.1428
###Markdown
Naturally, since we are using a conserving network, the conservation losses are basically zero. Puh... Train the network using the train.py scriptDoing the training in a notebook is good for experimentation but for testing different configurations and using large training datasets, we need a command line script, which is called `train.py`. So, let's check out how to use it.Again we will create a configuration file. This is `000_example.yml`:``` Example training configuration fileexp_name: 000_exampledata_dir: /local/S.Rasp/preprocessed_data/train_fn: 000_train_shuffle.ncvalid_fn: 000_valid.ncnorm_fn: 000_norm.ncinputs: [QBP, QCBP, QIBP, TBP, VBP, Qdt_adiabatic, QCdt_adiabatic, QIdt_adiabatic, Tdt_adiabatic, Vdt_adiabatic, PS, SOLIN, SHFLX, LHFLX]outputs: [PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND, QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN]input_sub: meaninput_div: maxrsoutput_dict: /home/s/S.Rasp/repositories/CBRAIN-CAM/nn_config/scale_dicts/001_toms_scaling.pklhidden_layers: [128, 214]epochs: 10conservation_layer: Trueloss: weak_loss```Most of the arguments are pretty self-explanatory (I hope). If you are confused look at the definition at the bottom of `train.py`.This script automatically uses learning rate decay.Additionally, and very importantly for the eventual implementation in CAM, it also saves the network and weights.
###Code
!python train.py -c nn_config/000_example.yml
###Output
2019-04-03 15:23:20.846557: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 AVX512F FMA
2019-04-03 15:23:20.982342: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1432] Found device 0 with properties:
name: GeForce GTX 1080 major: 6 minor: 1 memoryClockRate(GHz): 1.7335
pciBusID: 0000:b3:00.0
totalMemory: 7.93GiB freeMemory: 5.87GiB
2019-04-03 15:23:20.982378: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1511] Adding visible gpu devices: 0
2019-04-03 15:23:21.453378: I tensorflow/core/common_runtime/gpu/gpu_device.cc:982] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-04-03 15:23:21.453417: I tensorflow/core/common_runtime/gpu/gpu_device.cc:988] 0
2019-04-03 15:23:21.453425: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1001] 0: N
2019-04-03 15:23:21.453721: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 5642 MB memory) -> physical GPU (device: 0, name: GeForce GTX 1080, pci bus id: 0000:b3:00.0, compute capability: 6.1)
04/03/2019 03:23:21 PM Create training and validation data generators
04/03/2019 03:23:21 PM Build model
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 304) 0
__________________________________________________________________________________________________
dense (Dense) (None, 256) 78080 input_1[0][0]
__________________________________________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 256) 0 dense[0][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 256) 65792 leaky_re_lu[0][0]
__________________________________________________________________________________________________
leaky_re_lu_1 (LeakyReLU) (None, 256) 0 dense_1[0][0]
__________________________________________________________________________________________________
dense_2 (Dense) (None, 256) 65792 leaky_re_lu_1[0][0]
__________________________________________________________________________________________________
leaky_re_lu_2 (LeakyReLU) (None, 256) 0 dense_2[0][0]
__________________________________________________________________________________________________
dense_3 (Dense) (None, 214) 54998 leaky_re_lu_2[0][0]
__________________________________________________________________________________________________
leaky_re_lu_3 (LeakyReLU) (None, 214) 0 dense_3[0][0]
__________________________________________________________________________________________________
sur_rad_layer (SurRadLayer) (None, 216) 0 input_1[0][0]
leaky_re_lu_3[0][0]
__________________________________________________________________________________________________
mass_cons_layer (MassConsLayer) (None, 217) 0 input_1[0][0]
sur_rad_layer[0][0]
__________________________________________________________________________________________________
ent_cons_layer (EntConsLayer) (None, 218) 0 input_1[0][0]
mass_cons_layer[0][0]
==================================================================================================
Total params: 264,662
Trainable params: 264,662
Non-trainable params: 0
__________________________________________________________________________________________________
None
04/03/2019 03:23:21 PM Compile model
04/03/2019 03:23:22 PM Train model
2019-04-03 15:23:23.590239: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1511] Adding visible gpu devices: 0
2019-04-03 15:23:23.590287: I tensorflow/core/common_runtime/gpu/gpu_device.cc:982] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-04-03 15:23:23.590296: I tensorflow/core/common_runtime/gpu/gpu_device.cc:988] 0
2019-04-03 15:23:23.590305: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1001] 0: N
2019-04-03 15:23:23.590595: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 5642 MB memory) -> physical GPU (device: 0, name: GeForce GTX 1080, pci bus id: 0000:b3:00.0, compute capability: 6.1)
Learning rate = 0.01
Epoch 1/10
3448/3448 [==============================] - 89s 26ms/step - loss: 66.6859 - mean_squared_error: 133.3718 - mass_loss: 3.4461e-10 - ent_loss: 3.2758e-10 - val_loss: 51.0592 - val_mean_squared_error: 102.1183 - val_mass_loss: 3.8388e-10 - val_ent_loss: 1.9052e-10
Learning rate = 0.01
Epoch 2/10
3448/3448 [==============================] - 82s 24ms/step - loss: 50.0245 - mean_squared_error: 100.0490 - mass_loss: 3.7326e-10 - ent_loss: 3.3711e-10 - val_loss: 50.6125 - val_mean_squared_error: 101.2250 - val_mass_loss: 4.0006e-10 - val_ent_loss: 2.0463e-10
Learning rate = 0.002
Epoch 3/10
3448/3448 [==============================] - 82s 24ms/step - loss: 44.0968 - mean_squared_error: 88.1936 - mass_loss: 3.8555e-10 - ent_loss: 3.3528e-10 - val_loss: 45.2651 - val_mean_squared_error: 90.5302 - val_mass_loss: 3.9009e-10 - val_ent_loss: 1.8705e-10
Learning rate = 0.002
Epoch 4/10
3448/3448 [==============================] - 82s 24ms/step - loss: 43.1805 - mean_squared_error: 86.3610 - mass_loss: 3.9110e-10 - ent_loss: 3.3402e-10 - val_loss: 44.7589 - val_mean_squared_error: 89.5178 - val_mass_loss: 3.6946e-10 - val_ent_loss: 1.7980e-10
Learning rate = 0.0004000000000000001
Epoch 5/10
3448/3448 [==============================] - 83s 24ms/step - loss: 41.5774 - mean_squared_error: 83.1547 - mass_loss: 3.9188e-10 - ent_loss: 3.3378e-10 - val_loss: 43.3000 - val_mean_squared_error: 86.5999 - val_mass_loss: 4.0520e-10 - val_ent_loss: 1.9103e-10
Learning rate = 0.0004000000000000001
Epoch 6/10
3448/3448 [==============================] - 83s 24ms/step - loss: 41.2948 - mean_squared_error: 82.5897 - mass_loss: 3.9295e-10 - ent_loss: 3.3513e-10 - val_loss: 43.2729 - val_mean_squared_error: 86.5458 - val_mass_loss: 3.8670e-10 - val_ent_loss: 1.8113e-10
Learning rate = 8.000000000000002e-05
Epoch 7/10
3448/3448 [==============================] - 82s 24ms/step - loss: 40.8886 - mean_squared_error: 81.7773 - mass_loss: 3.9385e-10 - ent_loss: 3.3514e-10 - val_loss: 42.9063 - val_mean_squared_error: 85.8126 - val_mass_loss: 4.0482e-10 - val_ent_loss: 1.8694e-10
Learning rate = 8.000000000000002e-05
Epoch 8/10
3448/3448 [==============================] - 82s 24ms/step - loss: 40.8127 - mean_squared_error: 81.6254 - mass_loss: 3.9597e-10 - ent_loss: 3.3652e-10 - val_loss: 42.8711 - val_mean_squared_error: 85.7422 - val_mass_loss: 4.0361e-10 - val_ent_loss: 1.8883e-10
Learning rate = 1.6000000000000003e-05
Epoch 9/10
3448/3448 [==============================] - 83s 24ms/step - loss: 40.7166 - mean_squared_error: 81.4333 - mass_loss: 3.9348e-10 - ent_loss: 3.3688e-10 - val_loss: 42.8298 - val_mean_squared_error: 85.6596 - val_mass_loss: 4.0140e-10 - val_ent_loss: 1.8511e-10
Learning rate = 1.6000000000000003e-05
Epoch 10/10
3448/3448 [==============================] - 82s 24ms/step - loss: 40.7036 - mean_squared_error: 81.4072 - mass_loss: 3.9575e-10 - ent_loss: 3.3639e-10 - val_loss: 42.8211 - val_mean_squared_error: 85.6422 - val_mass_loss: 4.0147e-10 - val_ent_loss: 1.8437e-10
04/03/2019 03:37:14 PM Saving model as ./saved_models/000_example/model.h5
###Markdown
We see that we are overfitting quite a bit which is to be expected with such a small dataset.
###Code
!ls ./saved_models/000_example/
###Output
inp_div.txt layer2_bias.txt layer4_bias.txt weights.h5
inp_sub.txt layer2_kernel.txt layer4_kernel.txt
layer1_bias.txt layer3_bias.txt model.h5
layer1_kernel.txt layer3_kernel.txt out_scale.txt
###Markdown
Model diagnosticsFinally, we would like to know how well our model does in more detail than just looking at the loss.For this I wrote the `ModelDiagnostics` class. It is designed to be convenient. Let's see what it can do.For basic usage it only needs two arguments: first, the configuration file used for neural network training and, second, the data file to be used for validation.
###Code
from cbrain.model_diagnostics import ModelDiagnostics
md = ModelDiagnostics('nn_config/000_example.yml', '/local/S.Rasp/preprocessed_data/000_valid.nc')
###Output
_____no_output_____
###Markdown
PlottingThe first thing we can do is plot the truth alongside the model prediction. These functions take all the regular matplotlib arguments.
###Code
md.plot_double_yz(itime=0, ilon=0, var='PHQ', vmin=-8e-8, vmax=8e-8, cmap='bwr');
md.plot_double_xy(0, 0, 'PRECT');
###Output
_____no_output_____
###Markdown
Compute statisticsLast but not least, we can compute statistics over the entire validation dataset.
###Code
md.compute_stats()
###Output
_____no_output_____
###Markdown
Now there is a stats dictionary containing many statistics. If you want to implement your own statistics, you will have to do so in the `compute_stats` method.
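Rather than editing the source directly, one option is to subclass `ModelDiagnostics` and extend `compute_stats` (a sketch only; it assumes, as the cells below suggest, that `compute_stats` fills the `self.stats` dictionary):
```python
# Hypothetical sketch: add a custom entry on top of the standard statistics
class MyDiagnostics(ModelDiagnostics):
    def compute_stats(self):
        super().compute_stats()                          # fill the standard entries
        self.stats['mean_r2'] = self.stats['r2'].mean()  # illustrative extra statistic
```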
###Code
md.stats.keys()
###Output
_____no_output_____
###Markdown
One common statistic, for example, is the R2. So let's plot that. The R2 is averaged over time but all other dimensions are still available. Further, the vertical level is still stacked, so we will have to figure out the variable indices.
###Code
md.stats['r2'].shape
plt.matshow(md.stats['r2'][:, :, md.get_output_var_idx('PHQ')].mean(1).T, vmin=0, vmax=1, cmap='Spectral')
plt.colorbar(shrink=0.7)
###Output
_____no_output_____
###Markdown
We also like looking at the horizontally averaged R2 for each variable.
###Code
md.stats['hor_r2'].shape
md.valid_gen.output_vars[:7]
# Get the vertical coordinate in pressure levels
PS = 1e5; P0 = 1e5;
P = P0*hyai+PS*hybi; # Total pressure [Pa]
P = (P[1:] + P[:-1]) / 2 / 100
fig, ax = plt.subplots(figsize=(8, 8))
for v in md.valid_gen.output_vars[:7]:
ax.plot(md.stats['hor_r2'][md.get_output_var_idx(v)], P, label=v)
ax.set_xlim(0, 1)
ax.invert_yaxis()
plt.legend()
###Output
_____no_output_____
###Markdown
Quick start*Last updated: Jan 27, 2020*You have *whole genome* (WGS), *whole exome* (WES) or *targeted sequencing* (TGS) data from one or more tumour samples from the same patient and you wish to find clusters of mutations with similar *variant allele fractions* (VAF). You can use `clonosGP` to do that. The data can be either cross-sectional or longitudinal. In the case of longitudinal data, we can optionally exploit the temporal spacing of samples to improve clustering. 1. First, import the necessary packages
###Code
import logging as log
import warnings as warn
import pandas as pnd
import pymc3 as pmc
import clonosGP as cln
log.getLogger('theano').setLevel(log.ERROR) ## suppress annoying warnings ...
warn.filterwarnings("ignore", category=FutureWarning) ## from Theano
###Output
_____no_output_____
###Markdown
2. Next, load your data
###Code
DATA = pnd.read_csv('data/cll_Schuh_2012_CLL003.csv')
DATA
###Output
_____no_output_____
###Markdown
As you can see, the data are stored in long format. The minimum set of columns that must be present are `SAMPLEID`, `MUTID`, `r` and `R`, where `r` is the number of reads harbouring the mutation and `R` is the sum of `r` plus the number of reads harbouring the reference allele. In the case of longitudinal data (as above), column `TIME` may be present, indicating the relative time (in days or months) of sample collection. Each line should include information on a single mutation in a single sample. No missing values are permitted. Importantly, each mutation should be covered (i.e. `R>0`) in all samples, but not necessarily present in all samples, in which case `r=0`, as shown below:
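Before that pivoted view, here is a toy sketch (hypothetical values only) of the minimal long format just described:
```python
# Toy illustration of the minimal clonosGP input table (hypothetical values)
import pandas as pnd

toy = pnd.DataFrame({
    'SAMPLEID': ['S1', 'S1', 'S2', 'S2'],
    'TIME':     [0,    0,    30,   30],   # optional, relative collection time
    'MUTID':    ['M1', 'M2', 'M1', 'M2'],
    'r':        [12,   0,    25,   8],    # reads carrying the mutation (0 = covered but absent)
    'R':        [60,   55,   70,   40],   # r plus reads carrying the reference allele
})
```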
###Code
DATA.pivot_table(index='MUTID', columns=['SAMPLEID', 'TIME'], values=['r', 'R'])
###Output
_____no_output_____
###Markdown
The following columns should also be included, if available:1. `PURITY`: the purity of each sample. If missing, it is assumed that `PURITY=1`. 2. `CNn`: the local copy number at the locus of each mutation in the germline. If missing, it is assumed that `CNn=2`.3. `CNt`: the local copy number at the locus of each mutation in each sample. If missing, it is assumed that `CNt=2`.4. `CNm`: the number of chromosomes harbouring each mutation in each sample. If missing, it will be estimated from the data. Any additional columns are ignored. 3. Run `clonosGP`
###Code
pmc.tt_rng(42) # for reproducibility
RES = cln.infer(DATA,
model_args={'prior': 'GP0', 'cov': 'Mat32'}, ## change "GP0" to "Flat", for cross-sectional data
pymc3_args={'niters': int(10e3)}) ## Run inference for 10K iterations
## the above may take some time to start running the first time you try it.
###Output
INFO:clonosGP:No PURITY column in the data. Assuming all samples have purity 100%.
INFO:clonosGP:No CNn column in the data. Assuming germline is diploid over all provided loci.
INFO:clonosGP:No CNt column in the data. Assuming all tumour samples are diploid over all provided loci.
INFO:clonosGP:No CNm column in the data. Multiplicity values will be approximated.
Average Loss = 377.74: 100%|██████████| 10000/10000 [00:13<00:00, 718.83it/s]
Finished [100%]: Average Loss = 377.74
INFO:pymc3.variational.inference:Finished [100%]: Average Loss = 377.74
INFO:clonosGP:Calculating posterior cluster weights and centres.
INFO:clonosGP:Calculating posterior CCF values.
INFO:clonosGP:Calculating posterior predictive distribution.
INFO:clonosGP:Calculating GP-related quantities.
INFO:clonosGP:Calculating dispersion(s).
INFO:clonosGP:Finished.
###Markdown
4. Visualise the results
###Code
cln.viz.plot_summary(RES, figsize=(10, 15));
###Output
_____no_output_____
###Markdown
Make sure that the algorithm has converged by inspecting the loss (top-left panel). If not, rerun with a larger number of iterations. Cluster weights (i.e. proportions of mutations belonging to each cluster) are illustrated in the top-right panel. A color-encoded hard clustering of mutations and the time course of each mutation cluster are provided in the two middle panels. Finally, estimated sample-specific dispersions, time scales (lengths) and amplitudes of the underlying Gaussian Process for modelling cluster dynamics are provided in the bottom three panels, respectively. For an overview of the fitted model, check below:
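If the loss has not flattened, inference can simply be rerun with a larger iteration budget before moving on to the overview below (a hedged sketch reusing the call from step 3; the iteration count is only an example):
```python
# Rerun with more iterations if the loss in the top-left panel has not converged
RES = cln.infer(DATA,
                model_args={'prior': 'GP0', 'cov': 'Mat32'},
                pymc3_args={'niters': int(20e3)})  # e.g. double the previous budget
```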
###Code
cln.viz.plot_samples(RES, figsize=(7, 7));
###Output
_____no_output_____
###Markdown
5. Inspect the results in more detail
###Code
RES['data']
###Output
_____no_output_____
###Markdown
Quickstart guideIn this notebook we will go through all the steps from downloading the data and training a model to evaluating the results. Check out the `environment.yml` file for the required Python packages.
###Code
import xarray as xr
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Downloading the dataThe data is hosted here. For this guide we will simply download the 500 hPa geopotential data (Z500).
###Code
# This might take a few minutes
!wget "https://dataserv.ub.tum.de/s/m1524895/download?path=%2F5.625deg%2Fgeopotential_500&files=geopotential_500_5.625deg.zip" -O geopotential_500_5.625deg.zip
!mkdir -p geopotential_500
!unzip -d geopotential_500/ geopotential_500_5.625deg.zip
###Output
Archive: geopotential_500_5.625deg.zip
inflating: geopotential_500/geopotential_500hPa_1979_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1980_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1981_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1982_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1983_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1984_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1985_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1986_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1987_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1988_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1989_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1990_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1991_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1992_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1993_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1994_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1995_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1996_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1997_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1998_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_1999_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2000_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2001_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2002_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2003_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2004_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2005_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2006_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2007_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2008_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2009_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2010_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2011_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2012_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2013_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2014_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2015_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2016_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2017_5.625deg.nc
inflating: geopotential_500/geopotential_500hPa_2018_5.625deg.nc
###Markdown
Open the data
###Code
z500 = xr.open_mfdataset('geopotential_500/*.nc', combine='by_coords')
z500
# Plot an example
z500.z.isel(time=0).plot();
###Output
_____no_output_____
###Markdown
Create a simple climatological forecastRemember that we are using the years 2017 and 2018 for testing/evaluation, so we are not allowed to use these years to train any data-driven model.For more information on the climatology and persistence forecasts used in the paper check out `notebooks/1-climatology-persistence.ipynb`.
###Code
# To speed things up we will just take the mean for 2016
climatology = z500.sel(time=slice('2016', '2016')).mean('time').load()
climatology.z.plot()
###Output
_____no_output_____
###Markdown
Evaluate the climatologyPlease check the paper for details on the evaluation metric. Here we will use the functions from `src/score.py`. To make sure we are always using the same targets for testing, we also implemented a function to load the test data.
###Code
from src.score import *
z500_test = load_test_data('geopotential_500/', 'z')[::12] # Take data only every 12 hours to speed up computation on Binder
rmse_climatology = compute_weighted_rmse(climatology.z, z500_test).load()
rmse_climatology
###Output
_____no_output_____
###Markdown
So we get a climatological RMSE of 1080 m^2/s^2 which is very similar to the RMSE we get for the climatology for all training years. Train a neural networkNow let's train a simple convolutional neural network. We are using several functions defined in `src/train_nn.py`. You can use and modify these or write your own function for data loading etc. For more information on the networks check out `notebooks/3-cnn-example.ipynb`.
###Code
from src.train_nn import *
# This limits TF memory usage on the GPU
# limit_mem()
###Output
_____no_output_____
###Markdown
First, we need to create the data generators for training, validation and testing. The main reason why we are using data generators instead of just loading the data as Numpy arrays is that this would require loading the same data twice since the features and targets are the same fields, just offset in time.
###Code
bs = 32
lead_time = 5*24
var_dict = {'z': None}
# Use 2015 for training and 2016 for validation
dg_train = DataGenerator(
z500.sel(time=slice('2015', '2015')), var_dict, lead_time, batch_size=bs, load=True)
dg_valid = DataGenerator(
z500.sel(time=slice('2016', '2016')), var_dict, lead_time, batch_size=bs, mean=dg_train.mean, std=dg_train.std, shuffle=False)
# Now also a generator for testing. Important: Shuffle must be False!
dg_test = DataGenerator(z500.sel(time=slice('2017', '2018')).isel(time=slice(0, None, 12)), # Limiting the data for Binder
var_dict, lead_time, batch_size=bs, mean=dg_train.mean, std=dg_train.std, shuffle=False)
X, y = dg_train[0]
# Batches have dimensions [batch_size, lat, lon, channels]
X.shape, y.shape
###Output
_____no_output_____
###Markdown
Now let's build a simple fully convolutional network. We are using periodic convolutions in the longitude direction. These are defined in `train_nn.py`.
###Code
cnn = keras.models.Sequential([
PeriodicConv2D(filters=32, kernel_size=5, activation='relu', input_shape=(32, 64, 1,)),
PeriodicConv2D(filters=1, kernel_size=5)
])
cnn.summary()
cnn.compile(keras.optimizers.Adam(1e-4), 'mse')
# Train a little bit ;)
cnn.fit_generator(dg_train, epochs=1, validation_data=dg_valid)
###Output
270/270 [==============================] - 6s 21ms/step - loss: 1.7766 - val_loss: 1.0282
###Markdown
Create a prediction and compute scoreNow that we have a model (albeit a crappy one) we can create a prediction. For this we need to create a forecast for each forecast initialization time in the testing range (2017-2018) and unnormalize it. We then convert the forecasts to a Xarray dataset which allows us to easily compute the RMSE. All of this is taken care of in the `create_predictions()` function.
###Code
preds = create_predictions(cnn, dg_test)
preds
compute_weighted_rmse(preds.z, z500_test).load()
time = '2017-03-02T00'
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,5))
z500_test.sel(time=time).plot(ax=ax1)
preds.sel(time=time).z.plot(ax=ax2);
###Output
_____no_output_____
###Markdown
The pitch:
###Code
import numpy as np
import matplotlib.pyplot as plt
import physipy
from physipy import units, constants, set_favunit, setup_matplotlib
from physipy import m, kg, K, sr
# reading units and constants
W = units["W"]
mum = units["mum"]
hp = constants["Planck"]
c = constants["c"]
kB = constants["k"]
# create a function, and attach a favorite unit (for display)
@set_favunit(W/(m**2*sr*mum))
def planck_W(wl, T):
return 2*hp*c**2/(wl**5) * 1/(np.exp(hp*c/(wl*kB*T))-1)/sr
# create scalar with unit
T_bb = 5800*K
# create an array with unit
ech_wl = np.linspace(0.3, 3, 100)*mum
ech_wl.favunit = mum
# activate favunit handling for automatic plot axis label
setup_matplotlib()
plt.plot(ech_wl, planck_W(ech_wl, T_bb))
###Output
_____no_output_____
###Markdown
A quickstart on physipyHomepage of the project: [physipy](https://github.com/mocquin/physipy)
###Code
import numpy as np
import physipy
###Output
_____no_output_____
###Markdown
Dimension object The Dimension object is basically a dictionary that stores each dimension's name and power. A dimension can be created in different ways:
###Code
a_length_dimension = physipy.Dimension("L")
print(a_length_dimension)
a_length_dimension
a_speed_dimension = physipy.Dimension({"L": 1, "T":-1})
print(a_speed_dimension)
a_speed_dimension
###Output
L/T
###Markdown
Dimensions can be multiplied and divided as expected:
###Code
product_dim = a_length_dimension * a_speed_dimension
print(product_dim)
product_dim
div_dim = a_length_dimension / a_speed_dimension
print(div_dim)
div_dim
###Output
T
###Markdown
You can display a dimension in terms of the corresponding SI unit (returns a string):
###Code
print(a_length_dimension.str_SI_unit()) # meters
print(a_speed_dimension.str_SI_unit()) # meters/second
###Output
m
m/s
###Markdown
Other operations are available:
###Code
print((a_length_dimension**2).str_SI_unit())
print(a_length_dimension == a_speed_dimension)
print((1/a_length_dimension).str_SI_unit())
###Output
m**2
False
1/m
###Markdown
Quantity object The Quantity class is simply the association of a numerical value and a dimension. It can be created in several ways:
###Code
yo_mama_weight = physipy.Quantity(2000, physipy.Dimension("M"))
print(yo_mama_weight)
yo_papa_weight = 2000 * physipy.kg
print(yo_papa_weight)
print(yo_mama_weight == yo_papa_weight)
###Output
True
###Markdown
If dimensional analysis allows it, you can perform standard operations on and between Quantity objects:
###Code
print(yo_mama_weight + yo_papa_weight)
# speed of light
c = physipy.constants["c"]
E_mama = yo_mama_weight * c**2
print(E_mama)
###Output
1.7975103574736352e+20 kg*m**2/s**2
###Markdown
Unit conversion and displaying You can change the unit a Quantity is displayed in by changing its ```favunit``` attribute, which means "favorite unit". It defaults to ```None```, which displays the Quantity in SI units.
###Code
print(yo_mama_weight.favunit)
# displaying in SI-unit, kg
print(yo_mama_weight)
# changing the favunit
g = physipy.units["g"]
yo_mama_weight.favunit = g
# now displayed in grams
print(yo_mama_weight)
###Output
2000000.0 g
###Markdown
Another example:
###Code
speed_of_light = c
print(c)
mile = physipy.imperial_units["mil"]
one_hour = physipy.units["h"]
retarded_speed_unit = mile / one_hour
print(c.to(retarded_speed_unit))
###Output
4.249026963779527e+16 mil/h
###Markdown
Units and constants Lots of units and constants are packed into various dicts. The keys are the symbols of the units/constants, and the values are the corresponding quantities.
###Code
# pico-Ampere
pA = physipy.units["pA"]
print(pA)
# Planck's constant
h_p = physipy.constants["h"]
print(h_p)
###Output
6.62607004e-34 kg*m**2/s
###Markdown
Note that units and constants are just Quantity objects!
###Code
print(type(pA))
print(type(h_p))
###Output
<class 'physipy.quantity.quantity.Quantity'>
<class 'physipy.quantity.quantity.Quantity'>
###Markdown
Numpy compatibility You can define a Quantity with a numpy.ndarray value:
###Code
position_sampling = np.array([1,2,3]) * physipy.m
print(position_sampling)
time_sampling = physipy.Quantity([0.1, 0.2, 0.3], physipy.Dimension("T"))
print(time_sampling)
###Output
[0.1 0.2 0.3] s
###Markdown
You can then play with those as you would with regular ndarrays, as long as you respect dimensional analysis:
###Code
print(position_sampling / time_sampling)
print(2 * position_sampling)
try:
position_sampling + time_sampling
except Exception as e:
print("You can't add a length and a time dummy !")
print(e)
from math import pi
try:
# you cant compute the cos of a length
np.cos(position_sampling)
except:
# but you can for a plane angle
an_angle_array = np.array([0, pi/2, pi]) * physipy.rad
print(np.cos(an_angle_array))
# it also works with degrees of course
another_angle_array = np.array([0, 90, 180]) * physipy.units["deg"]
print(np.cos(another_angle_array))
###Output
[1.0 6.123233995736766e-17 -1.0]
[1.0 6.123233995736766e-17 -1.0]
###Markdown
List of constants and units Units
###Code
print(physipy.SI_units.keys())
print(physipy.SI_derived_units.keys())
print(physipy.imperial_units.keys())
###Output
dict_keys(['in', 'ft', 'yd', 'mi', 'mil', 'NM', 'fur', 'ac', 'gallon', 'quart', 'pint', 'cup', 'foz', 'tbsp', 'tsp', 'oz', 'lb', 'st', 'ton', 'slug', 'kn', 'lbf', 'kip', 'BTU', 'cal', 'kcal', 'psi', 'hp'])
###Markdown
Constants
###Code
print(physipy.scipy_constants.keys())
print(physipy.scipy_constants_codata.keys())
###Output
dict_keys(['yotta', 'zetta', 'exa', 'peta', 'tera', 'giga', 'mega', 'kilo', 'hecto', 'deka ', 'deci ', 'centi', 'milli', 'micro', 'nano ', 'pico ', 'femto', 'atto ', 'zepto', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi', 'zebi', 'yobi', 'gram', 'metric_ton', 'grain', 'lb', 'pound', 'blob', 'slinch', 'slug', 'oz', 'ounce', 'stone', 'long_ton', 'short_ton', 'troy_ounce', 'troy_pound', 'carat', 'm_u', 'u', 'atomic_mass', 'deg', 'arcmin', 'arcminute', 'arcsec', 'arcsecond', 'minute', 'hour', 'day', 'week', 'year', 'Julian_year', 'inch', 'foot', 'yard', 'mile', 'mil', 'pt', 'point', 'survey_foot', 'survey_mile', 'nautical_mile', 'fermi', 'angstrom', 'micron', 'au', 'astronomical_unit', 'light_year', 'parsec', 'atm', 'atmosphere', 'bar', 'torr', 'mmHg', 'psi', 'hectare', 'acre', 'liter', 'litre', 'gallon', 'gallon_US', 'gallon_imp', 'fluid_ounce', 'fluid_ounce_US', 'fluid_ounce_imp', 'bbl', 'barrel', 'kmh', 'mph', 'mach', 'speed_of_sound', 'knot', 'zero_Celsius', 'degree_Fahrenheit', 'eV', 'electron_volt', 'calorie', 'calorie_th', 'calorie_IT', 'erg', 'Btu', 'Btu_IT', 'Btu_th', 'ton_TNT', 'hp', 'horsepower', 'dyn', 'dyne', 'lbf', 'pound_force', 'kgf', 'kilogram_force'])
###Markdown
Accera Quickstart ExampleIn this example, we will:* Implement matrix multiplication with a ReLU activation (matmul + ReLU), commonly used in machine learning algorithms * Generate two implementations: a naive algorithm and one with loop transformations* Compare the timings of both implementations SetupFirst, we'll install Accera using `pip`. Optional: if running this notebook locally* Linux/macOS: install gcc using `apt install gcc`.* Windows: install Microsoft Visual Studio and run `vcvars64.bat` to set up Visual Studio tools in your `PATH` before starting the Jupyter environment.
###Code
!pip install accera
###Output
_____no_output_____
###Markdown
BuildRun the code below to implement `ReLU(C + A @ B)` on arrays `A`, `B`, and `C`. We'll build a package called `"hello_accera"` that will export both versions as C functions.
###Code
import accera as acc
# define placeholder inputs/output
A = acc.Array(role=acc.Array.Role.INPUT, shape=(512, 512))
B = acc.Array(role=acc.Array.Role.INPUT, shape=(512, 512))
C = acc.Array(role=acc.Array.Role.INPUT_OUTPUT, shape=(512, 512))
# implement the logic for matmul and relu
matmul = acc.Nest(shape=(512, 512, 512))
i1, j1, k1 = matmul.get_indices()
@matmul.iteration_logic
def _():
C[i1, j1] += A[i1, k1] * B[k1, j1]
relu = acc.Nest(shape=(512, 512))
i2, j2 = relu.get_indices()
@relu.iteration_logic
def _():
C[i2, j2] = acc.max(C[i2, j2], 0.0)
package = acc.Package()
# fuse the i and j indices of matmul and relu, add to the package
schedule = acc.fuse(matmul.create_schedule(), relu.create_schedule(), partial=2)
package.add(schedule, args=(A, B, C), base_name="matmul_relu_fusion_naive")
# transform the schedule, add to the package
# here we will focus only on the j index. For a more complete example, see:
# https://microsoft.github.io/Accera/Tutorials/Optimized_MatMul/
tile_size_j = 256
target = acc.Target(category=acc.Target.Category.CPU)
f, i, j, k = schedule.get_indices()
jj = schedule.split(j, tile_size_j)
jjj = schedule.split(jj, (target.vector_bytes // 4) * 2) # there are 2 vfma execution units, each holding (target.vector_bytes // 4) 32-bit float elements
jjjj = schedule.split(jjj, target.vector_bytes // 4) # each SIMD register holds (target.vector_bytes // 4) 32-bit float elements
schedule.reorder(j, f, k, i, jj, jjj, jjjj) # reorder the loops
plan = schedule.create_plan(target)
plan.kernelize(unroll_indices=(jjj,), vectorize_indices=jjjj) # unroll and vectorize
package.add(plan, args=(A, B, C), base_name="matmul_relu_fusion_transformed")
# build a dynamically-linked package (a .dll or .so) that exports both functions
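# A sketch of the build step implied by the comment above (API names assumed
# from the Accera docs). The Benchmark section below loads "hello_accera.hat",
# so the package name must match:
package.build(name="hello_accera", format=acc.Package.Format.HAT_DYNAMIC)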
###Output
_____no_output_____
###Markdown
BenchmarkIn the previous section, we built a binary (.so) and a header file (.hat). Next, we will load the package and compare the timings of both implementations.
###Code
import hatlib as hat
import numpy as np
# load the package
hat_package = hat.load("hello_accera.hat")
# call one of the functions with test inputs
A_test = np.random.rand(512, 512).astype(np.float32)
B_test = np.random.rand(512, 512).astype(np.float32)
C_test = np.zeros((512, 512)).astype(np.float32)
C_numpy = np.maximum(C_test + A_test @ B_test, 0.0)
matmul_relu = hat_package["matmul_relu_fusion_transformed"]
matmul_relu(A_test, B_test, C_test)
# check correctness
np.testing.assert_allclose(C_test, C_numpy, atol=1e-3)
# benchmark all functions
hat.run_benchmark("hello_accera.hat", batch_size=5, min_time_in_sec=5)
###Output
_____no_output_____ |
docs/introduction/non-developer.ipynb | ###Markdown
User Quick StartNote if you are viewing this on github, you may need to view it on Google Colab [](https://colab.research.google.com/github/Ahuge/sept/blob/release/docs/introduction/non-developer.ipynb) IntroductionThis `User Quick Start` guide tries to demonstrate- How to get started modifying your own ``SEPT`` templates- Why you should care and flexibility you can gain from using ``SEPT`` InstallationUsers can easily install ``SEPT`` according to the following steps:Installing ``SEPT`` from the python packaging index is as simple as executing the following command:```bashpip install sept``` Getting StartedThis tutorial is aimed at someone that is not a developer and instead will be a user of some program that a developer writes that takes advantage of ``SEPT``For this tutorial, we are going to learn how to write ``SEPT`` templates using the simple application that was created from the [Developer Sample Application](./developer.ipynbsample-qt-application) tutorial.The finished product from that tutorial will look similar to the following:This application is a program that will prepare several "Version" entities from your internal `Shotgun `_ website. If you haven't worked with Shotgun before, don't worry. You should be able to substitute Shotgun for any production tracking software, and a "Version" entity for any rendered image sequence that can is approved to send back to the movie studio.There are 2 main components to this application.On the left side of the dialog you can see the editor window where we can test our ``SEPT`` template.On the right side is the help window that shows all of the ``Token`` and ``Operator`` keywords that you have access to. Modifying TemplatesThe following are some examples of path templates that you may write. For simplicity's sake, the examples are going to work off of a single input file, however in reality you may be working with tens or even hundreds of input files at once. The quicktime file will have the following information about it```yamlproject: HeroJourneysequence: Battleshot: 001step: compversion: 2extension: mov```Within your studio, the filepath looks like `HeroJourney_Battle_001_comp_v1.mov`.
###Code
!pip install sept
from sept import PathTemplateParser
data = {
"project": "HeroJourney",
"sequence": "Battle",
"shot": "001",
"step": "comp",
"version": 2,
"extension": "mov",
}
parser = PathTemplateParser()
###Output
_____no_output_____
###Markdown
First ExampleIn this example, your client expects the movie file to no longer have the project code when you deliver it to them. For example, we want the filename to be `Battle_001_comp_v2.mov`. This means we need to write a custom template to remove the project code. The following code block allows you to write a `SEPT` template and execute it to see the result.
###Code
# Type your SEPT template here:
template_str = "{{sequence}}_{{shot}}_{{step}}_v{{version}}.{{extension}}"
result = parser.parse(template_str, data)
print(result)
###Output
_____no_output_____
###Markdown
Breaking It DownThe template above takes the `sequence`, `shot`, and `step` tokens and joins them with an underscore. It then adds "_v" and the `version` token to the end before adding the file `extension` token at the end. To put a `Token` in your template you can place the name of your `Token` between two sets of curly brackets and `SEPT` will write out whatever value is in the `data` with that name. ```{{tokenName}}``` You can put any characters you would like outside of the curly brackets and they will be rendered exactly as you have written them. Introduction To OperatorsThere are times when the client requires naming that cannot be created by just adding tokens together from your Shotgun data. In these cases you may need to apply an `Operator` to a `Token` to change it in some way. `SEPT` provides several common operations out of the box but a developer can write custom ones that may apply better to your specific use case. If there is functionality that `SEPT` does not provide out of the box that you think it should, please reach out and let me know what you think it should provide! Using An OperatorTo use an `Operator` with your `Token` you need to modify how you write the expression. Instead of `{{tokenName}}`, you should instead write `{{operatorName:tokenName}}`. The syntax extends the syntax you already have learned by adding an `operatorName` followed by a full colon `:` and then the `tokenName`. Lowercase Template ExampleIn this example, our client has requested that everything in our filename is lowercase. Without using an `Operator`, there is no easy way to achieve this, you would need to request that a producer on the show changes the name of the sequence from "Battle" to "battle". If this is at the start of the project, it may not be a huge deal, but as soon as work has started, this becomes nearly impossible to achieve without having to redo work. But not to worry! Operators are here! To create a filename that looks like `battle_001_comp_v2.mov`, we just need to apply a "lower" `Operator` on the sequence `Token`.
###Code
# Type your SEPT template here:
template_str = "{{lower:sequence}}_{{shot}}_{{step}}_v{{version}}.{{extension}}"
result = parser.parse(template_str, data)
print(result)
###Output
_____no_output_____
###Markdown
Breaking It DownLuckily for us, we know that all of the shots are numbers and lowercasing a number doesn't change it so all we have to apply `lower` to is the "sequence" `Token`. If you compare this to the previous template that we wrote, you will see that the only change is the addition of our `Operator` right at the beginning. Operators With InputsThere are some advanced `Operator` types that require additional information from you to do their work. A good example of one of these would be the `replace` `Operator`. This allows you to find and replace characters in your Token. It needs to know what you want to find and what you want to replace it with. These are called "Operator Inputs" and any `Operator` that requires them should provide a description of what it expects and some examples of using it. To set the value of an input, we need to surround it in square brackets directly after the name of our `Operator`.```{{operatorName[inputValue]:tokenName}}```Some operators may expect multiple input values, the syntax for this is very similar, you just need to separate the input values with a comma.```{{operatorName[inputValue1,inputValue2]:tokenName}}``` Below is the description from `replace`: ```replace Operator Operator Inputs Find String: The characters that you want to search for and replace Required: True Replace String: The characters that you want to replace the "Find String" with. Required: TrueThe replace Operator allows you to find and replace characters in your Token. Examples (name = "alex"): {{replace[ex,an]:name}} -> "alan" {{replace[kite,dog:name}} -> "alex"``` Replace Sequence ExampleIn this example, our client has renamed the "Battle" sequence and decided that it is now called "Conflict". This messes us up because we have been working with it as "Battle" and we don't want to redo any work.Not to worry! Operators are here! To create a filename that looks like `Conflict_001_comp_v2.mov`, we just need to apply a "replace" `Operator` on the sequence `Token`.
###Code
# Type your SEPT template here:
template_str = "{{replace[Battle,Conflict]:sequence}}_{{shot}}_{{step}}_v{{version}}.{{extension}}"
result = parser.parse(template_str, data)
print(result)
###Output
_____no_output_____
###Markdown
Breaking It DownBecause we know that our sequence is called "Battle", we can search for the word "Battle" and replace it with "Conflict". This is preferable to just writing the word "Conflict" in there because now our template will work even if the input file is from a different sequence because we only want to replace the "Battle" sequence. This (again) is an extension of our syntax, compared to our "lower" example, you can see we have added `[Battle,Conflict]` in our template. If we refer to the "replace" documentation above we can see that "replace" takes two inputs, `Find String` and `Replace String`. In our example we have set the `Find String` equal to "Battle" and the `Replace String` equal to "Conflict". This means that any time it finds "Battle" as the sequence, it will replace it with "Conflict". Nested OperatorsThere may be certain times when you need to apply more than one `Operator` to a `Token` in order to get exactly what you want. `SEPT` fully supports this by nesting an `Operator` within another `Operator`. The syntax for this should be an extension of everything you have learned already. You can take an entire Token Expression and use it as the `Token` value for a separate Token Expression. This allows you to apply more than one `Operator` to a `Token`. ```{{operatorName2:{{operatorName1:tokenName}}}}``` `SEPT` will apply each `Operator` one at a time inside out. Nested Operator ExampleIn this example we need to return only the first 4 characters from our sequence and then make sure that they are all in uppercase. This will introduce you to two new operators that we need to use to achieve our goal. The first is the opposite to the "lower" `Operator` that we saw earlier, "upper", and the second is "substr" which allows us to return a subset of the `Token`. "upper" doesn't take any inputs and "substr" takes a `Start Location` and optionally a `End Location`. "substr" is a bit special in that it will only accept certain values as inputs, it takes numbers for the location in the `Token` as well as "start" and "end". To create a filename that looks like `BATT_001_comp_v2.mov`, we can use the following expression.
###Code
# Type your SEPT template here:
template_str = "{{substr[start,4]:{{upper:sequence}}}}_{{shot}}_{{step}}_v{{version}}.{{extension}}"
result = parser.parse(template_str, data)
print(result)
###Output
_____no_output_____ |
jupyter_notebooks/2_Planning/1_PlanningAsSearch/A-Star.ipynb | ###Markdown
A*
###Code
from queue import PriorityQueue
import numpy as np
from enum import Enum
class Action(Enum):
"""
An action is represented by a 3 element tuple.
The first 2 values are the delta of the action relative
to the current grid position. The third and final value
is the cost of performing the action.
"""
UP = (-1, 0, 1)
URIGHT = (-1, 1, 3*np.math.sqrt(2))
RIGHT = (0, 1, 1)
DRIGHT = (1, 1, 3*np.math.sqrt(2))
DOWN = (1, 0, 1)
DLEFT = (1, -1, 3*np.math.sqrt(2))
LEFT = (0, -1, 1)
ULEFT = (-1, -1, 3*np.math.sqrt(2))
def __str__(self):
if self == self.UP:
return '↑'
elif self == self.URIGHT:
return '↗'
elif self == self.RIGHT:
return '→'
elif self == self.DRIGHT:
return '↘'
elif self == self.DOWN:
return '↓'
elif self == self.DLEFT:
return '↙'
elif self == self.LEFT:
return '←'
elif self == self.ULEFT:
return '↖'
@property
def cost(self):
return self.value[2]
@property
def delta(self):
return (self.value[0], self.value[1])
def valid_actions(grid, current_node):
"""
Returns a list of valid actions given a grid and current node.
"""
# Select all actions as valid.
valid = [a for a in Action]
n, m = grid.shape[0] - 1, grid.shape[1] - 1
x, y = current_node
for a in Action:
new_x = x + a.delta[0]
new_y = y + a.delta[1]
# Check if the node is off the grid
if new_x < 0 or new_x > n or new_y < 0 or new_y > m:
valid.remove(a)
# Check if the node is an obstacle
elif grid[new_x, new_y] == 1:
valid.remove(a)
return valid
def visualize_path(grid, path, start):
    sgrid = np.zeros(np.shape(grid), dtype=str)  # the np.str alias was removed from NumPy; the builtin str works
sgrid[:] = ' '
sgrid[grid[:] == 1] = 'O'
pos = start
for a in path:
da = a.value
sgrid[pos[0], pos[1]] = str(a)
pos = (pos[0] + da[0], pos[1] + da[1])
sgrid[pos[0], pos[1]] = 'G'
sgrid[start[0], start[1]] = 'S'
return sgrid
###Output
_____no_output_____
###Markdown
HeuristicsThe heuristic function determines the $h()$ value for each cell based on the goal cell and the method chosen to determine it. The heuristic value can be the Euclidean distance between these cells $h= \left((x_i-x_{goal})^2+(y_i-y_{goal})^2\right)^{1/2}$ or the "Manhattan distance", which is the minimum number of moves required to reach the goal from the assigned cell $h = ||x_i-x_{goal}|| + ||y_i-y_{goal}||$. For this exercise you could use either, or something else which is *admissible* and *consistent*.The input variables include* **```position```** the coordinates of the cell for which you would like to determine the heuristic value.* **```goal_position```** the coordinates of the goal cell
###Code
# TODO: implement a heuristic function. This may be one of the
# functions described above or feel free to think of something
# else.
def heuristic(position, goal_position):
# Use euclidean distance as the heuristic for now
h = np.math.sqrt((goal_position[0] - position[0])**2 +
(goal_position[1] - position[1])**2)
return h
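# A hedged alternative mentioned in the markdown above: the "Manhattan
# distance" heuristic. It is admissible for this action set because a diagonal
# move costs more than two straight moves, so the cheapest obstacle-free path
# between two cells still costs dx + dy. Pass it to a_star as `h` to try it.
def manhattan_heuristic(position, goal_position):
    return abs(goal_position[0] - position[0]) + abs(goal_position[1] - position[1])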
###Output
_____no_output_____
###Markdown
A* searchA* search is an extension of the cost search you implemented. A heuristic function is used in addition to the cost penalty. Thus if the setup is:* $c$ is the current cost* $g$ is the cost function* $h$ is the heuristic functionThen the new cost is $c_{new} = c + g() + h()$.The difference between $g$ and $h$ is that $g$ models the cost of performing actions, irrespective of the environment, while $h$ models the cost based on the environment, i.e., the distance to the goal. You know what comes next, turn the `TODOs` into `DONEs` :)
###Code
def a_star(grid, h, start, goal):
# Initialize search variables.
queue = PriorityQueue()
queue.put((0, start))
    visited = {start}  # a set containing the start tuple; set(start) would unpack the tuple's elements instead
branch = {}
found = False
while not queue.empty():
#print(queue.queue)
item = queue.get()
current_node = item[1]
if current_node == start:
current_cost = 0.0
else:
current_cost = branch[current_node][0]
if current_node == goal:
print('Found a path.')
found = True
break
else:
for action in valid_actions(grid, current_node):
# get the tuple representation
da = action.delta
next_node = (current_node[0] + da[0],
current_node[1] + da[1])
# Calculate branch cost (action.cost + g)
branch_cost = action.cost + current_cost
# Calculate queue cost (action.cost + g + h)
                queue_cost = branch_cost + h(next_node, goal)  # use the heuristic passed in as `h`
#print("Adding", (queue_cost, next_node))
if next_node not in visited:
visited.add(next_node)
branch[next_node] = (branch_cost, current_node, action)
queue.put((queue_cost, next_node))
#else:
# print(" ... already in visited set")
path = []
path_cost = 0
if found:
# retrace steps
n = goal
path_cost = branch[n][0]
while branch[n][1] != start:
path.append(branch[n][2])
n = branch[n][1]
path.append(branch[n][2])
else:
print('**********************')
print('Failed to find a path!')
print('**********************')
return path[::-1], path_cost
start = (0, 0)
goal = (4, 4)
grid = np.array([
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 0, 0],
])
path, cost = a_star(grid, heuristic, start, goal)
print(path, cost)
# S -> start, G -> goal, O -> obstacle
visualize_path(grid, path, start)
###Output
_____no_output_____ |
Spring2021-J/Problem-Sets/ProblemSet4/ProblemSet4_2021.ipynb | ###Markdown
Problem Set 4, Spring 2021, Villas-Boas Due As posted on bCourses Assignments pageThis problem set is to be done using R. To receive full credit, answers must include a correct answer, demonstrate all steps (codes and corresponding output) used to obtain the answer, and be uploaded to [Gradescope](https://www.gradescope.com/courses/226571), correctly indicating on which pages your answers to each question can be found. DataThe data set `Pset4_2021.dta` contains state-level data on the percent of children born with low birth-weight, infant mortality, and the percent of population on AFDC (Aid to Families with Dependent Children). AFDC was an entitlement program that guaranteed benefits to all recipients whose income and resources were below state-determined eligibility levels. The data set also contains variables describing additional state characteristics, including per-capita income, per-capita number of doctors, etc. for the year 1987.|Variable Name | Description | | :----------- | :----------------------- ||lowbrth | percent low weight births ||infmort | infant mortality rate (unit: deaths per 1,000 live births) ||afdcprt | participants in AFDC (unit: 1000 persons) ||popul | population (unit: 1000 persons) ||pcinc | per capita income (unit: \$ per capita) ||afdcprc | percent of population on AFDC ||afdcpay | average monthly AFDC payment (unit: \$) ||afdcinc | afdcpay as % per capita income ||stateabb | state postal code ||state | name of state ||bedspc | hospital beds per capita ||povrate | % population below poverty line ||physicpc | physicians per capita ||GovDem | = 1 if the state had a democratic governor in 1986, = 0 otherwise (i.e. Republican) \*| \* Source: https://en.wikipedia.org/wiki/1986 United States gubernatorial elections PreambleUse the following code cell to load the dataset and any packages you plan to use (at least **tidyverse** and **haven**).
###Code
# Add your preamble code here
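# One possible preamble (a sketch — adjust the path if Pset4_2021.dta is
# stored somewhere other than the working directory):
library(tidyverse)
library(haven)
df <- read_dta("Pset4_2021.dta")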
###Output
_____no_output_____
###Markdown
Exercise 1 **1.** Please provide a table of summary statistics (mean, median, std, min and max) for percent low birth weight, infant mortality rate, physicians per capita, average AFDC monthly payment, hospital beds per capita, percent population below the poverty line, and the indicator for Democratic Governor.*(Hint: See [Coding Bootcamp Part 5](https://r.datahub.berkeley.edu/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fds-modules%2FENVECON-118&urlpath=tree%2FENVECON-118%2FSpring2021-J%2FSections%2FCoding+Bootcamps) for how to do this using the `stargazer` package.)* ➡️ Type your written work for and answer to part Exercise 1 - Q1 here.
###Code
# insert code here
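# One possible approach (a sketch, assuming `df` was loaded in the preamble;
# stargazer expects a plain data.frame):
library(stargazer)
sumstats <- df %>%
  select(lowbrth, infmort, physicpc, afdcpay, bedspc, povrate, GovDem)
stargazer(as.data.frame(sumstats), type = "text",
          summary.stat = c("mean", "median", "sd", "min", "max"))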
###Output
_____no_output_____
###Markdown
**2.** Please graph the histogram of percent low birthweight for all the states. Then, plot another histogram for states with average monthly AFDC payment (afdcpay) larger than the median. Finally, plot the same graph for states with average monthly AFDC payment (afdcpay) less than or equal to the median. What is the average low birthweight percentage for each group? ➡️ Type your written work for and answer to part Exercise 1 - Q2 here.
###Code
# insert code here
###Output
_____no_output_____
###Markdown
**3.** Produce a new plot that overlaps the histograms for the below and above median groups. What do you conclude in terms of similarities and differences in the two histograms? (Keep answers brief: max 2-4 sentences)(Hint: see the Histograms section of [Coding Bootcamp Part 4](https://r.datahub.berkeley.edu/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fds-modules%2FENVECON-118&urlpath=tree%2FENVECON-118%2FSpring2021-J%2FSections%2FCoding+Bootcamps) for help overlapping histograms) ➡️ Type your written work for and answer to part Exercise 1 - Q3 here.
###Code
# insert code here
###Output
_____no_output_____
###Markdown
**4.** Estimate the model of low percent birth weight as a linear function of a constant, average monthly AFDC payment, and physicians per capita. Comment on the estimated intercept and each of the right hand side variables' estimated parameters in terms of Sign, Size, and Significance (SSS). ➡️ Type your written work for and answer to part Exercise 1 - Q4 here.
###Code
# insert code here
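# One possible specification (a sketch, assuming `df` from the preamble):
mod_q4 <- lm(lowbrth ~ afdcpay + physicpc, data = df)
summary(mod_q4)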
###Output
_____no_output_____
###Markdown
**5.** In absolute terms, does the average monthly AFDC payment or the per capita number of physicians matter more in predicting the percent low birth weight in a state? (Use standardized approach) ➡️ Type your written work for and answer to part Exercise 1 - Q5 here.
###Code
# insert code here
###Output
_____no_output_____
###Markdown
**6.** Estimate the model of low percent birth weight as a linear function of a constant, average monthly AFDC payment, hospital beds per capita, physicians per capita, and the percent below poverty rate. Test the joint significance of the hospital beds per capita and physicians per capita variables at the 10% significance level. ➡️ Type your written work for and answer to part Exercise 1 - Q6 here.
###Code
# insert code here
###Output
_____no_output_____
###Markdown
**7.** Specify a model to predict the **average low percent birthweight** of a state with a monthly average AFDC payment equal to \$550, 0.008 hospital beds per capita, and 0.092 physicians per capita. ➡️ Type your written work for and answer to part Exercise 1 - Q7 here.
###Code
# insert code here
###Output
_____no_output_____
###Markdown
**8.** Run that same regression model specified in 7 that allows you to easily obtain "inputs" for a confidence interval for that average prediction and report the 99% confidence interval.(Hint for 7 and 8: Generate the new x variables such that when you regress y on newx1 newx2 newx3 the constant is what you need, as we did in lecture 14.) ➡️ Type your written work for and answer to part Exercise 1 - Q8 here.
###Code
# insert code here
###Output
_____no_output_____
###Markdown
**9.** Estimate the model that allows you to easy obtain the 99% confidence interval for the average predicted **infant mortality rate** for a state with a monthly average AFDC payment equal to \$550, 0.008 hospital beds per capita, and 0.092 physicians per capita. ➡️ Type your written work for and answer to part Exercise 1 - Q9 here.
###Code
# insert code here
###Output
_____no_output_____
###Markdown
**10.** Suppose I told you I want you to construct the 99% confidence interval for a specific state's predicted **infant mortality rate** according to the model you estimated in 9 above. **True or False?** "The 99% confidence interval for a specific state's predicted infant mortality rate is wider than the CI you obtained in 9 for the predicted average infant mortality rate?" Explain briefly. (Hint: See lectures 14 / 15) ➡️ Type your written work for and answer to part Exercise 1 - Q10 here.
###Code
# insert code here
###Output
_____no_output_____
###Markdown
**11.** First, estimate the model (A) of infant mortality rate as a linear function of a constant, monthly average AFDC payment, per capita hospital beds, per capita hospital beds squared, per capita physicians, and the poverty rate. Next, consider and estimate an alternative model (B) of infant mortality rate as a linear function of a constant, the monthly average AFDC payment, the log of per capita hospital beds, per capita physicians, and the poverty rate. Which one do you prefer, model (A) or (B)? (Hint: See lecture 13)
###Code
# insert code here
###Output
_____no_output_____
###Markdown
Exercise 2 Consider two models below:\begin{align*}lowbrth &= \beta_0 + \beta_1 AFDC + \beta_2 bedspc + \beta_3 physicpc + u & (\text{model 2.A}) \\log(lowbrth) &= \alpha_0 + \alpha_1 AFDC + \alpha_2 bedspc + \alpha_3 physicpc + v & (\text{model 2.B})\end{align*} **(a)** Predict the average percent low birth-weight for a state with afdcpay = 400, bedspc = 0.001 and physicpc = 0.1 using model 2.B.(Hint: See lectures 14/15) ➡️ Type your written work for and answer to part Exercise 2 - Part (a) here.
###Code
# insert code here
###Output
_____no_output_____
###Markdown
**(b)** Which of the models do you prefer? Show all the calculations and the required values obtained via R to answer this question. (Hint: See lecture 15) ➡️ Type your written work for and answer to part Exercise 2 - Part (b) here.
###Code
# insert code here
###Output
_____no_output_____
###Markdown
Exercise 3 **(a)** Estimate model 2.B separately for states with a democratic governor and for those with a republican governor in 1986. Formally test at the 10% significance level whether the estimation regression should be done separately or whether we can pool the data. (Hint: See lecture 17 slide 37 onward) ➡️ Type your written work for and answer to part Exercise 3 - Part (a) here.
###Code
# insert code here
###Output
_____no_output_____
###Markdown
**(b)** I would like to know whether the effect of the monthly AFDC payments on the log of percent low birthweight (in model 2.B) differs depending on whether the governor is democrat or not (republican in that case). Estimate a model that enables you to test this and please interpret your findings. Compare the p value for the estimated coefficient of interest to the 10 percent significance level to conclude whether you reject the null of no heterogeneity in the effect of monthly AFDC payments on the percent low birth weight due to the governor's party affliation, against a two sided alternative, holding all else equal. (Hint: Generate the interaction you need and add it to the regression.) ➡️ Type your written work for and answer to part Exercise 3 - Part (b) here.
###Code
# insert code here
###Output
_____no_output_____ |
error_and_fp/Truncation vs Rounding.ipynb | ###Markdown
Truncation Error vs Rounding Error In this notebook, we'll investigate two common sources of error: Truncation error and rounding error.
###Code
import numpy as np
import matplotlib.pyplot as pt
###Output
_____no_output_____
###Markdown
**Task:** Approximate a function (here: a parabola, by a line)
###Code
center = -1
width = 6
def f(x):
return - x**2 + 3*x
def df(x):
return -2*x + 3
grid = np.linspace(center-width/2, center+width/2, 100)
fx = f(grid)
pt.plot(grid, fx)
pt.plot(grid, f(center) + df(center) * (grid-center))
pt.xlim([grid[0], grid[-1]])
pt.ylim([np.min(fx), np.max(fx)])
###Output
_____no_output_____ |