path | concatenated_notebook
---|---
HomeWork/Day_013_HW.ipynb | ###Markdown
Common DataFrame operations * merge / transform * subset * groupby [Assignment goal] - Practice filling in the appropriate column data or formulas to satisfy the requirements of each task [Assignment focus] - Fill in suitable input data so that the later code displays the required results (Hint: filling in the corresponding intervals or columns is enough, In[4]~In[6], Out[4]~In[6]) - Fill in the z-score calculation to produce the transformed values (Hint: refer to the standardization formula, In[7])
###Code
# Import the required packages
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# Set the data path
dir_data = './data/'
f_app = os.path.join(dir_data, 'application_train.csv')
print('Path of read in data: %s' % (f_app))
app_train = pd.read_csv(f_app)
app_train.head()
###Output
Path of read in data: ./data/application_train.csv
###Markdown
Assignment 1. Split CNT_CHILDREN in app_train into four groups according to the rules below, and store the result in the original dataframe as CNT_CHILDREN_GROUP * 0 children * 1 - 2 children * 3 - 5 children * more than 5 children 2. Using CNT_CHILDREN_GROUP and TARGET, list the average AMT_INCOME_TOTAL of each group and draw a boxplot 3. Using CNT_CHILDREN_GROUP and TARGET, compute the [Z-score](https://en.wikipedia.org/wiki/Standard_score) of AMT_INCOME_TOTAL
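Before filling in the answers, here is a minimal sketch of the two ingredients these tasks rely on — `pd.cut` for binning and a grouped z-score via `groupby(...).transform(...)` — run on illustrative toy data rather than the actual application data (the column names and bin edges below are only an example):
```python
import pandas as pd

toy = pd.DataFrame({"kids":   [0, 0, 1, 2, 3, 5, 6, 8],
                    "income": [50., 70., 80., 60., 90., 120., 40., 30.]})
# Bin the counts into 0 / 1-2 / 3-5 / more than 5 children
toy["grp"] = pd.cut(toy["kids"], [-float("inf"), 0, 2, 5, float("inf")])
# Grouped z-score: subtract each group's mean and divide by its standard deviation
toy["income_z"] = toy.groupby("grp")["income"].transform(lambda x: (x - x.mean()) / x.std())
print(toy)
```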
###Code
#1
"""
Your code here
"""
# Bin edges chosen so the groups are: 0 / 1-2 / 3-5 / more than 5 children
cut_rule = [-np.inf, 0, 2, 5, np.inf]
app_train['CNT_CHILDREN_GROUP'] = pd.cut(app_train['CNT_CHILDREN'].values, cut_rule, include_lowest=True)
app_train['CNT_CHILDREN_GROUP'].value_counts()
#2-1
"""
Your code here
"""
grp = ['CNT_CHILDREN_GROUP', 'TARGET']
grouped_df = app_train.groupby(grp)['AMT_INCOME_TOTAL']
grouped_df.mean()
#2-2
"""
Your code here
"""
plt_column = ['AMT_INCOME_TOTAL']
plt_by = ['CNT_CHILDREN_GROUP', 'TARGET']
app_train.boxplot(column=plt_column, by = plt_by, showfliers = False, figsize=(12,12))
plt.suptitle('')
plt.show()
#3
"""
Your code here
z = (x - μ) / σ
μ is the mean of the population.
σ is the standard deviation of the population.
"""
app_train['AMT_INCOME_TOTAL_Z_BY_CHILDREN_GRP-TARGET'] = grouped_df.apply(lambda x:(x-x.mean())/x.std())
app_train[['AMT_INCOME_TOTAL','AMT_INCOME_TOTAL_Z_BY_CHILDREN_GRP-TARGET']].head()
###Output
_____no_output_____ |
4. Time Series/AZ/More Forecasting/05_forecasting_with_stateful_rnn.ipynb | ###Markdown
Forecasting with a stateful RNN Setup
###Code
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
keras = tf.keras
def plot_series(time, series, format="-", start=0, end=None, label=None):
plt.plot(time[start:end], series[start:end], format, label=label)
plt.xlabel("Time")
plt.ylabel("Value")
if label:
plt.legend(fontsize=14)
plt.grid(True)
def trend(time, slope=0):
return slope * time
def seasonal_pattern(season_time):
"""Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.4,
np.cos(season_time * 2 * np.pi),
1 / np.exp(3 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
"""Repeats the same pattern at each period"""
season_time = ((time + phase) % period) / period
return amplitude * seasonal_pattern(season_time)
def white_noise(time, noise_level=1, seed=None):
rnd = np.random.RandomState(seed)
return rnd.randn(len(time)) * noise_level
time = np.arange(4 * 365 + 1)
slope = 0.05
baseline = 10
amplitude = 40
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
###Output
_____no_output_____
###Markdown
Stateful RNN Forecasting
###Code
def sequential_window_dataset(series, window_size):
series = tf.expand_dims(series, axis=-1)
ds = tf.data.Dataset.from_tensor_slices(series)
ds = ds.window(window_size + 1, shift=window_size, drop_remainder=True) # consecutive, non-overlapping windows (shift = window size)
ds = ds.flat_map(lambda window: window.batch(window_size + 1)) # no shuffling: windows must stay in chronological order for a stateful RNN
ds = ds.map(lambda window: (window[:-1], window[1:]))
return ds.batch(1).prefetch(1)
for X_batch, y_batch in sequential_window_dataset(tf.range(10), 3):
print(X_batch.numpy(), y_batch.numpy())
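# For reference (the toy input above is deterministic): with window_size=3 the dataset
# yields three non-overlapping input windows, each paired with its next-step targets,
# e.g. X=[0,1,2] with y=[1,2,3], then [3,4,5] -> [4,5,6] and [6,7,8] -> [7,8,9].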
class ResetStatesCallback(keras.callbacks.Callback): # resets the RNN's internal state at the start of every epoch
def on_epoch_begin(self, epoch, logs):
self.model.reset_states()
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = sequential_window_dataset(x_train, window_size)
model = keras.models.Sequential([
keras.layers.SimpleRNN(100, return_sequences=True, stateful=True,
batch_input_shape=[1, None, 1]), # [batch size (must be fixed, here 1), time steps (None = any), input dimension (1, since the series is univariate)]
keras.layers.SimpleRNN(100, return_sequences=True, stateful=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200.0) # rescale the outputs toward the range of the data; normalizing the inputs would be an alternative
])
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-8 * 10**(epoch / 30))
reset_states = ResetStatesCallback() # resets the state of the batch window
optimizer = keras.optimizers.SGD(lr=1e-8, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100,
callbacks=[lr_schedule, reset_states], verbose=0) # calls reset
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-8, 1e-4, 0, 30])
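# The sweep above is typically read by picking a learning rate just before the
# loss curve starts to climb; the training run below uses 1e-7 on that basis.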
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = sequential_window_dataset(x_train, window_size)
valid_set = sequential_window_dataset(x_valid, window_size)
model = keras.models.Sequential([
keras.layers.SimpleRNN(100, return_sequences=True, stateful=True, # stateful=True again
batch_input_shape=[1, None, 1]),
keras.layers.SimpleRNN(100, return_sequences=True, stateful=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200.0)
])
optimizer = keras.optimizers.SGD(lr=1e-7, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
reset_states = ResetStatesCallback()
model_checkpoint = keras.callbacks.ModelCheckpoint(
"my_checkpoint.h5", save_best_only=True)
early_stopping = keras.callbacks.EarlyStopping(patience=50)
model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping, model_checkpoint, reset_states], verbose=0) # note the three callbacks here
model = keras.models.load_model("my_checkpoint.h5")
model.reset_states()
rnn_forecast = model.predict(series[np.newaxis, :, np.newaxis]) # add batch and channel axes: the model expects a 3-D input [1, time steps, 1]
rnn_forecast = rnn_forecast[0, split_time - 1:-1, 0]
rnn_forecast.shape
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
###Output
_____no_output_____ |
Case Study 2/Rate of Penetration (ROP)/Rate of Penetration.ipynb | ###Markdown
**Rate of Penetration (ROP)**
###Code
from google.colab import files
uploaded = files.upload()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
df=pd.read_csv('ROP_DataSet.csv')
df.describe().transpose()
f, axes = plt.subplots(4, 2, figsize=(12, 12))
sns.distplot(df['Hole Depth'], color="red", ax=axes[0, 0], axlabel='Measured Depth (ft)')
sns.distplot(df['Hook Load'], color="olive", ax=axes[0, 1], axlabel='Hook Load (Klbs)')
sns.distplot(df['Rate Of Penetration'], color="blue", ax=axes[1, 0], axlabel='Rate of Penetration (ft/hr)')
sns.distplot(df['Rotary RPM'], color="orange", ax=axes[1, 1], axlabel='Rotary rpm')
sns.distplot(df['Rotary Torque'], color="black", ax=axes[2, 0], axlabel='Rotary Torque (Klbs-ft)')
sns.distplot(df['Weight on Bit'], color="green", ax=axes[2, 1], axlabel='Weight on bit (Klbs)')
sns.distplot(df['Differential Pressure'], color="brown", ax=axes[3, 0], axlabel='Differential Pressure (psi)')
sns.distplot(df['Gamma at Bit'], color="gray", ax=axes[3, 1], axlabel='Gamma Ray at Bit (gAPI)')
plt.tight_layout()
f, axes = plt.subplots(4, 2, figsize=(12, 12))
sns.boxplot(df['Hole Depth'], color="red", ax=axes[0, 0])
sns.boxplot(df['Hook Load'], color="olive", ax=axes[0, 1])
sns.boxplot(df['Rate Of Penetration'], color="blue", ax=axes[1, 0])
sns.boxplot(df['Rotary RPM'], color="orange", ax=axes[1, 1])
sns.boxplot(df['Rotary Torque'], color="black", ax=axes[2, 0])
sns.boxplot(df['Weight on Bit'], color="green", ax=axes[2, 1])
sns.boxplot(df['Differential Pressure'], color="brown", ax=axes[3, 0])
sns.boxplot(df['Gamma at Bit'], color="gray", ax=axes[3, 1])
plt.tight_layout()
plt.figure(figsize=(12,10))
sns.heatmap(df.corr(), annot=True, linecolor='white',
linewidths=2, cmap='coolwarm')
from sklearn import preprocessing
scaler=preprocessing.MinMaxScaler(feature_range=(0,1))
scaler.fit(df)
df_scaled=scaler.transform(df)
df_scaled=pd.DataFrame(df_scaled, columns=['Hole Depth', 'Hook Load', 'Rotary RPM', 'Rotary Torque', 'Weight on Bit',
'Differential Pressure', 'Gamma at Bit', 'Rate Of Penetration'])
y_scaled=df_scaled[['Rate Of Penetration']]
x_scaled=df_scaled.drop(['Rate Of Penetration'], axis=1)
from sklearn.model_selection import train_test_split
seed=1000
np.random.seed(seed)
X_train, X_test,y_train, y_test=train_test_split(x_scaled,
y_scaled, test_size=0.30)
from sklearn.svm import SVR
np.random.seed(seed)
SVM =SVR(kernel='rbf', gamma=1.5,C=5)
SVM.fit(X_train,np.ravel(y_train))
y_pred_train=SVM.predict(X_train)
y_pred_test=SVM.predict(X_test)
corr_train = np.corrcoef(y_train['Rate Of Penetration'], y_pred_train)[0, 1]
print('ROP Train Data r^2=', round(corr_train**2, 4), 'r=', round(corr_train, 4))
corr_test = np.corrcoef(y_test['Rate Of Penetration'], y_pred_test)[0, 1]
print('ROP Test Data r^2=', round(corr_test**2, 4), 'r=', round(corr_test, 4))
plt.figure(figsize=(10,8))
plt.plot(y_test['Rate Of Penetration'], y_pred_test, 'b.')
plt.xlabel('ROP Testing Actual')
plt.ylabel('ROP Testing Prediction')
plt.title('ROP Testing Actual vs. Prediction')
from sklearn import metrics
print('Testing ROP MAE:', round(metrics.mean_absolute_error(y_test['Rate Of Penetration'], y_pred_test), 4))
print('Testing ROP MSE:', round(metrics.mean_squared_error(y_test['Rate Of Penetration'], y_pred_test), 4))
print('Testing ROP RMSE:', round(np.sqrt(metrics.mean_squared_error(y_test['Rate Of Penetration'], y_pred_test)), 4))
from google.colab import files
uploaded = files.upload()
df_blind=pd.read_csv('ROP_Blind_DataSet.csv')
scaled_blind=scaler.transform(df_blind)
scaled_blind=pd.DataFrame(scaled_blind, columns=['Hole Depth',
'Hook Load', 'Rotary RPM', 'Rotary Torque','Weight on Bit',
'Differential Pressure', 'Gamma at Bit','Rate Of Penetration'])
y_scaled_blind=scaled_blind['Rate Of Penetration']
x_scaled_blind=scaled_blind.drop(['Rate Of Penetration'],axis=1)
y_pred_blind=SVM.predict(x_scaled_blind)
corr_test = np.corrcoef(y_scaled_blind, y_pred_blind)[0, 1]
print('ROP Blind Data r^2=', round(corr_test**2, 4), 'r=', round(corr_test, 4))
plt.figure(figsize=(10,8))
plt.plot(y_scaled_blind, y_pred_blind, 'g.')
plt.xlabel('ROP Blind Actual')
plt.ylabel('ROP Blind Prediction')
plt.title('ROP Blind Actual vs. Prediction')
plt.figure(figsize=(6,12))
sns.scatterplot(y_scaled_blind, df_blind['Hole Depth'],
label='Actual Blind Data', color='blue')
sns.scatterplot(y_pred_blind, df_blind['Hole Depth'],
label='Predicted Blind Data', color='green')
from sklearn.ensemble import ExtraTreesRegressor
np.random.seed(seed)
ET=ExtraTreesRegressor(n_estimators=100,criterion='mse',
max_depth=None, min_samples_split=2,
min_samples_leaf=1)
ET.fit(X_train,np.ravel(y_train))
y_pred_train=ET.predict(X_train)
y_pred_test=ET.predict(X_test)
corr_train = np.corrcoef(y_train['Rate Of Penetration'], y_pred_train)[0, 1]
print('ROP Train Data r^2=', round(corr_train**2, 4), 'r=', round(corr_train, 4))
corr_test = np.corrcoef(y_test['Rate Of Penetration'], y_pred_test)[0, 1]
print('ROP Test Data r^2=', round(corr_test**2, 4), 'r=', round(corr_test, 4))
plt.figure(figsize=(10,8))
plt.plot(y_test['Rate Of Penetration'], y_pred_test, 'b.')
plt.xlabel('ROP Testing Actual')
plt.ylabel('ROP Testing Prediction')
plt.title('ROP Testing Actual vs. Prediction Using Extra Trees Model')
y_pred_blind=ET.predict(x_scaled_blind)
corr_test = np.corrcoef(y_scaled_blind, y_pred_blind)[0, 1]
print('ROP Blind Data r^2=', round(corr_test**2, 4), 'r=', round(corr_test, 4))
plt.figure(figsize=(10,8))
plt.plot(y_scaled_blind, y_pred_blind, 'g.')
plt.xlabel('ROP Blind Actual')
plt.ylabel('ROP Blind Prediction')
plt.title('ROP Blind Actual vs. Prediction Using Extra Trees Model')
plt.figure(figsize=(6,12))
sns.scatterplot(y_scaled_blind, df_blind['Hole Depth'], label='Actual Blind Data', color='blue')
sns.scatterplot(y_pred_blind, df_blind['Hole Depth'], label='Predicted Blind Data', color='green')
feature_names =df.columns[:-1]
plt.figure(figsize=(10,8))
feature_imp =pd.Series(ET.feature_importances_,
index=feature_names).sort_values(ascending=False)
sns.barplot(x=feature_imp, y=feature_imp.index)
plt.xlabel('Feature Importance Score Using Extra Trees')
plt.ylabel('Features')
plt.title("Feature Importance Ranking")
###Output
_____no_output_____ |
notebooks/Chapter 19 - Filtering Out Specific Fields from a GenBank File.ipynb | ###Markdown
Python for Bioinformatics-----------------------------This Jupyter notebook is intended to be used alongside the book [Python for Bioinformatics](http://py3.us/) Chapter 19: Filtering Out Specific Fields from a GenBank File----------------------------- **Note:** Before running the examples, the sample data files must be accessible from this Jupyter notebook. The following commands download them from Github and extract them into a directory called samples.
###Code
!curl https://raw.githubusercontent.com/Serulab/Py4Bio/master/samples/samples.tar.bz2 -o samples.tar.bz2
!mkdir samples
!tar xvfj samples.tar.bz2 -C samples
###Output
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 16.5M 100 16.5M 0 0 22.0M 0 --:--:-- --:--:-- --:--:-- 22.0M
mkdir: cannot create directory 'samples': File exists
BLAST_output.xml
TAIR7_Transcripts_by_map_position.gz
pMOSBlue.txt
fishbacteria.csv
UniVec_Core.nsq
t3beta.fasta
PythonU.db
input4align.dnd
pdb1apk.ent.gz
readme.txt
contig1.ace
example.aln
hsc1.fasta
bioinfo/seqs/15721870.fasta
primers.txt
bioinfo/seqs/4586830.fasta
bioinfo/seqs/7638455.fasta
GSM188012.CEL
3seqs.fas
sampleX.fas
sampleXblast.xml
B1.csv
phd1
conglycinin.phy
bioinfo/seqs/218744616.fasta
spfile.txt
bioinfo/seqs/513419.fasta
bioinfo/seqs/513710.fasta
prot.fas
cas9align.fasta
seqA.fas
bioinfo/seqs/
bioinfo/
pdbaa
other.xml
vectorssmall.fasta
t3.fasta
a19.gp
data.csv
input4align.fasta
B1IXL9.txt
fasta22.fas
bioinfo/seqs/7415878.fasta
bioinfo/seqs/513718.fasta
bioinfo/seqs/513719.fasta
bioinfo/seqs/6598312.fasta
UniVec_Core.nin
Q5R5X8.fas
bioinfo/seqs/513717.fasta
BcrA.gp
bioinfo/seqs/2623545.fasta
bioinfo/seqs/63108399.fasta
conglycinin.dnd
NC2033.txt
fishdata.csv
uniprotrecord.xml
BLAST_output.html
Q9JJE1.xml
test3.csv
UniVec_Core.nhr
sampledata.xlsx
UniVec_Core
NC_006581.gb
conglycinin.multiple.phy
conglycinin.fasta
###Markdown
**Listing 19.1:** genbank1.py: Extract sequences from a Genbank file
###Code
from Bio import SeqIO, SeqRecord, Seq
from Bio.Alphabet import IUPAC
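# Note: this listing assumes an older Biopython release; Bio.Alphabet was
# removed in Biopython 1.78, where sequences are created without an alphabet.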
GB_FILE = 'samples/NC_006581.gb'
OUT_FILE = 'nadh.fasta'
with open(GB_FILE) as gb_fh:
record = SeqIO.read(gb_fh, 'genbank')
seqs_for_fasta = []
for feature in record.features:
# Each Genbank record may have several features, the program
# will walk over all of them.
qualifier = feature.qualifiers
# Each feature has several parameters
# Pick selected parameters.
if 'NADH' in qualifier.get('product',[''])[0] and \
'product' in qualifier and 'translation' in qualifier:
id_ = qualifier['db_xref'][0][3:]
desc = qualifier['product'][0]
# nadh_sq is a NADH protein sequence
nadh_sq = Seq.Seq(qualifier['translation'][0], IUPAC.protein)
# 'srec' is a SeqRecord object from nadh_sq sequence.
srec = SeqRecord.SeqRecord(nadh_sq, id=id_, description=desc)
# Add this SeqRecord object into seqsforfasta list.
seqs_for_fasta.append(srec)
with open(OUT_FILE, 'w') as outf:
# Write all the sequences as a FASTA file.
SeqIO.write(seqs_for_fasta, outf, 'fasta')
###Output
_____no_output_____
###Markdown
**Listing 19.2:** genbank2.py: Extract upstream regions
###Code
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
GB_FILE = 'samples/NC_006581.gb'
OUT_FILE = 'tg.fasta'
with open(GB_FILE) as gb_fh:
record = SeqIO.read(gb_fh, 'genbank')
seqs_for_fasta = []
tg = (['cox2'],['atp6'],['atp9'],['cob'])
for feature in record.features:
if feature.qualifiers.get('gene') in tg and feature.type=='gene':
# Get the name of the gene
genename = feature.qualifiers.get('gene')
# Get the start position
startpos = feature.location.start.position
# Get the required slice
newfrag = record.seq[startpos-1000: startpos]
# Build a SeqRecord object
newrec = SeqRecord(newfrag, genename[0] + ' 1000bp upstream',
'','')
seqs_for_fasta.append(newrec)
with open(OUT_FILE,'w') as outf:
# Write all the sequences as a FASTA file.
SeqIO.write(seqs_for_fasta, outf, 'fasta')
###Output
_____no_output_____ |
Elevator.ipynb | ###Markdown
Problem Using the smartphone's accelerometer inside an elevator, we want to estimate the speed of the elevator and the height it travelled.
###Code
# Load libraries
%pylab inline
import pandas as pd
from scipy import integrate
import numpy as np
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
Read the data The data has been generated with the app _Science Journal_, available for _Android_. Among other quantities, it records the accelerations of the device.
###Code
# Import the time series corresponding to acceleration in the vertical axis
data = pd.read_csv('./data/skyscrapper_up.csv', usecols=['relative_time', 'AccZ'])
# Drop the not-a-number cases
data = data.dropna()
# Extract the relevant information in a matrix form
ts = data.relative_time/1000 # Time (in s)
az = data.AccZ # Vertical acceleration (in m/s^2)
ts = ts.as_matrix()
az = az.as_matrix()
###Output
_____no_output_____
###Markdown
Clean the time series The recorded time series spans a bit more time than desired: it also captured the movement of the experimenter's hand while setting the phone on the floor of the elevator and while picking it up again. We want to cut off these chunks of the time series.
###Code
# indices[0] = start of experiment
# indices[1] = phone on the floor
# indices[2] = end of phone on the floor
# indices[3] = end of experiment
indices = [0, 200, 1700, ts.size]
# Time series while setting the phone on the floor
ts_init = ts[indices[0]:indices[1]]
az_init = az[indices[0]:indices[1]]
# The really interesting time series: while the phone is on the floor and the only registered movement is the elevator's
ts_experiment = ts[indices[1]:indices[2]]
az_experiment = az[indices[1]:indices[2]]
# Time series while taking the phone from the floor
ts_out = ts[indices[2]:indices[3]]
az_out = az[indices[2]:indices[3]]
###Output
_____no_output_____
###Markdown
Plot time series
###Code
plt.rcParams['figure.figsize'] = (11, 8)
plt.rcParams['font.size'] = 10.0
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1)
plt.title('Complete time series')
plt.plot(ts_init, az_init, color='gray', alpha=0.4)
plt.plot(ts_experiment, az_experiment, color='red')
plt.plot(ts_out, az_out, color='green', alpha=0.4)
plt.ylabel('Acceleration $(m / s^2)$')
plt.legend(['Leaving phone on the floor', 'Phone on the floor', 'Taking phone from the floor']);
ax2 = fig.add_subplot(2, 1, 2, sharex=ax1)
plt.title('Detail')
plt.plot(ts_experiment, az_experiment, color='red')
plt.xlabel('$t (s)$')
plt.ylabel('Acceleration $(m / s^2)$');
###Output
_____no_output_____
###Markdown
Estimate g The phone registers two accelerations simultaneously:* that due to the elevator's movement ($a$),* that due to the Earth's gravitational field ($g$). Although we know that at the Earth's surface $g \approx 9.8\ m/s^2$, we don't know anything about possible systematic errors of our device. Indeed, a quick look at the previous figure shows that the registered $g$ is slightly higher than $10\ m/s^2$. To deal with this, we estimate the measured value of $g$ as the mean value of our main time series. This gravitational acceleration is not relevant to the problem we want to solve, so we remove its effect by subtracting the estimated value.
###Code
g = az_experiment.mean()
az_experiment_detrended = az_experiment - g
###Output
_____no_output_____
###Markdown
Integrate The relationship between position ($x$), speed ($v$) and acceleration ($a$) is well known:$$v(t) = x'(t)$$and:$$a(t) = v'(t)$$Using antiderivatives we can go the other way around:$$v(t) = \int_{t_0}^t a(s) ds$$and:$$x(t) = \int_{t_0}^t v(s) ds$$
###Code
# Approximate antiderivative of a
v = np.zeros(ts_experiment.size-1)
for i in range(1, ts_experiment.size):
y_temp = az_experiment_detrended[:i]
t_temp = ts_experiment[:i]
v[i-1] = integrate.simps(y_temp, t_temp, even='avg')
# Approximate antiderivative of v
x = np.zeros(ts_experiment.size-2)
for i in range(1, ts_experiment.size-1):
y_temp = v[:i]
t_temp = ts_experiment[:i]
x[i-1] = integrate.simps(y_temp, t_temp, even='avg')
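# A vectorized, approximately equivalent alternative (a sketch, assuming SciPy >= 1.6,
# which provides integrate.cumulative_trapezoid; it uses the trapezoid rule rather than
# Simpson's rule, so the numbers differ slightly):
# v = integrate.cumulative_trapezoid(az_experiment_detrended, ts_experiment)
# x = integrate.cumulative_trapezoid(v, ts_experiment[1:])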
###Output
_____no_output_____
###Markdown
Plot the conclusions
###Code
plt.subplot(311)
plt.title('Acceleration')
plt.plot(ts_experiment, az_experiment_detrended, color='red')
plt.ylabel('Acceleration $(m / s^2)$')
plt.subplot(312)
plt.title('Speed')
plot(ts_experiment[1:], v, color='blue')
plt.ylabel('Speed $(m / s)$')
plt.subplot(313)
plt.title('Position')
plot(ts_experiment[2:], x, color='green')
plt.ylabel('Position $(m)$')
plt.xlabel('Time $(s)$');
print('The estimated g is {0:.2f} (m/s^2).'.format(g))
print('The travelled height is {0:.2f} (m).'.format(x.max()))
print('The maximum speed is {0:.2f} (m/s).'.format(v.max()))
print('The maximum acceleration is {0:.2f} (m/s^2).'.format(az_experiment_detrended.max()))
###Output
The estimated g is 10.31 (m/s^2).
The travelled height is 74.30 (m).
The maximum speed is 4.01 (m/s).
The maximum acceleration is 0.93 (m/s^2).
###Markdown
Elevator problem This is a design problem that will ask for some pseudo-code to illustrate your ideas. The thought process is more important than syntax or programming details, so don't worry too much about those. In this problem, we will start with a very simple set of requirements and add some more later. Please **feel free to ask clarifying questions at any point**. Understanding the requirements, here as in real life, is part of the challenge. The problem, Part 1: Create a class to represent an elevator in a hotel. The only thing the elevator can do is go to a different floor. Details of people pressing buttons are not included for this part. The elevator could print out its current state like this:
```
Floor 1
Floor 3
Floor 1
Floor 7
..etc..
```
###Code
# Add your code/pseudo-code
class Elevator:
pass
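# One possible minimal sketch for Part 1 (buttons, doors and passengers are
# deliberately ignored, per the stated requirements):
class Elevator:
    def __init__(self, floor=1):
        self.floor = floor

    def go_to_floor(self, floor):
        """Move the elevator and print its new state."""
        self.floor = floor
        print("Floor", self.floor)

# Example usage:
# lift = Elevator()
# lift.go_to_floor(3)   # prints "Floor 3"
# lift.go_to_floor(7)   # prints "Floor 7"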
###Output
_____no_output_____ |
Thompson_Motif_Index.ipynb | ###Markdown
Thompson Motif Index Katja Mellmann: Thompson's Motif-Index as CSV File (version 1.2). Open Science Framework (OSF), 2020. DOI: 10.17605/OSF.IO/XEB67 (https://osf.io/xeb67/), also available via https://github.com/KatjaMellmann/TMI_as_CSV/, provides a handy lookup reference for the Thompson Motif Index.
###Code
import pandas as pd
thompson_motif_df = pd.read_csv("https://github.com/KatjaMellmann/TMI_as_CSV/blob/main/tmi.csv?raw=true")
thompson_motif_df.fillna("", inplace=True)
thompson_motif_df.head()
###Output
_____no_output_____
###Markdown
Tidy up some of the column names:
###Code
thompson_motif_df.rename(columns={"[sorting field]": "sortfield",
'section ("tens")': "section10s"}, inplace=True)
thompson_motif_df
###Output
_____no_output_____
###Markdown
We can also process it further to extract out the codes (we really should combine the following so we only iterate through the whole dataset once):
###Code
def motif_splitter(cell):
"""Split out motif label."""
if cell:
parts = cell.split()
return pd.Series({"motif_label": " ".join(parts[1:]).strip(".")})
return pd.Series({"motif_label":''})
thompson_motif_df[["motif_label"]] = thompson_motif_df["MOTIF"].apply(motif_splitter)
thompson_motif_df.head()
def chapter_splitter(cell):
"""Split out Chapter label."""
if cell:
parts = cell.split()
return pd.Series({"chapter_label": " ".join(parts[1:]).strip(".")})
return pd.Series({"chapter_label":''})
thompson_motif_df[["chapter_label"]] = thompson_motif_df["chapter"].apply(chapter_splitter)
thompson_motif_df.head()
def section_tens_splitter(cell):
"""Split out section(10s) Label and code."""
if cell:
parts = cell.split()
return pd.Series({"section10s_label": " ".join(parts[1:]).strip(".")})
return pd.Series({"section10s_label":''})
thompson_motif_df[["section10s_label"]] = thompson_motif_df['section10s'].apply(section_tens_splitter)
thompson_motif_df.head()
def section_division1_splitter(cell):
"""Split out division1 Label and code."""
if cell:
parts = cell.split()
return pd.Series({"division1_code": parts[0].strip("."),
"division1_label": " ".join(parts[1:]).strip(".")})
return pd.Series({"division1_code":'', "division1_label":''})
thompson_motif_df[["division1_code", "division1_label"]] = thompson_motif_df['division1'].apply(section_division1_splitter)
thompson_motif_df.head()
###Output
_____no_output_____
###Markdown
We can add this table directly to a simple database. First, create the database:
###Code
from sqlite_utils import Database
db_name = "motifs_demo.db"
# While developing the script, recreate database each time...
db = Database(db_name, recreate=True)
###Output
_____no_output_____
###Markdown
Now add the dataframe to the database as a database table:
###Code
#db["motifs_km"].drop()
#db["motifs_km_fts"].drop()
db["motifs_km"].create({
"code": str,
"sortfield": str,
"1st ed.": str,
"chapter": str,
"division1": str,
"division2": str,
"division3": str,
"section10s": str,
"MOTIF": str,
"bibliographies": str,
"motif_label": str,
"chapter_label": str,
"section10s_label": str,
"division1_code": str,
"division1_label": str,
})
# Create a full text search table to improve search support
db["motifs_km"].enable_fts(["bibliographies", "motif_label", "sortfield"], create_triggers=True, tokenize="porter")
db["motifs_km"].insert_all(thompson_motif_df.to_dict(orient="records"))
#thompson_motif_df.to_sql("motifs_km", index=False, if_exists="replace", con=db.conn)
###Output
_____no_output_____
###Markdown
And query:
###Code
from pandas import read_sql
q = "SELECT * FROM motifs_km_fts LIMIT 3"
read_sql(q, db.conn)
# FTS search
q = f"""
SELECT motifs_km_fts.* FROM motifs_km_fts
WHERE motifs_km_fts MATCH {db.quote('cockroach')};
"""
read_sql(q, db.conn)
###Output
_____no_output_____
###Markdown
MOMFER - Meerten online motif finder The *MOMFER - Meerten online motif finder*, http://www.momfer.ml, is an online search engine over motifs. Source code and related data files appear to be available at: https://github.com/fbkarsdorp/tmi *TMI: Interface on Thompson's Motif Index*. See also:- *Folgert Karsdorp, Marten van der Meulen, Theo Meder, Antal van den Bosch: MOMFER. A Search Engine of Thompson's Motif-Index of Folk Literature. In: Folklore 126 (2015), no. 1, 37–52. https://doi.org/10.1080/0015587X.2015.1006954*- *Thierry Declerck, Antónia Kostová, Lisa Schäfer: Linked Data Access to Folktales classified by Thompson's Motifs and Aarne-Thompson-Uther's Types. In: Rhian Lewis et al. (Eds.): Digital Humanities 2017 (DH2017), Montréal, August 8-11, 2017, Conference Abstracts. Montréal: ADHO, https://dh2017.adho.org/abstracts/465/465.pdf*- https://www.dfki.de/fileadmin/user_upload/import/9028_Dh2017_LOD_TMI-ATU_final.pdf The json data in the repository looks very reminiscent of Katja Mellmann's *Thompson's Motif-Index as CSV File* although there does appear to be some additional data, such as the `locations` field:
###Code
import requests
momfer_json = requests.get("https://github.com/fbkarsdorp/tmi/blob/master/data/tmi.json?raw=true")
print(momfer_json.text[:1500])
momfer_json.json()[0]
tmi_data = requests.get("https://github.com/fbkarsdorp/tmi/blob/master/data/tmi-cleaned.txt?raw=true")
print(tmi_data.text[:1500])
###Output
A0. Creator.
A1. Identity of creator.
A1.1. Sun-god as creator.
A1.2. Grandfather as creator.
A1.3. Stone-woman as creator.
A1.4. Brahma as creator.
A2. Multiple creators.
A2.1. Three creators.
A2.2. First human pair as creators.
A3. Creative mother source of everything.
A5. Reason for creation.
A5.1. Gods make earth to have place to rest their feet.
A7. Creator's descendants.
A7.1. Creator has two sons.
A10. Nature of the creator.
A11. Invisible creator.
A11.1. Invisibility of creator learned from the impossibility of staring at the sun, his servant.
A12. Hermaphroditic creator.
A12.1. Male and female creators.
A13. Animal as creator.
A13.1. Beast as creator.
A13.1.1. Cow as creator.
A13.2. Bird as creator.
A13.2.1. Raven as creator.
A13.2.2. Eagle as creator of man.
A13.2.3. Black-winged bird as creator.
A13.3. Insect as creator.
A13.3.1. Spider as creator.
A13.3.2. Beetle as creator.
A13.4. Reptile as creator.
A13.4.1. Snake as creator.
A13.4.2. Worm as creator.
A15. Human creator.
A15.1. Female creator.
A15.1.1. Old woman as creator.
A15.2. Brothers as creators.
A15.3. Old man as creator.
A15.3.1. Old man with staff as creator.
A15.4. Artisan as creator.
A15.4.1. Potter as creator.
A17. Angel as creator.
A18. Pictorial representations of creator.
A18.1. Creator with dragon's head.
A18.2. Creator with two horns on head.
A18.3. Dwarfish creator.
A18.4. Creator clothed in bear-skin (or in leaves).
A18.5. Creator with hammer and chisel in hands.
A18.6. Creator with sun and mo
|
Untitled Folder/04. If ELse.ipynb | ###Markdown
if ... elif ... else
###Code
str ="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
import random
stocks ={}
for i in range(20):
#generate 20 random key and values
stocks["".join(random.sample(str,4))] = random.randint(6,231)
stocks
print("{:9} {:12} {:10}".format("Value", "Fair Price", "Over-priced"))
print("-" * 31)
for stock, PE in stocks.items():
if PE < 10:
print("{:<}".format(stock))
elif PE < 20:
print("{:>17}".format(stock))
else:
print("{:>31}".format(stock))
###Output
Value Fair Price Over-priced
-------------------------------
JPTS
YTOV
ZSHR
QPGU
XHVC
RBFA
IDKN
WTBQ
JQFC
TRFY
KXLV
SGUH
NCPX
RQMP
IATD
FIVN
TJZI
COQB
CMEX
XCWQ
|
emotions/semeval2018/semeval-EDA.ipynb | ###Markdown
Correlation Among Emotions
###Code
train.corr()
import seaborn as sns
plt.subplots(figsize=(11,9))
sns.heatmap(train.corr())
###Output
_____no_output_____
###Markdown
Class Balance for all Labels
###Code
g_anger = train.groupby("anger").size()
g_anger
g_anger.plot.bar()
g_anticipation = train.groupby("anticipation").size()
g_anticipation
g_anticipation.plot.bar()
g_disgust = train.groupby("disgust").size()
g_disgust
g_disgust.plot.bar()
g_fear = train.groupby("fear").size()
g_fear
g_fear.plot.bar()
g_joy = train.groupby("joy").size()
g_joy
g_joy.plot.bar()
g_love = train.groupby("love").size()
g_love
g_love.plot.bar()
g_optimism = train.groupby("optimism").size()
g_optimism
g_optimism.plot.bar()
g_pessimism = train.groupby("pessimism").size()
g_pessimism
g_pessimism.plot.bar()
g_sadness = train.groupby("sadness").size()
g_sadness
g_sadness.plot.bar()
surprise = train.groupby("surprise").size()
surprise
surprise.plot.bar()
trust = train.groupby("trust").size()
trust
trust.plot.bar()
###Output
_____no_output_____
###Markdown
Number of Emotions per Tweet
###Code
train['total_emotions'] = train.apply(lambda x: x['anger']+x['anticipation']+x['disgust']+x['fear']+x['joy']+x['love']+x['optimism']+x['pessimism']+x['sadness']+x['surprise']+x['trust'], axis=1)
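# An equivalent, more concise alternative (sketch):
# emotion_cols = ['anger', 'anticipation', 'disgust', 'fear', 'joy', 'love',
#                 'optimism', 'pessimism', 'sadness', 'surprise', 'trust']
# train['total_emotions'] = train[emotion_cols].sum(axis=1)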
train.groupby("total_emotions").size()
train.groupby("total_emotions").size().plot.bar()
###Output
_____no_output_____
###Markdown
Number of Tokens Distribution
###Code
train.lengths.plot.hist()
###Output
_____no_output_____ |
materials/lessons/lesson_15_notebook.ipynb | ###Markdown
Video lesson #15 notebook Part 1: `Pandas` - counting, sorting, and grouping data
###Code
# # This code allows Cartopy to work with Google Colab
# # Run this code once per session, then comment it out
# !grep '^deb ' /etc/apt/sources.list | \
# sed 's/^deb /deb-src /g' | \
# tee /etc/apt/sources.list.d/deb-src.list
# !apt-get -qq update
# !apt-get -qq build-dep python3-cartopy
# !pip uninstall -y shapely
# !pip install shapely --no-binary shapely
# !pip install cartopy
# Import useful libraries
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from scipy import stats, interpolate
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# Give Colab access to Google Drive
from google.colab import drive
drive.mount('/content/drive')
###Output
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
###Markdown
Load data from 2013 Puget Sound marine mammal aerial survey *Image: Harbor porpoises. Credit: [Bill Keener](https://medium.com/the-ark-newspaper/a-comeback-story-harbor-porpoises-return-to-the-bay-d1ed871e2790) / Golden Gate Cetacean Research.***Data source:** https://obis.org/dataset/0e80dc63-b47c-423a-8e34-362f3171ea18**Background:** "Marine mammal aerial surveys were conducted from 30 August through 4 September 2013 in the Pacific Northwest inland Puget Sound waters. This effort was in support of Marine Mammal Protection Act permit monitoring requirements for the U.S. Navy to conduct marine mammal studies in waters on or adjacent to U.S. Naval installations in the inland Puget Sound Region... . There were 779 marine mammal sightings of over 1716 animals representing 5785 km of flight..."
###Code
# Load CSV
# Note that we're parsing the "eventDate" column as datetimes
# and setting the "recordNumber" column as the index
mm_filepath = '/content/drive/MyDrive/OCEAN 215 - Autumn \'20/OCEAN 215 - Autumn \'20 - Course documents/Video lesson slides and notebooks/2020-11-22 - lesson #15 data/puget_sound_marine_mammal_surveys.csv'
mm_data = pd.read_csv(mm_filepath,parse_dates=['eventDate'],index_col='recordNumber')
# View data
display(mm_data)
###Output
_____no_output_____
###Markdown
Count unique rows of a Pandas Series using `.value_counts()` Think of this like creating a histogram.API reference: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.value_counts.html
###Code
# Count number of rows for each animal
#
# This shows that the most common animal logged is the harbor seal
display(mm_data['vernacularName'].value_counts())
# Count number of rows for each entry recording a certain number of animals
#
# This shows that most entries record just a single animal, but one entry recorded 150 animals!
display(mm_data['individualCount'].value_counts())
###Output
_____no_output_____
###Markdown
Sort a `Pandas` DataFrame by column label using `.sort_values()` The main argument is the column label. You can choose to modify the object "in-place" by setting `inplace=True`.API reference: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sort_values.html
###Code
# Sort by date
mm_data = mm_data.sort_values('eventDate') # Option 1: create new copy, reassign to variable
mm_data.sort_values('eventDate',inplace=True) # Option 2: modify in-place
# Display sorted date column of DataFrame
# Note the index ("recordNumber") now appears to be sorted, too
display(mm_data['eventDate'])
###Output
_____no_output_____
###Markdown
Overview of `.groupby()`: the "split-apply-combine" method By "group by", we are referring to a process involving the following steps:1. **Splitting** the data into groups based on some column or criteria2. **Applying** a function to each group independently, often one that aggregates each group (i.e. a summary statistic like `.mean()`)3. **Combining** the results into a new DataFrameWe use the following syntax:> `df.groupby(<column label>).<function>()`API reference: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.groupby.html
###Code
# Let's group the data using the column label "vernacularName" (the type of animal)
mm_data.groupby('vernacularName')
###Output
_____no_output_____
###Markdown
Learning more about a GroupBy object Attributes and functions shown in this section:* `.groups`* `.count()`* `.get_group()`* `.first()` and `.last()`
###Code
# .groups returns a dictionary with each group label and the index values within that group
mm_data.groupby('vernacularName').groups
# Recall that a dictionary is an object with key-value pairs
# and we can get the keys using .keys()
#
# Then we can convert to a list using list()
list(mm_data.groupby('vernacularName').groups.keys())
# .count() is similar to .value_counts() — it counts the number of rows with data in each group, for each column
#
# Here, notice that some columns are missing data for some groups.
# For instance, there's no order/family/genus specified for the generic label "mammals", probably because
# the scientists used that label for instances where they couldn't determine what type of mammal they had seen.
mm_data.groupby('vernacularName').count()
# Use .get_group() to get a subset of the original DataFrame
mm_data.groupby('vernacularName').get_group('Risso\'s Dolphin')
# Column indexing using brackets ([]) works normally on grouped data
mm_data.groupby('vernacularName').get_group('Risso\'s Dolphin')['eventDate']
# Use .first() or .last() to get the first or last rows in each group
#
# From this, we learn that the first sighting of harbor seals was on August 30, and that
# the researchers saw 25 harbor seals in that sighting.
mm_data.groupby('vernacularName').first()
###Output
_____no_output_____
###Markdown
Applying functions to aggregate data within groups In the "split-apply-combine" paradigm, this is the **apply** and **combine** part.You can apply NumPy functions like `.mean()`, `.sum()`, `.median()`, `.max()`, `.min()`, and `.std()` to calculate statistics for each numerical column of a grouped DataFrame (a `GroupBy` object).You can also call `.describe()` on a grouped DataFrame to get a number of useful summary statistics.
###Code
# Calculate averages over animal groups
#
# From this, we learn that the average location where California sea lions were sighted
# was 47.719°N, 122.585°W.
mm_data.groupby('vernacularName').mean()
# Calculate sums over animal groups
#
# From this, we learn that the researchers saw a total of 1513 harbor seals.
# Note that it doesn't really make sense to calculate the sum of latitude or longitude, but Pandas does it anyway.
mm_data.groupby('vernacularName').sum()
# Use .describe() to get common summary statistics by group for numerical columns
mm_data.groupby('vernacularName').describe()
###Output
_____no_output_____
###Markdown
Using `.groupby()` to group multiple columnsNote that specifying multiple columns creates a hierarchical index (an index with more than one level), also called a `MultiIndex`.
###Code
mm_data.groupby(['vernacularName','individualCount']).mean()
###Output
_____no_output_____
###Markdown
Putting it all together using a Cartopy map
###Code
# Set up Cartopy map
fig = plt.figure(figsize=(11,14))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines(resolution='10m')
ax.add_feature(cfeature.LAND,color='papayawhip',alpha=0.5)
ax.add_feature(cfeature.OCEAN,color='cornflowerblue',alpha=0.4)
ax.set_extent([-123.0,-122.15,47.15,48.2])
gl = ax.gridlines(crs=ccrs.PlateCarree(),draw_labels=True,linewidth=2,color='gray',alpha=0.5,linestyle=':')
gl.top_labels = False
gl.right_labels = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
ax.set_title('2013 Puget Sound marine mammal aerial survey')
# Group data by animal name
mm_data_grouped = mm_data.groupby('vernacularName')
# Use loop to iterate over each animal
for animal in list(mm_data_grouped.groups.keys()):
animal_total_count = mm_data_grouped.sum()['individualCount'].loc[animal]
animal_avg_lon = mm_data_grouped.mean()['decimalLongitude'].loc[animal]
animal_avg_lat = mm_data_grouped.mean()['decimalLatitude'].loc[animal]
animal_std_lon = mm_data_grouped.std()['decimalLongitude'].loc[animal]
animal_std_lat = mm_data_grouped.std()['decimalLatitude'].loc[animal]
# Add scatter points, with size corresponding to number of sightings (scaled by the 1/4 power)
# and location corresponding to average location
# and errorbars corresponding to standard deviation of location
ax.scatter(animal_avg_lon,animal_avg_lat,
s=200*animal_total_count**0.25,
label=animal,zorder=3,
transform=ccrs.PlateCarree())
ax.errorbar(animal_avg_lon,animal_avg_lat,
xerr=animal_std_lon,
yerr=animal_std_lat,
elinewidth=3,capsize=10)
# Create legend
plt.legend(labelspacing=2);
###Output
_____no_output_____
###Markdown
Part 2: `Pandas` - correlating, resampling, and smoothing data Load weather station data from Seattle and Spokane **Data source:** NOAA NCDC [Climate Data Online](https://www.ncdc.noaa.gov/cdo-web/)**Description:** Daily high/low temperature (in °F) and precipitation (in/day) from 1980-present for SeaTac (station 24233) and Spokane (station 24157).
###Code
# Load CSV
# Note that we're parsing the "eventDate" column as datetimes
# and setting the "recordNumber" column as the index
wa_filepath = '/content/drive/MyDrive/OCEAN 215 - Autumn \'20/OCEAN 215 - Autumn \'20 - Course documents/Video lesson slides and notebooks/2020-11-22 - lesson #15 data/wa_weather.csv'
wa_data = pd.read_csv(wa_filepath,parse_dates=['DATE'])
# View data
display(wa_data)
# Divide the data by location into two Pandas DataFrames
seattle = wa_data.loc[wa_data['NAME'] == 'SEATTLE TACOMA AIRPORT, WA US']
spokane = wa_data.loc[wa_data['NAME'] == 'SPOKANE INTERNATIONAL AIRPORT, WA US']
# Make the date column the index for each DataFrame
seattle.set_index('DATE',inplace=True)
spokane.set_index('DATE',inplace=True)
# Confirm that everything worked
display(seattle)
# Plot of the high temperature data
plt.figure(figsize=(18,4))
plt.plot(seattle['TMAX'].index.values,seattle['TMAX'].values,c='k',lw=0.25,alpha=0.6,label='Seattle')
plt.plot(spokane['TMAX'].index.values,spokane['TMAX'].values,c='b',lw=0.25,alpha=0.6,label='Spokane')
plt.grid()
plt.legend()
plt.ylabel('Temperature (°F)')
plt.title('Daily high temperatures at Seattle and Spokane weather stations');
###Output
_____no_output_____
###Markdown
Use `.groupby()` to calculate climatologies and annual averages If a Pandas Series or DataFrame's index has datetimes, it's called a `DatetimeIndex`. `DatetimeIndex` objects have a few useful attributes:* `.year`* `.month`* `.day`* `.dayofyear` (day-of-year goes from 1-366)
###Code
# This finds the year for each date
seattle.index.year
# This finds the day-of-year for each date
seattle.index.dayofyear
# We can group the data by day-of-year, then calculate the average over each day-of-year, for each column
#
# This type of average is called a "climatology"
seattle_clim = seattle.groupby(seattle.index.dayofyear).mean()
spokane_clim = spokane.groupby(spokane.index.dayofyear).mean()
# See result
display(seattle_clim)
# Plot of climatologies
plt.figure(figsize=(10,5))
plt.plot(seattle_clim.index,seattle_clim['TMAX'].values,c='k',label='Seattle')
plt.plot(spokane_clim.index,spokane_clim['TMAX'].values,c='b',label='Spokane')
plt.grid()
plt.legend()
plt.xlabel('Day of year')
plt.ylabel('Temperature (°F)')
plt.title('High temperature climatology (1980-2020) at Seattle and Spokane weather stations');
# We can group the data by year, then calculate the annual averages for each column
seattle_ann = seattle.groupby(seattle.index.year).mean()
spokane_ann = spokane.groupby(spokane.index.year).mean()
# See result
display(seattle_ann)
# Plot of the annual average high temperature data
plt.figure(figsize=(18,6))
plt.plot(seattle_ann['TMAX'].index.values,seattle_ann['TMAX'].values,c='k',lw=3,label='Seattle')
plt.plot(spokane_ann['TMAX'].index.values,spokane_ann['TMAX'].values,c='b',lw=3,label='Spokane')
plt.grid()
plt.legend()
plt.ylabel('Temperature (°F)')
plt.title('Annual average high temperatures at Seattle and Spokane weather stations');
###Output
_____no_output_____
###Markdown
Calculating correlations using `.corr()` Recall that a column of a Pandas DataFrame is a Pandas Series.We can correlate two Pandas Series objects, `s1` and `s2`, using the following syntax:> `s1.corr(s2)`The result is the standard Pearson correlation coefficient, $r$.More commonly, however, you'll use $r^2$. As I introduced in Lesson 14, $r^2$ represents the proportion of variance in one variable that is explained by the other variable.API documentation: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.corr.html
###Code
# Correlate annual average high temperatures from Seattle and Spokane
#
# Note that we provide the entire Pandas Series, not just the .values or the .index
r = seattle_ann['TMAX'].corr(spokane_ann['TMAX'])
print('The r^2 value is:',r**2)
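# If a significance level is also wanted, SciPy (already imported above as stats)
# could provide it alongside r (sketch):
# r_sp, p_value = stats.pearsonr(seattle_ann['TMAX'].values, spokane_ann['TMAX'].values)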
# In other words, there's clearly a relationship between annual-average temperatures in Seattle
# and Spokane.
#
# This plot shows that close relationship, and also shows the regression line.
#
# The r^2 value tells us that 50% of the variance of annual-average temperatures in Spokane
# can be explained by the annual-average Seattle temperatures.
# Linear regression using SciPy (see Lesson #14 for details)
slope,intercept,_,_,_ = stats.linregress(seattle_ann['TMAX'].values,spokane_ann['TMAX'].values)
# Make plot
plt.figure(figsize=(6,6))
plt.scatter(seattle_ann['TMAX'],spokane_ann['TMAX'],c='k',label='Annual averages')
plt.plot(np.arange(57,65), slope * np.arange(57,65) + intercept,c='r',ls='--',label='Linear regression')
plt.legend()
plt.grid()
plt.xlabel('Seattle high temperatures (°F)')
plt.ylabel('Spokane high temperatures (°F)');
###Output
_____no_output_____
###Markdown
Changing the time resolution using `.resample()` We can down-sample the time resolution of a Pandas Series or DataFrame if it has datetimes as its index, i.e. a `DatetimeIndex`.The function `.resample()` takes a "frequency alias" or "offset alias" as its argument. It behaves similar to `.groupby()`, so after you group the data, you have to apply a function like `.mean()`, `.max()`, or `.sum()`.Here are the available frequency aliases: https://pandas.pydata.org/docs/user_guide/timeseries.htmloffset-aliasesSome common options are:* 'H': hourly frequency* 'D': daily frequency* 'W': weekly frequency* 'MS': monthly frequency (use start of month as resulting date)* 'YS': yearly frequency (use start of year as resulting date)API documentation: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html
###Code
# Calculate annual averages using .resample() instead of .groupby()
seattle['TMAX'].resample('YS').mean()
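# Another example (a sketch): monthly precipitation totals. This assumes the
# precipitation column is named 'PRCP', as in standard NOAA CDO daily exports.
# seattle['PRCP'].resample('MS').sum()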
###Output
_____no_output_____
###Markdown
Smoothing data using `.rolling()` averages A common technique to smooth a time series (or other data) is to calculate a rolling average, also called a running average, running mean, or moving average:> `.rolling(window,min_periods=1,center=False)`* `window` specifies the size of the moving window in number of rows* `min_periods` specifies the minimum number of rows required to have data (otherwise the result is np.NaN); this is important at the start and end of the time series* if `center` is True (recommended), the date will be set to center of each window; if False, the date will be set to the end of each windowAgain, `.rolling()` behaves similar to `.groupby()` and `.resample()` in that you have to apply a function like `.mean()` or `.median()` to the grouped data to get a result.API documentation: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.rolling.html
###Code
# Smooth annual average temperatures using a 5-year running mean
seattle_ann_rm = seattle_ann['TMAX'].rolling(window=5,center=True).mean()
spokane_ann_rm = spokane_ann['TMAX'].rolling(window=5,center=True).mean()
# Plot
plt.figure(figsize=(18,5))
plt.plot(seattle_ann['TMAX'].index.values,seattle_ann['TMAX'].values,c='k',lw=1,alpha=0.5,label='Seattle (original)')
plt.plot(spokane_ann['TMAX'].index.values,spokane_ann['TMAX'].values,c='b',lw=1,alpha=0.5,label='Spokane (original)')
plt.plot(seattle_ann_rm,c='k',lw=3,label='Seattle (5-year running mean)')
plt.plot(spokane_ann_rm,c='b',lw=3,label='Spokane (5-year running mean)')
plt.grid()
plt.legend()
plt.ylabel('Temperature (°F)')
plt.title('Annual average high temperatures at Seattle and Spokane weather stations');
# Smooth high temperature climatologies using a 30-day running mean
seattle_clim_rm = seattle_clim['TMAX'].rolling(window=30,center=True,min_periods=1).mean()
spokane_clim_rm = spokane_clim['TMAX'].rolling(window=30,center=True,min_periods=1).mean()
# Plot of climatologies
plt.figure(figsize=(10,5))
plt.plot(seattle_clim.index,seattle_clim['TMAX'].values,c='k',lw=0.5,alpha=0.8,label='Seattle (original)')
plt.plot(spokane_clim.index,spokane_clim['TMAX'].values,c='b',lw=0.5,alpha=0.8,label='Spokane (original)')
plt.plot(seattle_clim_rm,c='k',lw=2,label='Seattle (30-day running mean)')
plt.plot(spokane_clim_rm,c='b',lw=2,label='Spokane (30-day running mean)')
plt.grid()
plt.legend()
plt.xlabel('Day of year')
plt.ylabel('Temperature (°F)')
plt.title('High temperature climatology (1980-2020) at Seattle and Spokane weather stations');
###Output
_____no_output_____ |
MLPRegressor.ipynb | ###Markdown
Prepare the columns to use
###Code
cant_buckets = 500
#buckets
cantidad,rango = np.histogram(properati['price_usd_per_m2'], bins=cant_buckets)
properati['categories_by_price']=pd.cut(properati['price_usd_per_m2'],rango,labels=np.arange(cant_buckets))
properati['price_range']=pd.cut(properati['price_usd_per_m2'],rango)
#lo casteo a float porque si no tira error
properati['categories_by_price']=properati['categories_by_price'].astype(np.float64)
###Output
_____no_output_____
###Markdown
Look for an approximation of the hyper-parameters with random search
###Code
# convert string features to numeric values
le_barrio = preprocessing.LabelEncoder()
barrios=properati['state_name']
le_barrio.fit(barrios)
properati['state_name'] = le_barrio.transform(barrios)
le_zona = preprocessing.LabelEncoder()
zona=properati['place_name']
le_zona.fit(zona)
properati['place_name'] = le_zona.transform(zona)
le_tipo = preprocessing.LabelEncoder()
tipos_prop=properati['property_type']
le_tipo.fit(tipos_prop)
properati['property_type'] = le_tipo.transform(tipos_prop)
%%notify
# prepare the data set
X = zip(properati['surface_total_in_m2'],\
properati['surface_covered_in_m2'],properati['property_type'],properati['state_name'],properati['place_name'])
y = properati['price_aprox_usd']
scaler = preprocessing.Normalizer()
X=scaler.fit_transform(X,y)
perceptron = MLPRegressor()
# Utility function to report best scores
solver = ["lbfgs", "sgd", "adam"]
activation =["identity", "logistic", "tanh", "relu"]
alpha = np.arange(0.0001,0.01,0.0001)
param_grid = {"alpha": alpha, "solver": solver,"activation":activation}
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2)
# run randomized search
random_search = RandomizedSearchCV(perceptron, param_distributions=param_grid,
n_iter=20,cv=5,n_jobs=-1) # refit=False would be needed to use multiple scoring metrics
start = time()
random_search.fit(X_train, y_train)
print("RandomizedSearchCV duro %.2f segundos para %d candidatos a hiper-parametros."
% (time() - start, len(random_search.cv_results_['params'])))
print("")
score.report_single(random_search.cv_results_)
###Output
/usr/local/lib/python2.7/dist-packages/sklearn/neural_network/multilayer_perceptron.py:564: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.
% self.max_iter, ConvergenceWarning)
/usr/local/lib/python2.7/dist-packages/sklearn/neural_network/multilayer_perceptron.py:564: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.
% self.max_iter, ConvergenceWarning)
/usr/local/lib/python2.7/dist-packages/sklearn/neural_network/multilayer_perceptron.py:564: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.
% self.max_iter, ConvergenceWarning)
/usr/local/lib/python2.7/dist-packages/sklearn/neural_network/multilayer_perceptron.py:564: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (200) reached and the optimization hasn't converged yet.
% self.max_iter, ConvergenceWarning)
/usr/local/lib/python2.7/dist-packages/sklearn/neural_network/_base.py:194: RuntimeWarning: overflow encountered in square
return ((y_true - y_pred) ** 2).mean() / 2
/usr/local/lib/python2.7/dist-packages/sklearn/neural_network/_base.py:194: RuntimeWarning: overflow encountered in square
return ((y_true - y_pred) ** 2).mean() / 2
/usr/local/lib/python2.7/dist-packages/sklearn/neural_network/_base.py:194: RuntimeWarning: overflow encountered in square
return ((y_true - y_pred) ** 2).mean() / 2
###Markdown
Search the hyper-parameters in more detail, around the range of the best results, with Grid Search
###Code
%%notify
# prepare the data set
X = zip(properati['dist_a_subte'],properati['dist_a_univ'])
y = properati['categories_by_price']
perceptron = Perceptron(n_jobs=-1)
alpha=np.arange(0.2,0.5,0.01)
pen =['l2','elasticnet']
param_grid = {"alpha": alpha, "penalty": pen}
custom_cv = ShuffleSplit(n_splits=5, test_size=0.2)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2)
grid_search = GridSearchCV(perceptron,param_grid=param_grid,cv=custom_cv)
start = time()
grid_search.fit(X_train, y_train)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.cv_results_['params'])))
score.report_single(grid_search.cv_results_)
mejor_rf = grid_search.best_estimator_
print mejor_rf
errores = mejor_rf.predict(X_test)-y_test
print ("Error maximo:{0}\nError minimo:{1}".format( max(abs(errores)),min(abs(errores))))
print(errores)
count_max=0
max_error=100
lista=[]
for error in errores:
if abs(error)>max_error:
count_max+=1
lista.append(abs(error))
count_max
# the histogram of the data
plt.figure(figsize=(12,8))
plt.hist(errores, 100, facecolor='blue')
plt.xlabel('Errors')
plt.ylabel('Count')
#plt.xlim(-1000, 1000) # to vary the "zoom" around 0
plt.yscale('log')
plt.show()
###Output
_____no_output_____
###Markdown
Model
###Code
#MLPRegressor
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
# Data processing
X = mlp_model_data.drop('TmScore', axis=1).values
y = mlp_model_data['TmScore'].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.10)
MLP_model = MLPRegressor(hidden_layer_sizes=100, max_iter= 5000, random_state= 40, batch_size= 30, solver= 'lbfgs',
activation= 'identity')
MLP_model.fit(X_train, y_train)
y_pred = MLP_model.predict(X_test)
print(mean_absolute_error(y_test, y_pred))
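# r2_score and mean_squared_error are already imported above and could be
# reported alongside MAE (sketch):
# print(r2_score(y_test, y_pred), mean_squared_error(y_test, y_pred))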
Pkl_MLP_model = 'MLP_model.sav'
pickle.dump(MLP_model, open(Pkl_MLP_model, 'wb'))
pd.options.display.max_rows = None
#importance = MLP_model.feature_importances_
#feature_names = mlp_model_data.drop('TmScore', axis=1).columns
#d = {'Var_Name': feature_names, 'Imp': importance}
#df = pd.DataFrame(data=d)
#df = df.sort_values(by= ['Imp'], ascending = False).reset_index(drop=True)
#df.head(15)
# function that return the score of two teams playing
pd.options.display.max_columns = None
# Create playoff test dataset from season averages
def Score_Predictor(home_team, away_team):
team1 = home_team
team2 = away_team
team1_data = mlp_model_data[com_data['Team'] == team1].drop('TmScore', axis=1).reset_index(drop=True)
team2_data = mlp_model_data[com_data['Team'] == team2].drop('TmScore', axis=1).reset_index(drop=True)
week_slice = slice(0,16)
#1 Remove if no team names
team1_test = pd.DataFrame(team1_data[week_slice].mean(axis=0)).T #select week to use as average
team1_test
opp_columns = team1_test.filter(like='Opp').columns
team1_test[opp_columns] = 0
team1_test['Opp_' + team2] = 1
team1_test['Home'] = 1
#2
team2_test = pd.DataFrame(team2_data[week_slice].mean(axis=0)).T #select week to use as average
opp_columns = team2_test.filter(like='Opp').columns
team2_test[opp_columns] = 0
team2_test['Opp_' + team1] = 1
team2_test['Home'] = 1 # change to remove home field advantage
# head to head matchup
team1_test[['D_1stD','D_Tot_Yd','D_P_Yd','D_R_Yd','D_TO']] = team2_test[['O_1stD','O_Tot_yd','O_P_Yd','O_R_Yd','O_TO']]
team2_test[['D_1stD','D_Tot_Yd','D_P_Yd','D_R_Yd','D_TO']] = team1_test[['O_1stD','O_Tot_yd','O_P_Yd','O_R_Yd','O_TO']]
X_Playoff_test = pd.concat([team1_test, team2_test])
    X_Playoff_test.fillna(0, inplace = True) # added to address the NaNs that were causing the error
scores = MLP_model.predict(X_Playoff_test)
print(team1, "will score", round(scores[0], 1))
print(team2, "will score", round(scores[1], 1))
if scores[0] > scores[1]:
winner = team1
else:
winner = team2
print(winner, "are the WINNERS!!!")
return scores, winner
print(y_train.shape)
print(y_test.shape)
###Output
(201, 1)
(23, 1)
###Markdown
Potential improvement: add a for loop that feeds each round's winners into the next round automatically (team names must be the full names) and add the seeding; a hedged sketch of this appears after the Super Bowl cell below. NFC Playoff Round 1
###Code
scores, winner = Score_Predictor('Philadelphia Eagles', 'Tampa Bay Buccaneers')
scores, winner = Score_Predictor('San Francisco 49ers', 'Dallas Cowboys')
scores, winner = Score_Predictor('Arizona Cardinals', 'Los Angeles Rams')
###Output
Arizona Cardinals will score 29.8
Los Angeles Rams will score 23.8
Arizona Cardinals are the WINNERS!!!
###Markdown
AFC Playoff Round 1
###Code
scores, winner = Score_Predictor('Pittsburgh Steelers', 'Kansas City Chiefs')
scores, winner = Score_Predictor('Las Vegas Raiders', 'Cincinnati Bengals')
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
###Output
Buffalo Bills will score 22.8
New England Patriots will score 28.9
New England Patriots are the WINNERS!!!
###Markdown
NFC Playoff Round 2
###Code
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
###Output
Buffalo Bills will score 22.8
New England Patriots will score 28.9
New England Patriots are the WINNERS!!!
###Markdown
AFC Playoff Round 2
###Code
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
###Output
Buffalo Bills will score 22.8
New England Patriots will score 28.9
New England Patriots are the WINNERS!!!
###Markdown
NFC Championship
###Code
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
###Output
Buffalo Bills will score 22.8
New England Patriots will score 28.9
New England Patriots are the WINNERS!!!
###Markdown
AFC Championship
###Code
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
###Output
Buffalo Bills will score 22.8
New England Patriots will score 28.9
New England Patriots are the WINNERS!!!
###Markdown
Super Bowl
###Code
scores, winner = Score_Predictor('Buffalo Bills', 'New England Patriots')
###Output
Buffalo Bills will score 22.8
New England Patriots will score 28.9
New England Patriots are the WINNERS!!!
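###Markdown
A hedged sketch of the bracket automation mentioned before Round 1: it chains the Score_Predictor defined above so each round's winners feed the next round. The pairing order of winners, the power-of-two bracket size, and the example pairings are simplifying assumptions rather than actual seedings.
###Code
# Sketch only: advance winners round by round with Score_Predictor.
def run_bracket(pairings):
    """pairings: list of (home_team, away_team) tuples for the opening round."""
    current_round = pairings
    while True:
        winners = []
        for home, away in current_round:
            _, winner = Score_Predictor(home, away)
            winners.append(winner)
        if len(winners) == 1:
            return winners[0]
        # pair consecutive winners for the next round (assumes an even, power-of-two bracket)
        current_round = [(winners[i], winners[i + 1]) for i in range(0, len(winners), 2)]

# Example with placeholder pairings:
# champion = run_bracket([('Philadelphia Eagles', 'Tampa Bay Buccaneers'),
#                         ('San Francisco 49ers', 'Dallas Cowboys')])
###Output
_____no_output_____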
|
milestone1/1_network_properties.ipynb | ###Markdown
NTDS'18 milestone 1: network collection and properties[Effrosyni Simou](https://lts4.epfl.ch/simou), [EPFL LTS4](https://lts4.epfl.ch) Students* Team: 37* Students: Adélie Eliane Garin, Celia Camille Hacker, Isabela Constantin, Michael Spieler* Dataset: Wikipedia https://snap.stanford.edu/data/wikispeedia/wikispeedia_paths-and-graph.tar.gz Rules* Milestones have to be completed by teams. No collaboration between teams is allowed.* Textual answers shall be short. Typically one to three sentences.* Code has to be clean.* You cannot import any other library than we imported.* When submitting, the notebook is executed and the results are stored. I.e., if you open the notebook again it should show numerical results and plots. We won't be able to execute your notebooks.* The notebook is re-executed from a blank state before submission. That is to be sure it is reproducible. You can click "Kernel" then "Restart & Run All" in Jupyter. Objective The purpose of this milestone is to start getting acquainted to the network that you will use for this class. In the first part of the milestone you will import your data using [Pandas](http://pandas.pydata.org) and you will create the adjacency matrix using [Numpy](http://www.numpy.org). This part is project specific. In the second part you will have to compute some basic properties of your network. **For the computation of the properties you are only allowed to use the packages that have been imported in the cell below.** You are not allowed to use any graph-specific toolboxes for this milestone (such as networkx and PyGSP). Furthermore, the aim is not to blindly compute the network properties, but to also start to think about what kind of network you will be working with this semester.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import sparse
%matplotlib inline
# for replicating Q10
np.random.seed(seed=37)
###Output
_____no_output_____
###Markdown
Part 1 - Import your data and manipulate them. A. Load your data in a Panda dataframe. First, you should define and understand what are your nodes, what features you have and what are your labels. Please provide below a Panda dataframe where each row corresponds to a node with its features and labels. For example, in the the case of the Free Music Archive (FMA) Project, each row of the dataframe would be of the following form:| Track | Feature 1 | Feature 2 | . . . | Feature 518| Label 1 | Label 2 |. . .|Label 16||:-------:|:-----------:|:---------:|:-----:|:----------:|:--------:|:--------:|:---:|:------:|| | | | | | | | | |It is possible that in some of the projects either the features or the labels are not available. This is OK, in that case just make sure that you create a dataframe where each of the rows corresponds to a node and its associated features or labels.
###Code
data_path = '../wikispeedia_paths-and-graph/'
# function to transform titles from ascii to utf-8
def decode_utf8(string):
parts = string.encode('ascii').split(b'%')
decoded = [bytes.fromhex(part[:2].decode('ascii')) + part[2:] for part in parts[1:]]
raw = parts[0] + b''.join(decoded)
return raw.decode('utf-8')
articles = pd.read_csv(data_path+ 'articles.tsv', sep='\t', names=['article'], skiprows=11)
articles.head(10)
articles['article'] = articles['article'].apply(decode_utf8)
articles.head(10)
categories = pd.read_csv(data_path+ 'categories.tsv', sep='\t', names=['category'], skiprows=12).reset_index()
categories.rename(columns={"index": "article"}, inplace=True)
categories['article'] = categories['article'].apply(decode_utf8)
categories['category'] = categories['category'].apply(lambda x: [x])
categories.head()
# collapse all the categories into a list
article_df = categories.groupby(by= 'article').agg({'category': 'sum'}).reset_index()
article_df.head(10)
#sanity check
article_df.loc[article_df['article'] == 'Áedán_mac_Gabráin']['category'].iloc[0]
article_df.shape
###Output
_____no_output_____
###Markdown
We used `article_df` instead of `features` B. Create the adjacency matrix of your network. Remember that there are edges connecting the attributed nodes that you organized in the dataframe above. The connectivity of the network is captured by the adjacency matrix $W$. If $N$ is the number of nodes, the adjacency matrix is an $N \times N$ matrix where the value of $W(i,j)$ is the weight of the edge connecting node $i$ to node $j$. There are two possible scenarios for your adjacency matrix construction, as you already learned in the tutorial by Benjamin:1) The edges are given to you explicitly. In this case you should simply load the file containing the edge information and parse it in order to create your adjacency matrix. See how to do that in the [graph from edge list]() demo.2) The edges are not given to you. In that case you will have to create a feature graph. In order to do that you will have to chose a distance that will quantify how similar two nodes are based on the values in their corresponding feature vectors. In the [graph from features]() demo Benjamin showed you how to build feature graphs when using Euclidean distances between feature vectors. Be curious and explore other distances as well! For instance, in the case of high-dimensional feature vectors, you might want to consider using the cosine distance. Once you compute the distances between your nodes you will have a fully connected network. Do not forget to sparsify by keeping the most important edges in your network.Follow the appropriate steps for the construction of the adjacency matrix of your network and provide it in the Numpy array ``adjacency`` below:
###Code
edges = pd.read_csv(data_path + 'links.tsv', sep='\t', names=['article', 'link'], skiprows=12)
edges['article'] = edges['article'].apply(decode_utf8)
edges['link'] = edges['link'].apply(decode_utf8)
edges.head()
# Note there are links being linked but not having a category
set(list(edges['link'])) - set(list(article_df['article']))
# Note there are links that link to other articles but dont have a category
set(list(edges['article'])) - set(list(article_df['article']))
###Output
_____no_output_____
###Markdown
We add these pages to our article_df even if they don't have a category
###Code
article_df = article_df.merge(edges.drop(columns=['link']).drop_duplicates(), \
right_on= 'article', left_on= 'article', how='outer' )
print(len(article_df))
edges_temp = edges.drop(columns=['article']).drop_duplicates().rename(columns = {'link': 'article'})
article_df = article_df.merge(edges_temp, right_on= 'article', left_on= 'article', how='outer')
article_df.head()
article_df.shape
nodes = article_df.reset_index(level=0)
nodes.drop(columns=['category'], inplace=True)
nodes.rename(columns={'index':'idx'}, inplace=True)
nodes.head()
# map node names to indices
node_map = dict(zip( nodes.article, nodes.idx))
edges['article_idx'] = edges['article'].apply(lambda x: node_map[x])
edges['link_idx'] = edges['link'].apply(lambda x: node_map[x])
edges = edges.drop(columns=['article', 'link'])
edges.head()
###Output
_____no_output_____
###Markdown
We have the convention that adjacency[i, j] = 1 means that node i (row i) links to node j (column j)
###Code
n_nodes = len(nodes)
adjacency = np.zeros((n_nodes, n_nodes), dtype=int)
for idx, row in edges.iterrows():
if np.isnan(row.link_idx):
continue
i, j = int(row.article_idx), int(row.link_idx)
adjacency[i, j] = 1
n_nodes
# sanity checks
adjacency[1][3002] == 1 # there is a link between page 1 and 3002
edges.loc[(edges['article_idx'] == 1) & (edges['link_idx'] == 3002)]
###Output
_____no_output_____
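###Markdown
The notebook uses scenario 1 above (an explicit edge list). For completeness, a hedged sketch of scenario 2 is shown below: building a feature graph from hypothetical feature vectors with cosine distances and keeping only the k strongest edges per node. The `features` array is an assumption for illustration; it is not computed anywhere in this notebook.
###Code
# Sketch only (not used for the Wikispeedia graph): feature-graph construction.
# Assumes `features` is an (n_nodes, d) numpy array of node feature vectors.
from scipy.spatial.distance import pdist, squareform

def feature_graph(features, k=10):
    distances = squareform(pdist(features, metric='cosine'))  # pairwise cosine distances
    weights = np.exp(-distances**2 / distances.mean()**2)     # similarity kernel
    np.fill_diagonal(weights, 0)                               # no self-loops
    # sparsify: keep the k strongest edges per node, then symmetrize
    adj = np.zeros_like(weights)
    for i in range(weights.shape[0]):
        neighbours = np.argsort(weights[i])[-k:]
        adj[i, neighbours] = weights[i, neighbours]
    return np.maximum(adj, adj.T)
###Output
_____no_output_____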
###Markdown
Part 2 Execute the cell below to plot the (weighted) adjacency matrix of your network.
###Code
plt.spy(adjacency)
plt.title('adjacency matrix')
###Output
_____no_output_____
###Markdown
Question 1What is the maximum number of links $L_{max}$ in a network with $N$ nodes (where $N$ is the number of nodes in your network)? How many links $L$ are there in your collected network? Comment on the sparsity of your network.
###Code
n_nodes = len(nodes)
L = np.sum(adjacency)
# This sums the links in the directed graph: if we have A->B and B->A then it counts as two links.
# If we wanted this to count as one link we would have to do the same computation on the undirected adjacency matrix and divide by 2
L_max_undirected = int(n_nodes*(n_nodes-1)/2)
# Again, n_nodes*(n_nodes-1)/2) is the undirected case. In the directed case there can be two links between a node A and a node B.
L_max = L_max_undirected * 2
# We multiplied by 2 because the maximal number of links is "doubled" by A->B and B->A (need to count them twice)
print('L = {}, L_max = {}, sparsity = {:.4f}'.format(L, L_max, L/L_max))
###Output
L = 119882, L_max = 21173802, sparsity = 0.0057
###Markdown
*Answer* Clearly L << L_max as in many real world networks. It makes sense here as many wikipedia pages (like 'cats') will not be linked to other unrelated subjects (like 'spaceships') :)We can also see the sparsity in the adjacency matrix above. Question 2Is your graph directed or undirected? If it is directed, convert it to an undirected graph by symmetrizing the adjacency matrix. **Your answer here.***Answer:*Our graph is directed since a URL link on a Wiki page is directed.To make it undirected the adjacency matrix can be OR-ed with its transposed such that it is symmetric.
###Code
adjacency_undirected = np.maximum(adjacency, adjacency.T)
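# the element-wise maximum acts as a logical OR on the 0/1 adjacency matrix: an edge is kept if it exists in either direction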
plt.spy(adjacency_undirected)
###Output
_____no_output_____
###Markdown
Question 3In the cell below save the features dataframe and the **symmetrized** adjacency matrix. You can use the Pandas ``to_csv`` to save the ``features`` and Numpy's ``save`` to save the ``adjacency``. We will reuse those in the following milestones.
###Code
# Your code here.
edges.to_csv('edges.csv')
article_df.to_csv('article_dv.csv')
#np.save('adjacency_sym.npy', adjacency_sym)
np.savez_compressed('adjacency_undirected.npz', adjacency_undirected)
###Output
_____no_output_____
###Markdown
NOTE: For the following questions we consider only the undirected graph! Question 4Are the edges of your graph weighted? **Your answer here.**No, links between Wikipedia pages are not weighted. Question 5What is the degree distibution of your network?
###Code
degree = np.sum(adjacency_undirected, axis=0)
assert len(degree) == n_nodes
###Output
_____no_output_____
###Markdown
Execute the cell below to see the histogram of the degree distribution.
###Code
weights = np.ones_like(degree) / float(n_nodes)
plt.hist(degree, weights=weights);
# for a more informative plot, use log scale
plt.hist(degree, weights=weights, log=True);
###Output
_____no_output_____
###Markdown
What is the average degree?
###Code
# Considering the undirected graph
L_undirected= np.sum(adjacency_undirected)/2
# We compute the number of links in the undirected case as this will differ from the directed case
print ('The average degree in the network is {:.2f}'.format(2*L_undirected/n_nodes))
###Output
The average degree in the network is 46.32
###Markdown
Question 6Comment on the degree distribution of your network. **Your answer here.***Answer*We have 4602 nodes with an average degree of 46.32 (in the undirected network). Compared to other internet networks this is much higher. For example the network consisting of webpages, the average is only 4.6. It is not surprising in the case of Wikipedia as there are many links in every wikipedia page. Looking at the log scale, we can see that a very small number of pages have a very high number of links (they could be hubs) Question 7Write a function that takes as input the adjacency matrix of a graph and determines whether the graph is connected or not.
###Code
# a queue data structure for BFS
class Queue:
def __init__(self):
self.elem = []
def isEmpty(self):
return (len(self.elem) == 0)
def enqueue(self, item):
self.elem.append(item)
def dequeue(self):
return self.elem.pop(0)
def size(self):
        return len(self.elem)
# pseudocode: start BFS from node 0. if by the end the number of visited nodes < nb of nodes
# then the graph is disconnected
def connected_graph(adjacency):
"""Determines whether a graph is connected.
Parameters
----------
adjacency: numpy array
The (weighted) adjacency matrix of a graph.
Returns
-------
bool
True if the graph is connected, False otherwise.
"""
start_node = 0
node_q = Queue()
node_q.enqueue(start_node)
visited = set()
visited.add(start_node)
nb_visited = 1
while (node_q.isEmpty() == False ):
curr = node_q.dequeue()
successors = adjacency[curr].nonzero()[0]
for succ in successors:
if succ not in visited:
node_q.enqueue(succ)
visited.add(succ)
nb_visited += 1
connected = (nb_visited == adjacency.shape[0])
if connected:
print('The graph is connected')
else:
print('The graph is not connected')
print('The number of visited nodes starting from ', start_node, ' is ', nb_visited, ' out of ', len(adjacency))
return connected
###Output
_____no_output_____
###Markdown
Is your graph connected? Run the ``connected_graph`` function to determine your answer.
###Code
# Your code here.
connected_graph(adjacency_undirected)
###Output
The graph is not connected
The number of visited nodes starting from 0 is 4589 out of 4602
###Markdown
Question 8Write a function that extracts the connected components of a graph.
###Code
# similar approach as in previous question, but add an outer for loop in order to go through all connected components
# of the graph
def find_components(adjacency):
"""Find the connected components of a graph.
Parameters
----------
adjacency: numpy array
The (weighted) adjacency matrix of a graph.
Returns
-------
list of numpy arrays
A list of adjacency matrices, one per connected component.
"""
n_nodes = adjacency.shape[0]
components = []
is_visited_global = np.zeros(n_nodes, dtype=bool)
for node in range(n_nodes):
if is_visited_global[node] == False:
start_node = node
node_q = Queue()
node_q.enqueue(start_node)
visited = set()
visited.add(start_node)
is_visited_global[start_node]= True
while (node_q.isEmpty() == False ):
curr = node_q.dequeue()
successors = adjacency[curr].nonzero()[0]
for succ in successors:
if succ not in visited:
node_q.enqueue(succ)
visited.add(succ)
is_visited_global[succ] = True
# now a component has been found, add it to the list of adj matricies
idx_comp = list(visited)
components.append(adjacency[idx_comp][:,idx_comp])
return components
###Output
_____no_output_____
###Markdown
Note:* we could have used a single function to do BFS over a connected component in both Q 7 and 8 to avoid code repetition, but to go by the required API, we decided to stick to writing two independent functions. How many connected components is your network composed of? What is the size of the largest connected component? Run the ``find_components`` function to determine your answer.
###Code
# Your code here.
connected_comp = find_components(adjacency_undirected)
print('The number of connected components in the graph is ', len(connected_comp))
idx_larg_comp = np.argmax([len(adj) for adj in connected_comp])
adj_larg_comp = connected_comp[idx_larg_comp]
nb_nodes_larg_comp = len(adj_larg_comp)
print('The largest component has ', nb_nodes_larg_comp, ' nodes')
###Output
The number of connected components in the graph is 12
The largest component has 4589 nodes
###Markdown
Question 9Write a function that takes as input the adjacency matrix and a node (`source`) and returns the length of the shortest path between that node and all nodes in the graph using Dijkstra's algorithm. **For the purposes of this assignment we are interested in the hop distance between nodes, not in the sum of weights. **Hint: You might want to mask the adjacency matrix in the function ``compute_shortest_path_lengths`` in order to make sure you obtain a binary adjacency matrix.
###Code
# Implements the Dijkstra algorithm from Wikipedia: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm#Algorithm
def compute_shortest_path_lengths(adjacency, source):
"""Compute the shortest path length between a source node and all nodes.
Parameters
----------
adjacency: numpy array
The (weighted) adjacency matrix of a graph.
source: int
The source node. A number between 0 and n_nodes-1.
Returns
-------
list of ints
The length of the shortest path from source to all nodes. Returned list should be of length n_nodes.
"""
n_nodes = adjacency.shape[0]
MAX_DIST = np.inf
unvisited = set(np.arange(n_nodes))
shortest_path_lengths = np.full(n_nodes, MAX_DIST)
shortest_path_lengths[source] = 0
while unvisited:
unvisited_list = list(unvisited)
current = unvisited_list[np.argmin(shortest_path_lengths[unvisited_list])]
adjacency_list = adjacency[current]
neighbors = set(np.nonzero(adjacency_list)[0])
for n in neighbors.intersection(unvisited):
path_len = shortest_path_lengths[current] + 1
if shortest_path_lengths[n] > path_len:
shortest_path_lengths[n] = path_len
unvisited.remove(current)
return shortest_path_lengths
short_node_0 = compute_shortest_path_lengths(adjacency_undirected, 0)
short_node_0
# For example, list of nodes not reachable by 0
np.where(short_node_0 == np.inf)
###Output
_____no_output_____
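###Markdown
Since the graph is unweighted (hop distance), a plain BFS gives the same distances in O(N + E) instead of the O(N^2) selection loop above. A hedged alternative sketch using collections.deque:
###Code
# Alternative sketch (not used above): BFS shortest hop distances from a source node.
from collections import deque

def bfs_shortest_path_lengths(adjacency, source):
    n_nodes = adjacency.shape[0]
    dist = np.full(n_nodes, np.inf)
    dist[source] = 0
    queue = deque([source])
    while queue:
        current = queue.popleft()
        for neighbour in np.nonzero(adjacency[current])[0]:
            if dist[neighbour] == np.inf:
                dist[neighbour] = dist[current] + 1
                queue.append(neighbour)
    return dist
###Output
_____no_output_____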
###Markdown
Question 10The diameter of the graph is the length of the longest shortest path between any pair of nodes. Use the above developed function to compute the diameter of the graph (or the diameter of the largest connected component of the graph if the graph is not connected). If your graph (or largest connected component) is very large, computing the diameter will take very long. In that case downsample your graph so that it has 1.000 nodes. There are many ways to reduce the size of a graph. For the purposes of this milestone you can chose to randomly select 1.000 nodes.
###Code
# Your code here.
# take the largest connected comp
# sample randomly approx 1000 nodes and extract the largest comp from the subsampled graph
sample_idx = np.random.choice(nb_nodes_larg_comp, size = 1000, replace=False)
adj_sample = adj_larg_comp[sample_idx][:,sample_idx]
components_sample = find_components(adj_sample)
idx_larg_comp = np.argmax([len(adj) for adj in components_sample])
adj_sample_conn = components_sample[idx_larg_comp]
# compute the longest shortest path for each node.
nb_n_sample = len(adj_sample_conn)
print('The selected component has ', nb_n_sample, ' nodes')
longest_shortest_paths = [np.max(compute_shortest_path_lengths(adj_sample_conn, node)) for node in range(nb_n_sample)]
print('The diameter of the largest connected comp of the sub-sampled graph is ', np.max(longest_shortest_paths) )
###Output
The diameter of the largest connected comp of the sub-sampled graph is 8.0
###Markdown
Question 11Write a function that takes as input the adjacency matrix, a path length, and two nodes (`source` and `target`), and returns the number of paths of the given length between them. Note: for answering this question, we used the following theorem:Let G a simple undirected graph and A its adjacency matrix. The $(i,j)$ th entry of $A^k$ counts the number of walks of length $k$ having source and end vertices$i$ and $j$ respectively.
###Code
# as it was much slower to compute on the dense matrix, we first made it sparse and improved the time from 188 seconds to 3 seconds
def sparse_matrix_pow(A, k):
As = sparse.csr_matrix(A)
tmp = As
for i in range(k-1):
tmp = tmp*As
As = tmp
Ad = np.empty(A.shape, dtype=A.dtype)
As.todense(out=Ad)
return Ad
def compute_paths(adjacency, source, target, length):
"""Compute the number of paths of a given length between a source and target node.
Parameters
----------
adjacency: numpy array
The (weighted) adjacency matrix of a graph.
source: int
The source node. A number between 0 and n_nodes-1.
target: int
The target node. A number between 0 and n_nodes-1.
length: int
The path length to be considered.
Returns
-------
int
The number of paths.
"""
#n_paths=int(np.linalg.matrix_power(adjacency, length)[source][target])
n_paths = int(sparse_matrix_pow(adjacency, length)[source][target])
return n_paths
###Output
_____no_output_____
###Markdown
Test your function on 5 pairs of nodes, with different lengths.
###Code
print(compute_paths(adjacency_undirected, 0, 10, 1))
print(compute_paths(adjacency_undirected, 0, 10, 2))
print(compute_paths(adjacency_undirected, 0, 10, 3))
print(compute_paths(adjacency_undirected, 23, 67, 2))
print(compute_paths(adjacency_undirected, 15, 93, 4))
###Output
0
2
248
0
8079
###Markdown
Question 12How many paths of length 3 are there in your graph? Hint: calling the `compute_paths` function on every pair of node is not an efficient way to do it.
###Code
# we sum all the paths of length 3
adjacency_undirected_power_3=sparse_matrix_pow(adjacency_undirected,3)
print('The number of paths of length 3 in our graph is ' + str(int(np.sum(adjacency_undirected_power_3))))
###Output
The number of paths of length 3 in our graph is 3702162721
###Markdown
Question 13Write a function that takes as input the adjacency matrix of your graph (or of the largest connected component of your graph) and a node and returns the clustering coefficient of that node.
###Code
# we modified the API to account for the matrix multiplication, in order to do it just once.
def compute_clustering_coefficient(adjacency, node, power_mat=None, degree=None):
"""Compute the clustering coefficient of a node.
Parameters
----------
adjacency: numpy array
The (weighted) adjacency matrix of a graph.
node: int
The node whose clustering coefficient will be computed. A number between 0 and n_nodes-1.
Returns
-------
float
The clustering coefficient of the node. A number between 0 and 1.
"""
if power_mat is None:
power_mat = sparse_matrix_pow(adjacency, 3)
L = power_mat[node][node]/2
#for L we computed the number of triangles based at the node, this number divided by two gives the number of links between the neighbors of the node
if degree is None:
degree = np.sum(adjacency, axis = 0)
k= degree[node]
if k in {0, 1}:
clustering_coefficient= 0
else:
clustering_coefficient= L*2/(k*(k-1))
return clustering_coefficient, power_mat, degree
coeff, power_mat, degree = compute_clustering_coefficient(adj_larg_comp,0 )
print('The clustering coeff of node 0 is {:.5f}'.format(coeff))
###Output
The clustering coeff of node 0 is 0.16877
###Markdown
Question 14What is the average clustering coefficient of your graph (or of the largest connected component of your graph if your graph is disconnected)? Use the function ``compute_clustering_coefficient`` to determine your answer.
###Code
nb_nodes_larg_comp
average_clustering_coefficient=0
for i in range(nb_nodes_larg_comp):
coeff, _, _ = compute_clustering_coefficient(adj_larg_comp, i, power_mat, degree)
average_clustering_coefficient+= coeff
average_clustering_coefficient=average_clustering_coefficient/nb_nodes_larg_comp
print('The average clustering coefficient of our network is {:.5f}'.format(average_clustering_coefficient))
###Output
The average clustering coefficient of our network is 0.27784
###Markdown
Unit tests Question 8
###Code
# connected graph
adj_undir = np.array([[0, 1, 1, 0, 1, 0],
[1, 0, 1, 1, 0, 1],
[1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 0],
[1, 0, 1, 1, 0, 1],
[0, 1, 0, 0, 1, 0]])
find_components(adj_undir)
# disconnect 0
adj_undir_2 = np.array([[0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 1],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 1, 1, 0, 1],
[0, 1, 0, 0, 1, 0]])
find_components(adj_undir_2)
# disconnect 5
adj_undir_3 = np.array([[0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0]])
find_components(adj_undir_3)
# have all disconnected
adj_undir_4 = np.array([[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
find_components(adj_undir_4)
###Output
_____no_output_____ |
OCR_SUDOKU_SOLVER/Sudoku_Solver_OCR.ipynb | ###Markdown
Table of Contents1 Making Necessary imports2 Defining some additional helper functions3 A paramater dictionary for the task4 To Do:5 How:6 Now the question arises what if the the digits are misclassified?7 How do we address the above issue ?8 Step 1)8.0.1 Creating ImageDataGenerator instance and appling it on the input image9 Step 2)9.0.1 Loading model10 Note: From here on the next few cells show what parts of a whole process do11 We will put them all together at the end12 Step 3)12.0.1 Resize the image to desired size and use the find_puzzle function we defined to detect puzzle outline13 Step 4)13.0.1 Exp :14 Step 5)14.0.1 Exp :15 Note : It is not an issue if the puzzle was detected incorrectly in the above cell15.0.1 Run the 2 following cells only if previously the puzzle was detected correctly16 Step 6)16.0.1 Displaying result to the user17 Putting it all together17.0.1 In this section, we integrate all the above mentioned parts18 Thus we have successfully achieved our goal of solving a Sudoku puzzle from a given picture through OCR and Deep Learning Making Necessary imports
###Code
import cv2
import skimage
from sudoku import Sudoku
import utilities # contains some helper functions we shall use
from skimage.segmentation import clear_border
import numpy as np
import tensorflow
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import img_to_array
from utilities.puzzle import find_puzzle
from utilities.puzzle import extract_digit
import matplotlib.pyplot as plt
%matplotlib inline
from utilities.transform import birds_eye_view
from utilities.resizer import resize
from ipywidgets import widgets
###Output
_____no_output_____
###Markdown
Defining some additional helper functions
###Code
def disp(image):
plt.figure(figsize=(12,12))
plt.imshow(image,cmap="gray")
def show_pre_processed_digit(image,x,y):
    image=resize(image,width=600) # use the resize helper from utilities.resizer (imutils is not imported in this notebook)
(puzzleImage,warped)=find_puzzle(image,debug=args["debug"]>0)
startX=x*stepX
startY=y*stepY
endX=(x+1)*stepX
endY=(y+1)*stepY
cell=warped[startY:endY,startX:endX]
digit=extract_digit(cell,debug=args["debug"]>0)
roi=cv2.resize(digit,(28,28))
roi=roi.astype("float")/255.0
plt.imshow(roi,cmap="gray")
def predict(sm):
sm=img_to_array(sm)
sm=np.expand_dims(sm,axis=0)
pred=model.predict(sm).argmax(axis=1)[0]
print("Prediction is ",pred)
###Output
_____no_output_____
###Markdown
A parameter dictionary for the task
###Code
args={"model":"models/digit_classifier1.h5",
"image":"images/2/1.jpg",
"debug":-1}
###Output
_____no_output_____
###Markdown
The debug argument can be set to any positive value if we want to view the intermediate outputs during the process To Do:**1) Given a picture of a Sudoku puzzle, we must be able to extract the exact puzzle from the image****2) Extract out every single cell(9*9=81 cells)** **3) Find out if that particular cell contains a digit or not**If yes, then extract the digit from that cell and feed it to our digit classifier modelwhich has been trained on data augmented MNIST dataset of numbers**4) Then feed the puzzle including the classified digits to the Sudoku solver from the Py Sudoku library** **5) Display the solved puzzle on top of the puzzle image provided by user** How:1) We use some basic image operations and effects, to reduce noise2) We use the (user_defined) find_puzzle function to identify the puzzle border3) We use the (user_defined) extract_digit function to extract number(if it exists) within each cell4) After extracting digits, we load the already trained keras model to classify those digits5) We also try to avoid minor misclassifications,by identifying pairs of numbers that are confusing to our modeland we try to rectify it.(Remember NO model works 100% accurately)6) We feed the above detected puzzle(with classified digits) to py Sudoku solver function7) We use OpenCV to display the solved puzzle on top of the input image Now the question arises what if the the digits are misclassified?The user generally provides **a single picture.**But that picture might have different backgrounds, different lighting conditions,different angle etc..There are various such features which tend to make the model **misclassify digits** in it How do we address the above issue ?We use the **ImageDataGenerator** API from tensorflow.keras to perform augmentation on the input imageThen we use multiple images from the generator and try to classify digits in the puzzle.We display the detected puzzle with the classified digits to the userBased on **user feedback**, we choose to use the current image...or to try the next image from the generator *Note*: Make sure you set the debug paramater to a positive value only when there is a need to view outputs of intermediate steps Step 1) Creating ImageDataGenerator instance and appling it on the input image So,we have chosen as image to perform the task on , by including the path in the args dictionary
###Code
img=cv2.imread(args["image"])
print(img.shape)
disp(img)
# Since ImageDataGenerator expects data to be in batches, we reshape the image as batch of 1 image
img=np.array([img])
print(img.shape)
# Instantiate
aug=ImageDataGenerator(rotation_range=3,
zoom_range=0.1,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.1,
brightness_range=(1.3,1.6))
# Apply to input image
aug_img=aug.flow(img)
###Output
(4000, 1824, 3)
(1, 4000, 1824, 3)
###Markdown
Step 2) Loading modelUse the load_model function of Keras to load our saved model
###Code
model=load_model(args["model"])
###Output
_____no_output_____
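###Markdown
The classifier itself was trained beforehand; the sketch below is a hedged guess at how such a digit classifier could be trained on augmented MNIST (the architecture, augmentation ranges and output file name are assumptions, not the actual script behind digit_classifier1.h5).
###Code
# Sketch only: one possible training setup for a digit classifier on augmented MNIST.
from tensorflow.keras.datasets import mnist
from tensorflow.keras import layers, models

(trainX, trainY), (testX, testY) = mnist.load_data()
trainX = trainX.reshape(-1, 28, 28, 1).astype("float32") / 255.0
testX = testX.reshape(-1, 28, 28, 1).astype("float32") / 255.0

clf = models.Sequential([
    layers.Conv2D(32, (3, 3), activation="relu", input_shape=(28, 28, 1)),
    layers.MaxPooling2D(),
    layers.Conv2D(64, (3, 3), activation="relu"),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation="relu"),
    layers.Dense(10, activation="softmax"),
])
clf.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])

# augment the training digits so the model tolerates the distortions seen in photos
train_aug = ImageDataGenerator(rotation_range=10, zoom_range=0.1,
                               width_shift_range=0.1, height_shift_range=0.1)
clf.fit(train_aug.flow(trainX, trainY, batch_size=128),
        validation_data=(testX, testY), epochs=10)
clf.save("digit_classifier_sketch.h5")  # hypothetical file name, not the notebook's model
###Output
_____no_output_____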
###Markdown
Note: From here on the next few cells show what parts of a whole process do We will put them all together at the end Step 3) Resize the image to desired size and use the find_puzzle function we defined to detect puzzle outline Suppose we obtain an image through some augmentation
###Code
for temp in range(1):
    image=next(aug_img).astype("uint8").reshape((4000,1824,3))
image=resize(image,width=600)
# Using find_puzzle function
# It returns the original image and the detected puzzle image
(puzzleImage,warped)=find_puzzle(image,debug=args["debug"]>0)
###Output
_____no_output_____
###Markdown
Step 4) So to determine the dimensions and location of each cell we define some constants
###Code
stepX=warped.shape[1]//9
stepY=warped.shape[0]//9
cellLocs=[]
board=np.zeros((9,9),dtype="int")
###Output
_____no_output_____
###Markdown
Exp : *step x and step y are constants we use for extracting cell locations while traversing the puzzle*cellLocs is a list we shall use later for displaying output to user*board is the board of numbers we feed to the Sudoku solving algorithm Step 5)During this step we traverse through all 81 cells of the puzzle,and at each cell we perform a set of operations which will ultimately give us the detected and classified puzzle board
###Code
disp(warped)
for y in range(0,9): # travel through each row
row=[]
for x in range(0,9): # travel through each column of that row
startX = x * stepX
startY = y * stepY
endX = (x+1) * stepX
endY = (y+1) * stepY
row.append((startX,startY,endX,endY))
# Now take out that particular cell from the whole image
cell=warped[startY:endY,startX:endX]
# Use the extract_digit function we defined to get the digit(if present) in that cell
digit=extract_digit(cell,debug=args["debug"]>0)
# If digit is present, feed it to the model for prediction
if digit is not None:
roi=cv2.resize(digit,(28,28))
roi=roi.astype("float")/255.0
roi=img_to_array(roi)
# Since we trained the model on (28,28,1) images rather than (28,28)
roi=np.expand_dims(roi,axis=0)
if model.predict(roi).argmax(axis=1)[0] in [0,6,8]:
roi=cv2.resize(digit,(28,28))
roi=roi.astype("float32")/255.0
roi=cv2.medianBlur(roi,5)
roi=img_to_array(roi)
roi=np.expand_dims(roi,axis=0)
if model.predict(roi).argmax(axis=1)[0] in [1,7]:
roi=cv2.resize(digit,(28,28))
roi=roi.astype("float")/255.0
roi=cv2.GaussianBlur(roi,(5,5),2)
roi=img_to_array(roi)
roi=np.expand_dims(roi,axis=0)
pred=model.predict(roi).argmax(axis=1)[0]
# Update the board with the classified digit
board[y,x] = pred
cellLocs.append(row)
###Output
_____no_output_____
###Markdown
Exp :* While performing classification during many testing phase,it was found that some 6s and 7s get misclassified as 8s and 1s respectively.* Lines 28-40 are to perform a bit of additonal cleaning or blurring to reduce the misclassification* These are purely based on research and muliple testings and trials.The additonal operation parameters are not arbitrary
###Code
puzzle=Sudoku(3,3,board=board.tolist())
print("Sudoku board obtained from OCR..")
puzzle.show()
###Output
Sudoku board obtained from OCR..
+-------+-------+-------+
| 4 | | 6 1 |
| 9 | 8 | |
| 3 8 | 6 1 | 7 9 |
+-------+-------+-------+
| | 1 | 1 9 |
| | 5 | |
| 2 7 | 1 | |
+-------+-------+-------+
| 5 6 | 3 2 | 8 4 |
| | 5 | 9 |
| 9 2 | | 3 |
+-------+-------+-------+
###Markdown
Note : It is not an issue if the puzzle was detected incorrectly in the above cellWhile integrating all parts, we will use multiple augmented images(rather than 1) Run the 2 following cells only if previously the puzzle was detected correctly
###Code
solution=puzzle.solve()
solution.show_full()
###Output
---------------------------
9x9 (3x3) SUDOKU PUZZLE
Difficulty: SOLVED
---------------------------
+-------+-------+-------+
| 7 5 4 | 2 9 3 | 6 1 8 |
| 6 1 9 | 8 4 7 | 2 3 5 |
| 2 3 8 | 5 6 1 | 4 7 9 |
+-------+-------+-------+
| 3 4 5 | 7 8 1 | 1 9 6 |
| 1 8 6 | 9 5 4 | 7 2 3 |
| 9 2 7 | 1 3 6 | 5 8 4 |
+-------+-------+-------+
| 5 6 1 | 3 2 9 | 8 4 7 |
| 8 7 3 | 4 1 5 | 9 6 2 |
| 4 9 2 | 6 7 8 | 3 5 1 |
+-------+-------+-------+
###Markdown
Step 6) Displaying result to the user
###Code
for (cellRow, boardRow) in zip(cellLocs, solution.board):
# Loop over individual cells in the row
for (box, digit) in zip(cellRow, boardRow):
# Unpack the cell coordinates
startX, startY, endX, endY = box
# Compute coordinates of where the digit will be drawn
# on the output puzzle image
textX = int((endX - startX) * 0.33)
textY = int((endY - startY) * -0.2)
textX += startX
textY += endY
# Draw the result digit on the Sudoku puzzle image
cv2.putText(puzzleImage, str(digit), (textX, textY),
cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 255), 2)
# Displaythe output image
cv2.imshow("Sudoku Result", puzzleImage)
cv2.waitKey(0)
###Output
_____no_output_____
###Markdown
Putting it all together In this section, we integrate all the above mentioned parts **Changes made*** Here we use multiple augmented images.* First we use one image from the generator,and detect puzzle in it* We display the detected puzzle and ask the user to check * If the puzzle has been detected correctly then we proceed with solving and displaying process* If not,we take another(next) image from the generator and perform the same task* We continue this until the user accepts that the puzzle has been detected correctly
###Code
def check_val(board):
default=True
for row in board:
for dig in row:
if dig is None:
return False
return default
img=cv2.imread(args["image"])
img=img.reshape((1,4000,1824,3))
aug=ImageDataGenerator(rotation_range=3,zoom_range=0.1,width_shift_range=0.05,height_shift_range=0.05,shear_range=0.1,brightness_range=(1.2,1.5))
aug=aug.flow(img)
print("Loading the model..")
model=load_model(args["model"])
print("Model loaded successfully")
for attempt in range(10):
# Get image from generator
image=next(aug_img).astype("uint8").reshape((4000,1824,3))
print("Processing image")
image=resize(image,width=600)
# Use the fn we defined to get images of the puzzle with outline
(puzzleImage,warped)=find_puzzle(image,debug=args["debug"]>0)
# Initialize an empty 9*9 array where we can fill the digits(as numbers)
board=np.zeros((9,9),dtype="int")
# The warped image is composed of 9*9 cells
# So for one cell size we need to divide total img dimensions by 9
stepX=warped.shape[1]//9
stepY=warped.shape[0]//9
cellLocs=[] # Useful while displaying answer digits onto the question image
for y in range(0,9):
row=[]
for x in range(0,9):
startX=x*stepX
startY=y*stepY
endX=(x+1)*stepX
endY=(y+1)*stepY
row.append((startX,startY,endX,endY))
# get that cell from the whole image
cell=warped[startY:endY,startX:endX]
digit=extract_digit(cell,debug=args["debug"]>0)
if digit is not None:
# resize the digit image to mnist type and convert to array
roi=cv2.resize(digit,(28,28))
roi=roi.astype("float")/255.0
roi=img_to_array(roi)
# SInce we trained on (28,28,1) images rather than (28,28)
roi=np.expand_dims(roi,axis=0)
if model.predict(roi).argmax(axis=1)[0] in [0,6,8]:
roi=cv2.resize(digit,(28,28))
roi=roi.astype("float32")/255.0
roi=cv2.medianBlur(roi,5)
roi=img_to_array(roi)
# SInce we trained on (28,28,1) images rather than (28,28)
roi=np.expand_dims(roi,axis=0)
if model.predict(roi).argmax(axis=1)[0] in [1,7]:
roi=cv2.resize(digit,(28,28))
roi=roi.astype("float")/255.0
roi=cv2.GaussianBlur(roi,(5,5),2)
roi=img_to_array(roi)
# SInce we trained on (28,28,1) images rather than (28,28)
roi=np.expand_dims(roi,axis=0)
# Pass the digit image array through the model we trained
pred=model.predict(roi).argmax(axis=1)[0]
#plt.title(pred)
#plt.show()
board[y,x]=pred
cellLocs.append(row)
puzzle=Sudoku(3,3,board=board.tolist())
print("Sudoku board obtained from OCR..")
puzzle.show()
temp=0
# Using a text widget to receive user feedback
val=input("Has the puzzle been detected correctly? y or n ")
if val=='y':
solution=puzzle.solve()
if check_val(solution.board):
solution.show_full()
temp+=1
for (cellRow, boardRow) in zip(cellLocs, solution.board):
# Loop over individual cells in the row
for (box, digit) in zip(cellRow, boardRow):
# Unpack the cell coordinates
startX, startY, endX, endY = box
# Compute coordinates of where the digit will be drawn
# on the output puzzle image
textX = int((endX - startX) * 0.33)
textY = int((endY - startY) * -0.2)
textX += startX
textY += endY
# Draw the result digit on the Sudoku puzzle image
cv2.putText(puzzleImage, str(digit), (textX, textY),
cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 255), 2)
# Display the output image
cv2.imshow("Sudoku Result", puzzleImage)
cv2.waitKey(0)
break
else:
print("Invalid puzzle")
if(temp==0):
print("Unable to detect puzzle and its elements properly..Please try feeding a similar picture again")
###Output
Processing image
Sudoku board obtained from OCR..
+-------+-------+-------+
| 4 | | 6 1 |
| 9 | 8 | |
| 3 8 | 6 1 | 7 9 |
+-------+-------+-------+
| | 7 | 1 9 |
| | 5 | |
| 2 7 | 1 | |
+-------+-------+-------+
| 5 6 | 3 2 | 8 4 |
| | 5 | 9 |
| 9 2 | | 3 |
+-------+-------+-------+
Has the puzzle been detected correctly? y or n y
---------------------------
9x9 (3x3) SUDOKU PUZZLE
Difficulty: SOLVED
---------------------------
+-------+-------+-------+
| 7 5 4 | 2 9 3 | 6 1 8 |
| 6 1 9 | 8 7 4 | 2 3 5 |
| 2 3 8 | 5 6 1 | 4 7 9 |
+-------+-------+-------+
| 3 8 5 | 6 4 7 | 1 9 2 |
| 1 4 6 | 9 5 2 | 7 8 3 |
| 9 2 7 | 1 3 8 | 5 6 4 |
+-------+-------+-------+
| 5 6 1 | 3 2 9 | 8 4 7 |
| 8 7 3 | 4 1 5 | 9 2 6 |
| 4 9 2 | 7 8 6 | 3 5 1 |
+-------+-------+-------+
|
Tutorials/QueryTutorial.ipynb | ###Markdown
DSGRN Query Functions
###Code
from DSGRN import *
database = Database("querytest.db")
database.parametergraph.dimension()
###Output
_____no_output_____
###Markdown
We show here the network being considered in this example:
###Code
database
print(database.network.specification())
###Output
X1 : (X1)(~X3)
X2 : X1
X3 : (X1)(~X2)
###Markdown
Query OverviewIn order to perform queries on the database sometimes preprocessing is necessary. In order to give a uniform approach to this we have adopted a design where each query corresponds to a python class whose name ends with the suffix `Query`. Each class has a constructor (i.e. `__init__` method) which accepts some arguments to indicate parameters of the query (e.g. which database).We currently have the following queries:| Name | Query Parameters | Query Input | Query Output || ---- | ----------- | ------------ | --- || MonostableQuery | Database | Morse Graph Index | True/False || BistableQuery | Database | Morse Graph Index | True/False || MultistableQuery | Database | Morse Graph Index | True/False || SingleGeneQuery | Database, Name of Network Node | Reduced Parameter Index | Annotated Factor Graph || SingleFixedPointQuery | Database, Domain Bounds | Morse Graph Index | True/False || DoubleFixedPointQuery | Database, pair of Domain Bounds | Morse Graph Index | True/False || MonostableFixedPointQuery | Database, Domain Bounds | Morse Graph Index | True/False || InducibilityQuery | Database, Name of Network Node, pair of Domain Bounds | Reduced Parameter Index | Triple of True/False || HysteresisQuery | Database, Name of Network Node, pair of Domain Bounds | Reduced Parameter Index | True/False |When the query object is constructed, it is passed the required parameters and any preprocessing that is required to support the query is done. In some cases the preprocessing is trivial, and in other cases it may be more extensive. After the object is constructed, it can be used to perform queries. This is accomplished by invoking the objects `__call__` operator (i.e. treating the object as a function). The call operator receives the query input and returns the query output. For example:```single_gene_query = SingleGeneQuery(database, "X1")graph = single_gene_query(43)```In the first line, the query object is created with the query parameters `database` and `"X1"`. This results in computation being done to organize a table in the database to quickly support "Single Gene Queries". The created object `single_gene_query` has a method `__call__` which allows it to be called as a function in order to produce query results. The input of the `__call__` method is a "reduced parameter index" and what is returned will be an annotated graph structure specific to what this query does.In many cases the input to the query is a Morse Graph Index and the output is a boolean value which indicates whether or not the morse graph index is in a precomputed set of matches. These query classes typically also support another method `matches` which simply returns the set of matches. This allows the following code:```set_of_matches = SingleFixedPointQuery(database, domain_bounds).matches()```In this code, a query object is created, the `matches` method is called and returns the set of matches, but no reference to the query object is kept. When using this paradigm one should be careful not to unnecessarily create the same query multiple times, or else the same preprocessing step would be repeated. MonostableQuery, BistableQuery, and MultistableQuery
###Code
monostable_query_object = MonostableQuery(database)
bistable_query_object = BistableQuery(database)
multistable_query_object = MultistableQuery(database)
###Output
2017-10-24 13:17:41.078862:
MonostableQuery :: initializing
2017-10-24 13:17:41.080054:
MonostableQuery :: select MorseGraphIndex from (select MorseGraphIndex, count(*) as StableCount from (select MorseGraphIndex,Vertex from MorseGraphVertices except select MorseGraphIndex,Source from MorseGraphEdges) group by MorseGraphIndex) where StableCount=1;
2017-10-24 13:17:41.082888:
MonostableQuery :: constructed
###Markdown
Evaluate the query on a few Morse Graph Indices:
###Code
monostable_query_object(0)
monostable_query_object(1)
###Output
_____no_output_____
###Markdown
How many matches for each type of query?
###Code
print([len(monostable_query_object.matches()), len(bistable_query_object.matches()), len(multistable_query_object.matches())])
###Output
[45, 98, 110]
###Markdown
Print the list of Morse graph indices which satisfy the monostable query.
###Code
print(monostable_query_object.matches())
###Output
frozenset([0, 2, 3, 6, 9, 10, 11, 130, 18, 19, 20, 21, 153, 25, 26, 28, 30, 32, 34, 36, 38, 40, 43, 49, 50, 53, 55, 56, 59, 60, 74, 75, 78, 79, 89, 96, 131, 102, 104, 146, 113, 148, 122, 123, 127])
###Markdown
Directly verify that all returned matches satisfy the corresponding query:
###Code
all( monostable_query_object(mgi) for mgi in monostable_query_object.matches() )
database.DrawMorseGraph(131)
###Output
_____no_output_____
###Markdown
SingleGeneQueryOur interest is in fixing all combinatorial parameters except for the logic parameter corresponding to a single node and considering the set of parameters corresponding to this choice. Due to the factorization of the parameter graph, this set of parameters is isomorphic to the factor graph associated to the node of interest. In order to handle repeated queries efficiently, it is necessary to prepare a table which reorders information so that it is I/O efficient for algorithms to retrieve. The following does this:
###Code
single_gene_query = SingleGeneQuery(database, "X1")
###Output
2017-05-15 16:10:57.880238:
SingleGeneQuery(querytest.db, X1)
2017-05-15 16:10:57.885737:
SingleGeneQuery: FactorGraph generated
2017-05-15 16:10:57.886477:
SingleGeneQuery: SingleGeneQuery attribute missing from python database object.
2017-05-15 16:10:57.887072:
SingleGeneQuery: SingleGeneQuery attributes created.
2017-05-15 16:10:57.887746:
SingleGeneQuery: database structure unaware of gene X1
2017-05-15 16:10:57.888523:
SingleGeneQuery: sanitized X1
2017-05-15 16:10:57.889431:
SingleGeneQuery: cursor constructed
2017-05-15 16:10:57.890141:
SingleGeneQuery: checked for table
2017-05-15 16:10:57.890790:
SingleGeneQuery: added gene to python database object.
2017-05-15 16:10:57.891593:
SingleGeneQuery: constructed
###Markdown
For a single gene query, the queries are graphs isomorphic to the factor graph, and the number of such queries corresponds to the number of "reduced parameter indices". This will be explained in more depth shortly. To help explain this we first examine the following computation:
###Code
N = single_gene_query.number_of_gene_parameters()
M = single_gene_query.number_of_reduced_parameters()
L = database.parametergraph.size()
print([N, M, N*M, L])
###Output
[50L, 108L, 5400L, 5400L]
###Markdown
Importantly, this factorization corresponds to a way to convert a parameter index (an integer) into a pair of integers, one in [0,50) and the other in [0,108), which we call the _gene parameter index_ and the _reduced parameter index_. The manner in which this is done is technical and has to do with how the integers encode combinatorial parameters using a mixed-radix system. Roughly speaking, the gene parameter index is obtained by extracting a digit from the mixed-radix representation of the parameter index, and what remains after removing the digit entirely (not just setting it to 0) is the reduced parameter index. This process can be reversed as well, so both the original parameter index and the (GeneParameterIndex, ReducedParameterIndex) pair are equivalent representations. What the prepare step we just accomplished did was create a table with the database's information which sorted the information by ReducedParameterIndex first and GeneParameterIndex second. (The original database sorts by ParameterIndex.) Performing a single-gene queryNow we perform a query. The result which the query returns is a graph. This graph contains data which has the raw information obtained from the query in the form of a python dictionary (i,e, `{key1:value1, key2:value2,...}`) where the keys are gene parameter indices, and the values are tuples `(hexcode, parameter index, morsegraphindex)`
###Code
graph = single_gene_query(43) # 43 is a "reduced parameter index"
graph.data
###Output
_____no_output_____
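###Markdown
The digit extraction described above can be illustrated with plain integer arithmetic. This is a hedged, generic sketch (not DSGRN's internal API): R is the radix of the extracted logic-parameter digit (here 50) and P is its place value in the mixed-radix encoding, which depends on the network and is not computed here.
###Code
# Generic mixed-radix digit split/recombination, for illustration only.
def split_parameter_index(pi, R, P):
    gene_parameter_index = (pi // P) % R
    reduced_parameter_index = (pi // (P * R)) * P + (pi % P)
    return gene_parameter_index, reduced_parameter_index

def join_parameter_index(gpi, rpi, R, P):
    return (rpi // P) * (P * R) + gpi * P + (rpi % P)

# Round-trip check on an arbitrary example with R=50 and an assumed place value P=1:
gpi, rpi = split_parameter_index(2199, 50, 1)
assert join_parameter_index(gpi, rpi, 50, 1) == 2199
###Output
_____no_output_____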
###Markdown
The query above returns the "MorseGraphIndex" which can be used with the database to retrieve the Morse graph. However we might only want to know if the Morse graph has a certain property. For example, we might want to know if it has 1 minimal node, or multiple (2 or more) minimal nodes. We create a function which takes a "MorseGraphIndex" and returns True if the associated Morse graph has multiple minimal nodes and False otherwise. Visualizing the queryThe above information describes a partially ordered set. In this poset each node corresponds to a parameter index. Each parameter index corresponds to a pair of sub-indices called the "GeneParameterIndex" and the "ReducedParameterIndex" which are the integers resulting from splitting out the "digit" corresponding to the logic parameter of the gene of interest. The "GeneParameterIndex" corresponds directly to the logic parameter of the gene of interest which can also be represented with a "HexCode". Using the hex code representation we learn adjacency information (due to the GPG=CPG theorem). Since our query gives us all of this information, the query automatically determines this information and can display itself as a graph of the labelled poset corresponding to the query. It also comes equipped with some methods for checking graph properties (as we demonstrate later). The nodes themselves are labelled according to their "ParameterIndex" and "MorseGraphIndex":
###Code
graph
###Output
_____no_output_____
###Markdown
Features of the graph queryIn addition to being a graph there are other attributes of the query that are of use. In particular, The graph is as follows: * The vertices of the graph (`graph.vertices`) are named according to Gene Parameter Index (gpi). * `graph.edges` contains the directed edge p -> q iff p < q and the associated logic parameters are adjacent.* The graph is (by default) labelled with pairs (Parameter index, Morse graph index). The default graph labelling can be changed by replacing the `label` attribute with a new function. A `label` function takes the vertex name (i.e. gpi) as input and returns a label string.* The graph is (by default) colored blue. The default graph coloring can be changed by replacing teh `color` attribute with a new function. A `color` function takes the vertex name as an input and returns a new color string.In addition the following extra structures are provided:* `graph.data` is a dictionary from gene parameter index to (hex code, parameter index, morse graph index)* `graph.mgi` is a function which accepts a gpi and returns the associated Morse graph idnex* `graph.num_inputs` is the number of network edges which are inputs to the gene associated with the query* `graph.num_outputs`is the number of network edges which are outputs to the gene associated with the query* `graph.essential` is a boolean-valued function which determines if each vertex corresponds to an essential parameter node Changing the color to inspect node propertiesIn the above graph all the nodes have the same color. We can change this so that the color of the nodes reflects some property of our choosing. As an example, we might ask if a node has a Morse graph with multistability -- if so, we can color the node red, otherwise we can color the node blue. This is done as follows:
###Code
# Create a function which tells us if each vertex has the multistable property:
is_multistable = MultistableQuery(database)
# Change the coloring method of the graph to check for multistability:
graph.color = lambda v : "red" if is_multistable(v) else "blue"
# Display the graph:
graph
###Output
_____no_output_____
###Markdown
Testing the query resultThe above query indicates that some of the parameters associated with the query had multistability and some did not. In order to make sure everything is working properly, let's take an example of each class and draw the Morse graph. For instance, parameter index 2199 has Morse Graph 18, and is colored blue, which is supposed to correspond to a lack of multistability. We check this and find it is indeed the case:
###Code
database.DrawMorseGraph(18)
###Output
_____no_output_____
###Markdown
Similarly, our query result indicates parameter index 2180 corresponds to Morse Graph 84, which is colored red, indicated it _does_ exhibit multistability. We check this as well:
###Code
database.DrawMorseGraph(84)
###Output
_____no_output_____
###Markdown
SingleFixedPointQuery, DoubleFixedPointQueryWe have the capability to retrieve parameter indices for which a FP occurs in a certain location. We call these locations "domains". A domain can be indicated by which "bin" it corresponds to along each dimension. A bin is an interval bounded by either (a) consecutive thresholds in a given dimension, (b) between 0 and the first threshold, or (c) bounded below by the last threshold and unbounded above. In particular, for each dimension the number of thresholds is equal to the number of out-edges of the corresponding network node. If there are m such thresholds then there are m+1 locations (bins) along this dimension which we label 0, 1, 2, ..., m. This allows us to describe the location of a domain by listing bin numbers for each dimension.We can consider many domains at once which are grouped together in rectangular prisms. To represent these, we create a dictionary object where for each variable we product a key value pair where the key is the variable name and the value is a list of two integers [a,b] such that we mean that the variable can only occur in the bins between a and b (inclusive). If we omit a variable from the dictionary it is allowed to be in any bin. Also, if a=b we can simply write "a" instead of "[a,a]". For example:
###Code
bounds110 = {"X1":1,"X2":1,"X3":0} # Domain 1,1,0
bounds210 = {"X1":[2,2],"X2":[1,1],"X3":[0,1]} # Domain 2,1,0 or Domain 2,1,1
bounds311 = {"X1":[3,3],"X2":[1,1],"X3":[1,1]} # Domain 3,1,1
###Output
_____no_output_____
###Markdown
Using these "bounds" variables to represent groups of domains, we can use query functions which ask for the collection of morse graphs which have an "FP" node labelled with a domain in those bounds. For example, to find the set of Morse Graph indices corresponding to fixed points in the region specified by "bounds110":
###Code
matches110 = SingleFixedPointQuery(database, bounds110).matches()
###Output
2017-05-15 16:10:58.070742:
SingleFixedPointQuery :: initializing
2017-05-15 16:10:58.071756:
SingleFixedPointQuery :: calling MatchQuery
2017-05-15 16:10:58.072693:
MatchQuery({'X2': 1, 'X3': 0, 'X1': 1}, Matches)
2017-05-15 16:10:58.073469:
MatchQuery :: built expressions ["Label like 'FP { 1, _, _%'", "Label like 'FP { _, 1, _%'", "Label like 'FP { _, _, 0%'"]
2017-05-15 16:10:58.074192:
MatchQuery :: create temp table tmpMatches1 as select * from MorseGraphAnnotations where Label like 'FP { 1, _, _%';
2017-05-15 16:10:58.075960:
MatchQuery :: create temp table tmpMatches2 as select * from tmpMatches1 where Label like 'FP { _, 1, _%';
2017-05-15 16:10:58.076738:
MatchQuery :: create temp table Matches as select * from tmpMatches2 where Label like 'FP { _, _, 0%';
2017-05-15 16:10:58.077579:
MatchQuery :: constructed
2017-05-15 16:10:58.078225:
SingleFixedPointQuery :: select MorseGraphIndex from Matches;
2017-05-15 16:10:58.079000:
SingleFixedPointQuery :: drop table Matches;
2017-05-15 16:10:58.079720:
SingleFixedPointQuery :: constructed
###Markdown
Find the set of Morse Graph indices corresponding to fixed points in the region specified by "bounds210":
###Code
matches210 = SingleFixedPointQuery(database, bounds210).matches()
###Output
2017-05-15 16:10:58.084631:
SingleFixedPointQuery :: initializing
2017-05-15 16:10:58.085543:
SingleFixedPointQuery :: calling MatchQuery
2017-05-15 16:10:58.086846:
MatchQuery({'X2': [1, 1], 'X3': [0, 1], 'X1': [2, 2]}, Matches)
2017-05-15 16:10:58.087479:
MatchQuery :: built expressions ["Label like 'FP { 2, _, _%'", "Label like 'FP { _, 1, _%'", "Label like 'FP { _, _, 0%' or Label like 'FP { _, _, 1%'"]
2017-05-15 16:10:58.088084:
MatchQuery :: create temp table tmpMatches1 as select * from MorseGraphAnnotations where Label like 'FP { 2, _, _%';
2017-05-15 16:10:58.089078:
MatchQuery :: create temp table tmpMatches2 as select * from tmpMatches1 where Label like 'FP { _, 1, _%';
2017-05-15 16:10:58.089944:
MatchQuery :: create temp table Matches as select * from tmpMatches2 where Label like 'FP { _, _, 0%' or Label like 'FP { _, _, 1%';
2017-05-15 16:10:58.090938:
MatchQuery :: constructed
2017-05-15 16:10:58.091900:
SingleFixedPointQuery :: select MorseGraphIndex from Matches;
2017-05-15 16:10:58.092890:
SingleFixedPointQuery :: drop table Matches;
2017-05-15 16:10:58.093663:
SingleFixedPointQuery :: constructed
###Markdown
Find the set of Morse Graph indices corresponding to fixed points in the region specified by "bounds311":
###Code
matches311 = SingleFixedPointQuery(database, bounds311).matches()
###Output
2017-05-15 16:10:58.098953:
SingleFixedPointQuery :: initializing
2017-05-15 16:10:58.100141:
SingleFixedPointQuery :: calling MatchQuery
2017-05-15 16:10:58.100989:
MatchQuery({'X2': [1, 1], 'X3': [1, 1], 'X1': [3, 3]}, Matches)
2017-05-15 16:10:58.101795:
MatchQuery :: built expressions ["Label like 'FP { 3, _, _%'", "Label like 'FP { _, 1, _%'", "Label like 'FP { _, _, 1%'"]
2017-05-15 16:10:58.102456:
MatchQuery :: create temp table tmpMatches1 as select * from MorseGraphAnnotations where Label like 'FP { 3, _, _%';
2017-05-15 16:10:58.103371:
MatchQuery :: create temp table tmpMatches2 as select * from tmpMatches1 where Label like 'FP { _, 1, _%';
2017-05-15 16:10:58.104231:
MatchQuery :: create temp table Matches as select * from tmpMatches2 where Label like 'FP { _, _, 1%';
2017-05-15 16:10:58.104981:
MatchQuery :: constructed
2017-05-15 16:10:58.105630:
SingleFixedPointQuery :: select MorseGraphIndex from Matches;
2017-05-15 16:10:58.106495:
SingleFixedPointQuery :: drop table Matches;
2017-05-15 16:10:58.107423:
SingleFixedPointQuery :: constructed
###Markdown
Find the set of Morse Graph indices with both a fixed point in 1,1,0 and a fixed point in 3,1,1:
###Code
matches_both = DoubleFixedPointQuery(database, bounds110,bounds311).matches()
len(matches110), len(matches210), len(matches311), len(matches_both)
matches_both
###Output
_____no_output_____
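###Markdown
Since the two bounds constrain X1 to different bins, they describe disjoint regions, so one would expect the double query to agree with the intersection of the two single-query results. The following sanity-check sketch rests on that assumption about the query semantics rather than on anything established above:
###Code
# Sketch: for disjoint regions, the double query should match the intersection of the single queries
set(matches_both) == (set(matches110) & set(matches311))
###Output
_____no_output_____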
###Markdown
Queries on Graph Properties. It is possible to make queries about graph properties. If we have developed a set of queries about the vertices, we can ask several kinds of questions: 1) Does the minimal node have a certain property? 2) Does the maximal node have a certain property? 3) Must every path from the minimal node to the maximal node pass through a node with a certain property? We can even ask questions about how many paths from the minimal node to the maximal node have a certain property (or the fraction of paths). To help visualize the examples, we color the graph "green", "blue", "red", and "yellow" according to each vertex's status with regard to the FP location query examples above. Specifically:
###Code
graph.color = lambda v : "green" if graph.mgi(v) in matches_both else ("blue" if graph.mgi(v) in matches210 else ( "yellow" if graph.mgi(v) in matches311 else "red"))
graph
minimum_gpi = 0
maximum_gpi = len(graph.vertices) - 1
###Output
_____no_output_____
###Markdown
Q1. Is the minimal node red?
###Code
graph.color(minimum_gpi) == "red"
###Output
_____no_output_____
###Markdown
Q2. Is the maximal node yellow?
###Code
graph.color(maximum_gpi) == "yellow"
###Output
_____no_output_____
###Markdown
Q3(a). Is there an essential green node?
###Code
any( graph.essential(v) and graph.color(v) == "green" for v in graph.vertices)
###Output
_____no_output_____
###Markdown
List all essential green nodes:
###Code
[v for v in graph.vertices if graph.essential(v) and graph.color(v) == "green"]
###Output
_____no_output_____
###Markdown
Q3(b). Does every path from min to max pass through green?
###Code
predicate = lambda v : graph.color(v) == "green"
graph.unavoidable(minimum_gpi,maximum_gpi,predicate)
###Output
_____no_output_____
###Markdown
No, they don't. What percentage of them pass through green?
###Code
subgraph = graph.subgraph(lambda v : not predicate(v))
number_missing_green = subgraph.numberOfPaths(minimum_gpi,maximum_gpi)
total_number = graph.numberOfPaths(minimum_gpi,maximum_gpi)
print(str((1.0 - float(number_missing_green)/float(total_number))*100.0) + "%")
###Output
11.0929853181%
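###Markdown
The same computation can be wrapped in a small helper so it can be reused for other predicates. This is just a sketch built from the `subgraph` and `numberOfPaths` methods already used above; the helper name is our own:
###Code
# Sketch: fraction of source-to-target paths that pass through at least one vertex satisfying predicate
def fraction_of_paths_through(g, source, target, predicate):
    total = g.numberOfPaths(source, target)
    avoiding = g.subgraph(lambda v : not predicate(v)).numberOfPaths(source, target)
    return 1.0 - float(avoiding)/float(total)
fraction_of_paths_through(graph, minimum_gpi, maximum_gpi, lambda v : graph.color(v) == "green")
###Output
_____no_output_____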
###Markdown
Q3(b)'. Does every path from min to max pass through a blue vertex?
###Code
predicate = lambda v : graph.color(v) == "blue"
graph.unavoidable(minimum_gpi,maximum_gpi,predicate)
###Output
_____no_output_____
###Markdown
That means there are zero paths from minimum to maximum in the subgraph where we remove the blue vertices, correct?
###Code
subgraph = graph.subgraph(lambda v : graph.color(v) != "blue")
if subgraph.numberOfPaths(minimum_gpi,maximum_gpi) == 0: print("Correct.")
###Output
Correct.
###Markdown
Q3(c). Is there an intermediate (neither max nor min) green node?
###Code
any( v != minimum_gpi and v != maximum_gpi and graph.color(v) == "green" for v in graph.vertices)
###Output
_____no_output_____
###Markdown
Visualizing the Essential parameter nodes:
###Code
graph.color = lambda v : "red" if graph.essential(v) else "green"
graph
###Output
_____no_output_____
###Markdown
InducibilityQuery
###Code
inducibility_query_object = InducibilityQuery(database, "X1", bounds110, bounds311)
reduced_parameters = range(0, inducibility_query_object.GeneQuery.number_of_reduced_parameters())
[ inducibility_query_object(rpi) for rpi in reduced_parameters ][0:10]
###Output
2017-05-15 16:10:58.353433:
SingleGeneQuery(0)
2017-05-15 16:10:58.354894:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.355778:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.356459:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.357104:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.357759:
SingleGeneQuery(1)
2017-05-15 16:10:58.358425:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.359456:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.360496:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.361434:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.362208:
SingleGeneQuery(2)
2017-05-15 16:10:58.362863:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.363653:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.364236:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.364833:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.365628:
SingleGeneQuery(3)
2017-05-15 16:10:58.366598:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.367553:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.368145:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.368834:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.369519:
SingleGeneQuery(4)
2017-05-15 16:10:58.370149:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.371194:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.371773:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.372440:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.373106:
SingleGeneQuery(5)
2017-05-15 16:10:58.373705:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.374781:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.375421:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.376008:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.377098:
SingleGeneQuery(6)
2017-05-15 16:10:58.377754:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.378449:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.379010:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.379593:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.380218:
SingleGeneQuery(7)
2017-05-15 16:10:58.380860:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.383934:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.384591:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.385307:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.385999:
SingleGeneQuery(8)
2017-05-15 16:10:58.386715:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.387695:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.388506:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.389115:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.389772:
SingleGeneQuery(9)
2017-05-15 16:10:58.390627:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.391484:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.392165:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.392831:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.393500:
SingleGeneQuery(10)
2017-05-15 16:10:58.394136:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.394892:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.395520:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.396098:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.396770:
SingleGeneQuery(11)
2017-05-15 16:10:58.397470:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.398428:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.398969:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.399582:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.400219:
SingleGeneQuery(12)
2017-05-15 16:10:58.400857:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.401668:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.402247:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.402857:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.403605:
SingleGeneQuery(13)
2017-05-15 16:10:58.404810:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.405697:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.406392:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.407027:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.407631:
SingleGeneQuery(14)
2017-05-15 16:10:58.408333:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.409138:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.409987:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.410882:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.411985:
SingleGeneQuery(15)
2017-05-15 16:10:58.412748:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.413834:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.414470:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.415093:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.415705:
SingleGeneQuery(16)
2017-05-15 16:10:58.416423:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.417442:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.418071:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.418720:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.419324:
SingleGeneQuery(17)
2017-05-15 16:10:58.420092:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.421147:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.421757:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.422458:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.423037:
SingleGeneQuery(18)
2017-05-15 16:10:58.423805:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.424666:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.425200:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.425894:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.426518:
SingleGeneQuery(19)
2017-05-15 16:10:58.427133:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.427888:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.428439:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.429060:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.429637:
SingleGeneQuery(20)
2017-05-15 16:10:58.430324:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.431114:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.431678:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.432295:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.432865:
SingleGeneQuery(21)
2017-05-15 16:10:58.433497:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.434347:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.435186:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.435942:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.436614:
SingleGeneQuery(22)
2017-05-15 16:10:58.437431:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.438952:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.439673:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.440371:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.441083:
SingleGeneQuery(23)
2017-05-15 16:10:58.441803:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.442775:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.443372:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.443936:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.444568:
SingleGeneQuery(24)
2017-05-15 16:10:58.445166:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.445872:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.446492:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.446985:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.447541:
SingleGeneQuery(25)
2017-05-15 16:10:58.448196:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.448878:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.449483:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.450093:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.450804:
SingleGeneQuery(26)
2017-05-15 16:10:58.451410:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.452330:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.452917:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.453622:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.454402:
SingleGeneQuery(27)
2017-05-15 16:10:58.455195:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.456220:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.457031:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.457571:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.458217:
SingleGeneQuery(28)
2017-05-15 16:10:58.458951:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.459837:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.460826:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.461492:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.462125:
SingleGeneQuery(29)
2017-05-15 16:10:58.462769:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.463577:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.464131:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.464711:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.465373:
SingleGeneQuery(30)
2017-05-15 16:10:58.466197:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.467336:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.468027:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.468665:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.469309:
SingleGeneQuery(31)
2017-05-15 16:10:58.470025:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.470913:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.471761:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.472342:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.473070:
SingleGeneQuery(32)
2017-05-15 16:10:58.473719:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.474520:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.475099:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.475671:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.476242:
SingleGeneQuery(33)
2017-05-15 16:10:58.477011:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.478076:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.478669:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.479289:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.479913:
SingleGeneQuery(34)
2017-05-15 16:10:58.480506:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.481204:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.481823:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.482461:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.483081:
SingleGeneQuery(35)
2017-05-15 16:10:58.483951:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.484750:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.485376:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.486062:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.486758:
SingleGeneQuery(36)
2017-05-15 16:10:58.487703:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.488572:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.489313:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.489920:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.490572:
SingleGeneQuery(37)
2017-05-15 16:10:58.491254:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.492124:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.492742:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.493419:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.494334:
SingleGeneQuery(38)
2017-05-15 16:10:58.495108:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.495901:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.496455:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.497096:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.497756:
SingleGeneQuery(39)
2017-05-15 16:10:58.498451:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.499256:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.499804:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.500356:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.501189:
SingleGeneQuery(40)
2017-05-15 16:10:58.501786:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.502548:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.503246:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.503884:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.504697:
SingleGeneQuery(41)
2017-05-15 16:10:58.505899:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.507514:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.508202:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.508803:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.509452:
SingleGeneQuery(42)
2017-05-15 16:10:58.510125:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.512193:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.513145:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.513762:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.514322:
SingleGeneQuery(43)
2017-05-15 16:10:58.514941:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.516390:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.517170:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.517759:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.518489:
SingleGeneQuery(44)
2017-05-15 16:10:58.519243:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.520580:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.521290:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.522376:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.523243:
SingleGeneQuery(45)
2017-05-15 16:10:58.523966:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.526406:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.527367:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.528332:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.528891:
SingleGeneQuery(46)
2017-05-15 16:10:58.529581:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.530826:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.531477:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.532084:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.532742:
SingleGeneQuery(47)
2017-05-15 16:10:58.533459:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.535114:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.535745:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.536359:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.537062:
SingleGeneQuery(48)
2017-05-15 16:10:58.537714:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.538605:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.539269:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.539904:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.540650:
SingleGeneQuery(49)
2017-05-15 16:10:58.541673:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.542710:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.543357:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.544040:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.544749:
SingleGeneQuery(50)
2017-05-15 16:10:58.545301:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.546032:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.546616:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.547233:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.547795:
SingleGeneQuery(51)
2017-05-15 16:10:58.548469:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.549516:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.550217:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.550804:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.551415:
SingleGeneQuery(52)
2017-05-15 16:10:58.552114:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.553625:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.554769:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.555386:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.556166:
SingleGeneQuery(53)
2017-05-15 16:10:58.556811:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.558333:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.558978:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.559668:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.560516:
SingleGeneQuery(54)
2017-05-15 16:10:58.561684:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.563365:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.564115:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.564783:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.565422:
SingleGeneQuery(55)
2017-05-15 16:10:58.566112:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.567761:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.568507:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.569184:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.569970:
SingleGeneQuery(56)
2017-05-15 16:10:58.570616:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.572115:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.572792:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.573421:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.574216:
SingleGeneQuery(57)
2017-05-15 16:10:58.574960:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.576706:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.577388:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.578029:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.578862:
SingleGeneQuery(58)
2017-05-15 16:10:58.579583:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.581140:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.581695:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.582378:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.583083:
SingleGeneQuery(59)
2017-05-15 16:10:58.583771:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.585598:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.586277:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.587304:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.588272:
SingleGeneQuery(60)
2017-05-15 16:10:58.589508:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.591324:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.592195:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.593620:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.595310:
SingleGeneQuery(61)
2017-05-15 16:10:58.596178:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.598403:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.599319:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.599815:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.600790:
SingleGeneQuery(62)
2017-05-15 16:10:58.602121:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.604324:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.605062:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.606041:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.606976:
SingleGeneQuery(63)
2017-05-15 16:10:58.608472:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.610843:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.612144:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.612888:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.613629:
SingleGeneQuery(64)
2017-05-15 16:10:58.614322:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.615527:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.616142:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.616682:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.617365:
SingleGeneQuery(65)
2017-05-15 16:10:58.618327:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.619721:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.620366:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.621077:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.622296:
SingleGeneQuery(66)
2017-05-15 16:10:58.623091:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.625145:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.625883:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.627214:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.628380:
SingleGeneQuery(67)
2017-05-15 16:10:58.629387:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.632366:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.633353:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.634204:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.635014:
SingleGeneQuery(68)
2017-05-15 16:10:58.635831:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.637418:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.638055:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.638592:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.639299:
SingleGeneQuery(69)
2017-05-15 16:10:58.639871:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.641452:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.642080:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.643108:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.643724:
SingleGeneQuery(70)
2017-05-15 16:10:58.644431:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.645544:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.646139:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.646813:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.647413:
SingleGeneQuery(71)
2017-05-15 16:10:58.648139:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.649949:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.650646:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.651658:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.652347:
SingleGeneQuery(72)
2017-05-15 16:10:58.653343:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.654841:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.655626:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.656253:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.656885:
SingleGeneQuery(73)
2017-05-15 16:10:58.657492:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.659022:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.659997:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.660596:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.661265:
SingleGeneQuery(74)
2017-05-15 16:10:58.662147:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.664049:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.664679:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.665280:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.666030:
SingleGeneQuery(75)
2017-05-15 16:10:58.666722:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.669878:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.670590:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.671229:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.672120:
SingleGeneQuery(76)
2017-05-15 16:10:58.672799:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.674445:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.675502:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.676080:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.676843:
SingleGeneQuery(77)
2017-05-15 16:10:58.677570:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.679247:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.680036:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.681132:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.682185:
SingleGeneQuery(78)
2017-05-15 16:10:58.682908:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.684366:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.685013:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.685615:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.686488:
SingleGeneQuery(79)
2017-05-15 16:10:58.687127:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.688676:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.689675:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.690299:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.691046:
SingleGeneQuery(80)
2017-05-15 16:10:58.691664:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.693187:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.693815:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.694363:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.695383:
SingleGeneQuery(81)
2017-05-15 16:10:58.696205:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.697610:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.698326:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.698941:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.699584:
SingleGeneQuery(82)
2017-05-15 16:10:58.700268:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.702113:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.702749:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.703392:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.704074:
SingleGeneQuery(83)
2017-05-15 16:10:58.704843:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.706320:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.706904:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.707557:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.708622:
SingleGeneQuery(84)
2017-05-15 16:10:58.709401:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.710862:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.711610:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.712183:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.713206:
SingleGeneQuery(85)
2017-05-15 16:10:58.714193:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.715301:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.715841:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.716425:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.717094:
SingleGeneQuery(86)
2017-05-15 16:10:58.717803:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.719445:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.720119:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.720889:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.722009:
SingleGeneQuery(87)
2017-05-15 16:10:58.722699:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.724456:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.725247:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.725840:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.726539:
SingleGeneQuery(88)
2017-05-15 16:10:58.727481:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.729378:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.730484:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.731025:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.731637:
SingleGeneQuery(89)
2017-05-15 16:10:58.732323:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.733640:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.734436:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.735075:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.736050:
SingleGeneQuery(90)
2017-05-15 16:10:58.736777:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.738562:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.739800:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.740543:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.741208:
SingleGeneQuery(91)
2017-05-15 16:10:58.741970:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.743804:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.744602:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.745326:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.746519:
SingleGeneQuery(92)
2017-05-15 16:10:58.747200:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.748870:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.749492:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.750258:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.751169:
SingleGeneQuery(93)
2017-05-15 16:10:58.751853:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.752945:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.753639:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.754497:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.755349:
SingleGeneQuery(94)
2017-05-15 16:10:58.756373:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.757295:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.757862:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.758457:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.759202:
SingleGeneQuery(95)
2017-05-15 16:10:58.760038:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.761346:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.762378:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.763024:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.763727:
SingleGeneQuery(96)
2017-05-15 16:10:58.764401:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.765835:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.766485:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.767159:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.767860:
SingleGeneQuery(97)
2017-05-15 16:10:58.768619:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.770677:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.771728:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.772550:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.773262:
SingleGeneQuery(98)
2017-05-15 16:10:58.774016:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.775390:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.776049:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.776677:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.778012:
SingleGeneQuery(99)
2017-05-15 16:10:58.778721:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.779876:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.780431:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.780977:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.781674:
SingleGeneQuery(100)
2017-05-15 16:10:58.782267:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.783554:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.784107:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.784696:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.785435:
SingleGeneQuery(101)
2017-05-15 16:10:58.786257:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.787678:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.788295:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.789359:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.790382:
SingleGeneQuery(102)
2017-05-15 16:10:58.791029:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.792591:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.793284:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.794064:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.794982:
SingleGeneQuery(103)
2017-05-15 16:10:58.795747:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.796616:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.797160:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.797702:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.798278:
SingleGeneQuery(104)
2017-05-15 16:10:58.798957:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.800753:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.801565:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.802162:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.802922:
SingleGeneQuery(105)
2017-05-15 16:10:58.803968:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.805925:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.806638:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.807420:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.808211:
SingleGeneQuery(106)
2017-05-15 16:10:58.809041:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.811137:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.811960:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.812609:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.813409:
SingleGeneQuery(107)
2017-05-15 16:10:58.814071:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.814800:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.815366:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.816046:
SingleGeneQuery: graph attributes emplaced
###Markdown
HysteresisQuery
###Code
hysteresis_query_object = HysteresisQuery(database, "X1", bounds110, bounds311)
reduced_parameters = range(0, hysteresis_query_object.GeneQuery.number_of_reduced_parameters())
[ hysteresis_query_object(rpi) for rpi in reduced_parameters ][0:10]
###Output
2017-05-15 16:10:58.902577:
HysteresisQuery(0)
2017-05-15 16:10:58.903380:
SingleGeneQuery(0)
2017-05-15 16:10:58.904808:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.905998:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.906682:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.907365:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.907947:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.910048:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:58.911135:
HysteresisQuery: Alignment Graph has 0 vertices
2017-05-15 16:10:58.911872:
HysteresisQuery: Alignment Graph has 0 edges
2017-05-15 16:10:58.912592:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:58.913212:
HysteresisQuery: Returning.
2017-05-15 16:10:58.913848:
HysteresisQuery(1)
2017-05-15 16:10:58.914604:
SingleGeneQuery(1)
2017-05-15 16:10:58.915295:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.916995:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.917689:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.918311:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.918992:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.919739:
HysteresisQuery: Returning.
2017-05-15 16:10:58.920461:
HysteresisQuery(2)
2017-05-15 16:10:58.921137:
SingleGeneQuery(2)
2017-05-15 16:10:58.922005:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.923331:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.923920:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.924894:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.925618:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.927480:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:58.928211:
HysteresisQuery: Alignment Graph has 5 vertices
2017-05-15 16:10:58.929059:
HysteresisQuery: Alignment Graph has 5 edges
2017-05-15 16:10:58.929741:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:58.930348:
HysteresisQuery: Returning.
2017-05-15 16:10:58.931161:
HysteresisQuery(3)
2017-05-15 16:10:58.932183:
SingleGeneQuery(3)
2017-05-15 16:10:58.932949:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.934306:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.935046:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.936011:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.937113:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.937819:
HysteresisQuery: Returning.
2017-05-15 16:10:58.938517:
HysteresisQuery(4)
2017-05-15 16:10:58.939502:
SingleGeneQuery(4)
2017-05-15 16:10:58.940342:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.942338:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.943003:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.943696:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.944314:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.945090:
HysteresisQuery: Returning.
2017-05-15 16:10:58.945748:
HysteresisQuery(5)
2017-05-15 16:10:58.946478:
SingleGeneQuery(5)
2017-05-15 16:10:58.947445:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.948184:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.948733:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.949310:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.950000:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.950710:
HysteresisQuery: Returning.
2017-05-15 16:10:58.951455:
HysteresisQuery(6)
2017-05-15 16:10:58.952710:
SingleGeneQuery(6)
2017-05-15 16:10:58.953471:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.954912:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.955584:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.956507:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.957166:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.957860:
HysteresisQuery: Returning.
2017-05-15 16:10:58.958808:
HysteresisQuery(7)
2017-05-15 16:10:58.959560:
SingleGeneQuery(7)
2017-05-15 16:10:58.960658:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.961646:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.962249:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.962966:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.964020:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.965669:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:58.966247:
HysteresisQuery: Alignment Graph has 10 vertices
2017-05-15 16:10:58.966769:
HysteresisQuery: Alignment Graph has 12 edges
2017-05-15 16:10:58.967386:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:58.968052:
HysteresisQuery: Returning.
2017-05-15 16:10:58.969019:
HysteresisQuery(8)
2017-05-15 16:10:58.969605:
SingleGeneQuery(8)
2017-05-15 16:10:58.970336:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.971906:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.972574:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.973543:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.974106:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.976146:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:58.976840:
HysteresisQuery: Alignment Graph has 15 vertices
2017-05-15 16:10:58.977497:
HysteresisQuery: Alignment Graph has 17 edges
2017-05-15 16:10:58.978204:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:58.978955:
HysteresisQuery: Returning.
2017-05-15 16:10:58.979647:
HysteresisQuery(9)
2017-05-15 16:10:58.980219:
SingleGeneQuery(9)
2017-05-15 16:10:58.981026:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.982593:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.983251:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.983916:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.984679:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.985524:
HysteresisQuery: Returning.
2017-05-15 16:10:58.986477:
HysteresisQuery(10)
2017-05-15 16:10:58.987614:
SingleGeneQuery(10)
2017-05-15 16:10:58.988629:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.990807:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.991625:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.992311:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.993238:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.994017:
HysteresisQuery: Returning.
2017-05-15 16:10:58.994755:
HysteresisQuery(11)
2017-05-15 16:10:58.995484:
SingleGeneQuery(11)
2017-05-15 16:10:58.996191:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.998134:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.998861:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.999543:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.000169:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.000920:
HysteresisQuery: Returning.
2017-05-15 16:10:59.001702:
HysteresisQuery(12)
2017-05-15 16:10:59.002356:
SingleGeneQuery(12)
2017-05-15 16:10:59.003104:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.004450:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.005065:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.005943:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.006843:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.007595:
HysteresisQuery: Returning.
2017-05-15 16:10:59.008673:
HysteresisQuery(13)
2017-05-15 16:10:59.009242:
SingleGeneQuery(13)
2017-05-15 16:10:59.010023:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.011615:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.012278:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.013088:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.013693:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.015332:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:59.016033:
HysteresisQuery: Alignment Graph has 10 vertices
2017-05-15 16:10:59.016881:
HysteresisQuery: Alignment Graph has 12 edges
2017-05-15 16:10:59.017591:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:59.018199:
HysteresisQuery: Returning.
2017-05-15 16:10:59.018818:
HysteresisQuery(14)
2017-05-15 16:10:59.019446:
SingleGeneQuery(14)
2017-05-15 16:10:59.020220:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.021133:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.021834:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.022746:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.023534:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.024399:
HysteresisQuery: Returning.
2017-05-15 16:10:59.024990:
HysteresisQuery(15)
2017-05-15 16:10:59.025591:
SingleGeneQuery(15)
2017-05-15 16:10:59.027014:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.029072:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.029813:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.030511:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.031094:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.031898:
HysteresisQuery: Returning.
2017-05-15 16:10:59.032490:
HysteresisQuery(16)
2017-05-15 16:10:59.033130:
SingleGeneQuery(16)
2017-05-15 16:10:59.033784:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.035360:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.036009:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.036657:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.037307:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.038082:
HysteresisQuery: Returning.
2017-05-15 16:10:59.038853:
HysteresisQuery(17)
2017-05-15 16:10:59.039787:
SingleGeneQuery(17)
2017-05-15 16:10:59.040417:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.041563:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.042247:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.043859:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.044520:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.045334:
HysteresisQuery: Returning.
2017-05-15 16:10:59.045903:
HysteresisQuery(18)
2017-05-15 16:10:59.046467:
SingleGeneQuery(18)
2017-05-15 16:10:59.047162:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.048473:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.049201:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.049986:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.050919:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.051778:
HysteresisQuery: Returning.
2017-05-15 16:10:59.052613:
HysteresisQuery(19)
2017-05-15 16:10:59.053291:
SingleGeneQuery(19)
2017-05-15 16:10:59.054032:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.055353:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.055966:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.057071:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.057676:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.058964:
HysteresisQuery: Returning.
2017-05-15 16:10:59.059607:
HysteresisQuery(20)
2017-05-15 16:10:59.060203:
SingleGeneQuery(20)
2017-05-15 16:10:59.061025:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.063321:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.063962:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.064618:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.065289:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.065990:
HysteresisQuery: Returning.
2017-05-15 16:10:59.066858:
HysteresisQuery(21)
2017-05-15 16:10:59.067435:
SingleGeneQuery(21)
2017-05-15 16:10:59.068095:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.069749:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.070459:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.071365:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.072181:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.073097:
HysteresisQuery: Returning.
2017-05-15 16:10:59.073673:
HysteresisQuery(22)
2017-05-15 16:10:59.074284:
SingleGeneQuery(22)
2017-05-15 16:10:59.075093:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.076762:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.077470:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.078363:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.078960:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.079646:
HysteresisQuery: Returning.
2017-05-15 16:10:59.080246:
HysteresisQuery(23)
2017-05-15 16:10:59.080846:
SingleGeneQuery(23)
2017-05-15 16:10:59.081562:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.082997:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.083616:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.084219:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.084861:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.085622:
HysteresisQuery: Returning.
2017-05-15 16:10:59.086305:
HysteresisQuery(24)
2017-05-15 16:10:59.087071:
SingleGeneQuery(24)
2017-05-15 16:10:59.087698:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.088541:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.089242:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.089970:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.090765:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.091447:
HysteresisQuery: Returning.
2017-05-15 16:10:59.092095:
HysteresisQuery(25)
2017-05-15 16:10:59.092684:
SingleGeneQuery(25)
2017-05-15 16:10:59.093507:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.094550:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.095327:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.096054:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.096671:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.097426:
HysteresisQuery: Returning.
2017-05-15 16:10:59.098036:
HysteresisQuery(26)
2017-05-15 16:10:59.098650:
SingleGeneQuery(26)
2017-05-15 16:10:59.099509:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.100328:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.100904:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.101527:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.102153:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.102826:
HysteresisQuery: Returning.
2017-05-15 16:10:59.103482:
HysteresisQuery(27)
2017-05-15 16:10:59.104372:
SingleGeneQuery(27)
2017-05-15 16:10:59.105196:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.107066:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.107838:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.108660:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.109256:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.110084:
HysteresisQuery: Returning.
2017-05-15 16:10:59.110710:
HysteresisQuery(28)
2017-05-15 16:10:59.111378:
SingleGeneQuery(28)
2017-05-15 16:10:59.112303:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.113826:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.114577:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.115232:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.115890:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.133589:
HysteresisQuery: Returning.
2017-05-15 16:10:59.134186:
HysteresisQuery(29)
2017-05-15 16:10:59.134809:
SingleGeneQuery(29)
2017-05-15 16:10:59.135675:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.137072:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.137819:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.138445:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.139220:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.140138:
HysteresisQuery: Returning.
2017-05-15 16:10:59.141003:
HysteresisQuery(30)
2017-05-15 16:10:59.141647:
SingleGeneQuery(30)
2017-05-15 16:10:59.142509:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.144353:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.145374:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.146374:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.147069:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.147910:
HysteresisQuery: Returning.
2017-05-15 16:10:59.148530:
HysteresisQuery(31)
2017-05-15 16:10:59.149158:
SingleGeneQuery(31)
2017-05-15 16:10:59.149903:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.151188:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.151730:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.152392:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.152985:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.153728:
HysteresisQuery: Returning.
2017-05-15 16:10:59.154547:
HysteresisQuery(32)
2017-05-15 16:10:59.155106:
SingleGeneQuery(32)
2017-05-15 16:10:59.155816:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.157485:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.158112:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.159238:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.159876:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.161096:
HysteresisQuery: Returning.
2017-05-15 16:10:59.161837:
HysteresisQuery(33)
2017-05-15 16:10:59.162622:
SingleGeneQuery(33)
2017-05-15 16:10:59.163355:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.164487:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.165065:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.165653:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.166635:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.167294:
HysteresisQuery: Returning.
2017-05-15 16:10:59.167901:
HysteresisQuery(34)
2017-05-15 16:10:59.168463:
SingleGeneQuery(34)
2017-05-15 16:10:59.169080:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.170700:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.171290:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.172051:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.172926:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.174014:
HysteresisQuery: Returning.
2017-05-15 16:10:59.174742:
HysteresisQuery(35)
2017-05-15 16:10:59.175388:
SingleGeneQuery(35)
2017-05-15 16:10:59.176129:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.178194:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.179240:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.179848:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.180472:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.181198:
HysteresisQuery: Returning.
2017-05-15 16:10:59.181750:
HysteresisQuery(36)
2017-05-15 16:10:59.182293:
SingleGeneQuery(36)
2017-05-15 16:10:59.182921:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.184325:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.184931:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.185481:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.186402:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.187155:
HysteresisQuery: Returning.
2017-05-15 16:10:59.188056:
HysteresisQuery(37)
2017-05-15 16:10:59.188726:
SingleGeneQuery(37)
2017-05-15 16:10:59.189729:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.191592:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.192395:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.193052:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.193761:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.195566:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:59.196475:
HysteresisQuery: Alignment Graph has 15 vertices
2017-05-15 16:10:59.197076:
HysteresisQuery: Alignment Graph has 23 edges
2017-05-15 16:10:59.197733:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:59.198502:
HysteresisQuery: Returning.
2017-05-15 16:10:59.199395:
HysteresisQuery(38)
2017-05-15 16:10:59.199933:
SingleGeneQuery(38)
2017-05-15 16:10:59.200581:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.202178:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.202897:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.203629:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.204317:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.205148:
HysteresisQuery: Returning.
2017-05-15 16:10:59.205731:
HysteresisQuery(39)
2017-05-15 16:10:59.206419:
SingleGeneQuery(39)
2017-05-15 16:10:59.207140:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.208401:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.209008:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.209657:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.210644:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.211457:
HysteresisQuery: Returning.
2017-05-15 16:10:59.212741:
HysteresisQuery(40)
2017-05-15 16:10:59.213296:
SingleGeneQuery(40)
2017-05-15 16:10:59.213971:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.215618:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.216326:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.217036:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.217763:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.218467:
HysteresisQuery: Returning.
2017-05-15 16:10:59.219023:
HysteresisQuery(41)
2017-05-15 16:10:59.219946:
SingleGeneQuery(41)
2017-05-15 16:10:59.220638:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.221879:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.222647:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.223351:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.224183:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.225608:
HysteresisQuery: Returning.
2017-05-15 16:10:59.226856:
HysteresisQuery(42)
2017-05-15 16:10:59.227565:
SingleGeneQuery(42)
2017-05-15 16:10:59.228438:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.229993:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.230957:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.231562:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.232256:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.233054:
HysteresisQuery: Returning.
2017-05-15 16:10:59.234030:
HysteresisQuery(43)
2017-05-15 16:10:59.234659:
SingleGeneQuery(43)
2017-05-15 16:10:59.235422:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.237588:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.238455:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.239059:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.239594:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.241047:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:59.241731:
HysteresisQuery: Alignment Graph has 23 vertices
2017-05-15 16:10:59.242572:
HysteresisQuery: Alignment Graph has 33 edges
2017-05-15 16:10:59.243175:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:59.243747:
HysteresisQuery: Returning.
2017-05-15 16:10:59.244397:
HysteresisQuery(44)
2017-05-15 16:10:59.244972:
SingleGeneQuery(44)
2017-05-15 16:10:59.245859:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.247160:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.248281:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.248892:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.249427:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.250107:
HysteresisQuery: Returning.
2017-05-15 16:10:59.250647:
HysteresisQuery(45)
2017-05-15 16:10:59.251182:
SingleGeneQuery(45)
2017-05-15 16:10:59.251921:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.253722:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.254367:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.255042:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.255805:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.256841:
HysteresisQuery: Returning.
2017-05-15 16:10:59.258024:
HysteresisQuery(46)
2017-05-15 16:10:59.258671:
SingleGeneQuery(46)
2017-05-15 16:10:59.259342:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.260873:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.261541:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.262168:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.263028:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.264609:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:59.265730:
HysteresisQuery: Alignment Graph has 15 vertices
2017-05-15 16:10:59.266521:
HysteresisQuery: Alignment Graph has 23 edges
2017-05-15 16:10:59.267167:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:59.267760:
HysteresisQuery: Returning.
2017-05-15 16:10:59.268369:
HysteresisQuery(47)
2017-05-15 16:10:59.269056:
SingleGeneQuery(47)
2017-05-15 16:10:59.269708:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.271671:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.272350:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.273018:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.273642:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.274736:
HysteresisQuery: Returning.
2017-05-15 16:10:59.275267:
HysteresisQuery(48)
2017-05-15 16:10:59.275881:
SingleGeneQuery(48)
2017-05-15 16:10:59.276829:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.278940:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.279943:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.280790:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.281498:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.282247:
HysteresisQuery: Returning.
2017-05-15 16:10:59.282830:
HysteresisQuery(49)
2017-05-15 16:10:59.283408:
SingleGeneQuery(49)
2017-05-15 16:10:59.284035:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.285326:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.286072:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.286708:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.287414:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.289123:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:59.290222:
HysteresisQuery: Alignment Graph has 23 vertices
2017-05-15 16:10:59.290828:
HysteresisQuery: Alignment Graph has 33 edges
2017-05-15 16:10:59.291989:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:59.293019:
HysteresisQuery: Returning.
2017-05-15 16:10:59.293603:
HysteresisQuery(50)
2017-05-15 16:10:59.294743:
SingleGeneQuery(50)
2017-05-15 16:10:59.295733:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.297396:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.298040:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.299253:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.299864:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.300596:
HysteresisQuery: Returning.
2017-05-15 16:10:59.301234:
HysteresisQuery(51)
2017-05-15 16:10:59.301786:
SingleGeneQuery(51)
2017-05-15 16:10:59.302515:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.303856:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.304867:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.305641:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.306201:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.307089:
HysteresisQuery: Returning.
2017-05-15 16:10:59.307969:
HysteresisQuery(52)
2017-05-15 16:10:59.308626:
SingleGeneQuery(52)
2017-05-15 16:10:59.310024:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.311635:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.312582:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.313185:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.314051:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.315610:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:59.316203:
HysteresisQuery: Alignment Graph has 10 vertices
2017-05-15 16:10:59.316764:
HysteresisQuery: Alignment Graph has 12 edges
2017-05-15 16:10:59.317398:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:59.317998:
HysteresisQuery: Returning.
2017-05-15 16:10:59.318601:
HysteresisQuery(53)
2017-05-15 16:10:59.319220:
SingleGeneQuery(53)
2017-05-15 16:10:59.319941:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.320775:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.321458:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.322176:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.322753:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.323531:
HysteresisQuery: Returning.
2017-05-15 16:10:59.324662:
HysteresisQuery(54)
2017-05-15 16:10:59.325374:
SingleGeneQuery(54)
2017-05-15 16:10:59.326086:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.326946:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.328077:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.328846:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.329460:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.330256:
HysteresisQuery: Returning.
2017-05-15 16:10:59.330906:
HysteresisQuery(55)
2017-05-15 16:10:59.331662:
SingleGeneQuery(55)
2017-05-15 16:10:59.332439:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.333849:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.334444:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.335045:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.335885:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.337484:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:59.338240:
HysteresisQuery: Alignment Graph has 15 vertices
2017-05-15 16:10:59.338857:
HysteresisQuery: Alignment Graph has 23 edges
2017-05-15 16:10:59.339635:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:59.340494:
HysteresisQuery: Returning.
2017-05-15 16:10:59.341266:
HysteresisQuery(56)
2017-05-15 16:10:59.341934:
SingleGeneQuery(56)
2017-05-15 16:10:59.342865:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.344553:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.345636:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.346584:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.347493:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.348252:
HysteresisQuery: Returning.
2017-05-15 16:10:59.348897:
HysteresisQuery(57)
2017-05-15 16:10:59.349517:
SingleGeneQuery(57)
2017-05-15 16:10:59.350248:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.351517:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.352122:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.352781:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.353388:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.354177:
HysteresisQuery: Returning.
2017-05-15 16:10:59.354877:
HysteresisQuery(58)
2017-05-15 16:10:59.355472:
SingleGeneQuery(58)
2017-05-15 16:10:59.356177:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.358259:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.359380:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.360030:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.360653:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.361399:
HysteresisQuery: Returning.
2017-05-15 16:10:59.362165:
HysteresisQuery(59)
2017-05-15 16:10:59.362819:
SingleGeneQuery(59)
2017-05-15 16:10:59.363561:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.365137:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.366016:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.366619:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.367399:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.368358:
HysteresisQuery: Returning.
2017-05-15 16:10:59.369514:
HysteresisQuery(60)
2017-05-15 16:10:59.370527:
SingleGeneQuery(60)
2017-05-15 16:10:59.371411:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.373308:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.374272:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.374868:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.376168:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.377205:
HysteresisQuery: Returning.
2017-05-15 16:10:59.377771:
HysteresisQuery(61)
2017-05-15 16:10:59.378457:
SingleGeneQuery(61)
2017-05-15 16:10:59.379277:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.381111:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.382229:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.382855:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.383483:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.384772:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:59.385420:
HysteresisQuery: Alignment Graph has 23 vertices
2017-05-15 16:10:59.385977:
HysteresisQuery: Alignment Graph has 33 edges
2017-05-15 16:10:59.386655:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:59.387242:
HysteresisQuery: Returning.
2017-05-15 16:10:59.388233:
HysteresisQuery(62)
2017-05-15 16:10:59.388854:
SingleGeneQuery(62)
2017-05-15 16:10:59.390016:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.390894:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.391962:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.392591:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.393202:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.394046:
HysteresisQuery: Returning.
2017-05-15 16:10:59.394816:
HysteresisQuery(63)
2017-05-15 16:10:59.395318:
SingleGeneQuery(63)
2017-05-15 16:10:59.396110:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.397857:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.398602:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.399309:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.399973:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.400973:
HysteresisQuery: Returning.
2017-05-15 16:10:59.401464:
HysteresisQuery(64)
2017-05-15 16:10:59.402072:
SingleGeneQuery(64)
2017-05-15 16:10:59.402816:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.404386:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.405503:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.406275:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.406934:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.408468:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:59.409150:
HysteresisQuery: Alignment Graph has 15 vertices
2017-05-15 16:10:59.409710:
HysteresisQuery: Alignment Graph has 23 edges
2017-05-15 16:10:59.410276:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:59.410889:
HysteresisQuery: Returning.
2017-05-15 16:10:59.411573:
HysteresisQuery(65)
2017-05-15 16:10:59.412619:
SingleGeneQuery(65)
2017-05-15 16:10:59.413247:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.414753:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.415337:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.415983:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.416566:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.417337:
HysteresisQuery: Returning.
2017-05-15 16:10:59.418095:
HysteresisQuery(66)
2017-05-15 16:10:59.418731:
SingleGeneQuery(66)
2017-05-15 16:10:59.419501:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.420960:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.421685:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.422311:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.423058:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.423848:
HysteresisQuery: Returning.
2017-05-15 16:10:59.424462:
HysteresisQuery(67)
2017-05-15 16:10:59.425094:
SingleGeneQuery(67)
2017-05-15 16:10:59.425869:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.427549:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.428244:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.429315:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.430002:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.431646:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:59.432763:
HysteresisQuery: Alignment Graph has 23 vertices
2017-05-15 16:10:59.433303:
HysteresisQuery: Alignment Graph has 33 edges
2017-05-15 16:10:59.434021:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:59.434835:
HysteresisQuery: Returning.
2017-05-15 16:10:59.435425:
HysteresisQuery(68)
2017-05-15 16:10:59.436088:
SingleGeneQuery(68)
2017-05-15 16:10:59.436707:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.438199:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.438890:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.440059:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.440685:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.441539:
HysteresisQuery: Returning.
2017-05-15 16:10:59.442542:
HysteresisQuery(69)
2017-05-15 16:10:59.443318:
SingleGeneQuery(69)
2017-05-15 16:10:59.443987:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.445756:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.446556:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.447140:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.447720:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.448432:
HysteresisQuery: Returning.
2017-05-15 16:10:59.449099:
HysteresisQuery(70)
2017-05-15 16:10:59.449884:
SingleGeneQuery(70)
2017-05-15 16:10:59.450692:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.452172:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.452731:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.453351:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.454085:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.455814:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:59.456499:
HysteresisQuery: Alignment Graph has 10 vertices
2017-05-15 16:10:59.457218:
HysteresisQuery: Alignment Graph has 12 edges
2017-05-15 16:10:59.458000:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:59.458559:
HysteresisQuery: Returning.
2017-05-15 16:10:59.459376:
HysteresisQuery(71)
2017-05-15 16:10:59.460227:
SingleGeneQuery(71)
2017-05-15 16:10:59.460929:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.463051:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.464233:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.464749:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.465504:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.466782:
HysteresisQuery: Returning.
2017-05-15 16:10:59.467452:
HysteresisQuery(72)
2017-05-15 16:10:59.468155:
SingleGeneQuery(72)
2017-05-15 16:10:59.468883:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.470194:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.471110:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.471700:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.472420:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.473558:
HysteresisQuery: Returning.
2017-05-15 16:10:59.474192:
HysteresisQuery(73)
2017-05-15 16:10:59.475116:
SingleGeneQuery(73)
2017-05-15 16:10:59.475797:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.476687:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.477392:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.478052:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.478637:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.479372:
HysteresisQuery: Returning.
2017-05-15 16:10:59.479975:
HysteresisQuery(74)
2017-05-15 16:10:59.480899:
SingleGeneQuery(74)
2017-05-15 16:10:59.481804:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.483363:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.484164:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.484764:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.485369:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.486077:
HysteresisQuery: Returning.
2017-05-15 16:10:59.486708:
HysteresisQuery(75)
2017-05-15 16:10:59.487256:
SingleGeneQuery(75)
2017-05-15 16:10:59.487946:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.490162:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.491566:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.492276:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.492918:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.494133:
HysteresisQuery: Returning.
2017-05-15 16:10:59.494699:
HysteresisQuery(76)
2017-05-15 16:10:59.495599:
SingleGeneQuery(76)
2017-05-15 16:10:59.496323:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.497889:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.498788:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.499442:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.500682:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.501784:
HysteresisQuery: Returning.
2017-05-15 16:10:59.502433:
HysteresisQuery(77)
2017-05-15 16:10:59.503001:
SingleGeneQuery(77)
2017-05-15 16:10:59.503667:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.505095:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.505830:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.506541:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.507274:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.508550:
HysteresisQuery: Returning.
2017-05-15 16:10:59.509147:
HysteresisQuery(78)
2017-05-15 16:10:59.509768:
SingleGeneQuery(78)
2017-05-15 16:10:59.510579:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.511874:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.512851:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.513719:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.514392:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.515186:
HysteresisQuery: Returning.
2017-05-15 16:10:59.516025:
HysteresisQuery(79)
2017-05-15 16:10:59.516657:
SingleGeneQuery(79)
2017-05-15 16:10:59.517426:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.519222:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.519796:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.520442:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.521107:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.522731:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:59.523687:
HysteresisQuery: Alignment Graph has 10 vertices
2017-05-15 16:10:59.524239:
HysteresisQuery: Alignment Graph has 12 edges
2017-05-15 16:10:59.525264:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:59.526302:
HysteresisQuery: Returning.
2017-05-15 16:10:59.526863:
HysteresisQuery(80)
2017-05-15 16:10:59.527611:
SingleGeneQuery(80)
2017-05-15 16:10:59.528223:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.530177:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.530944:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.531873:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.532853:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.533604:
HysteresisQuery: Returning.
2017-05-15 16:10:59.534326:
HysteresisQuery(81)
2017-05-15 16:10:59.535197:
SingleGeneQuery(81)
2017-05-15 16:10:59.535914:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.537681:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.538697:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.539322:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.540025:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.540759:
HysteresisQuery: Returning.
2017-05-15 16:10:59.541758:
HysteresisQuery(82)
2017-05-15 16:10:59.542604:
SingleGeneQuery(82)
2017-05-15 16:10:59.543263:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.545240:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.545921:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.546974:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.547613:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.548473:
HysteresisQuery: Returning.
2017-05-15 16:10:59.549092:
HysteresisQuery(83)
2017-05-15 16:10:59.549623:
SingleGeneQuery(83)
2017-05-15 16:10:59.550332:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.551706:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.552443:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.553014:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.553654:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.554350:
HysteresisQuery: Returning.
2017-05-15 16:10:59.554945:
HysteresisQuery(84)
2017-05-15 16:10:59.555554:
SingleGeneQuery(84)
2017-05-15 16:10:59.556643:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.557454:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.558473:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.559130:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.559656:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.560411:
HysteresisQuery: Returning.
2017-05-15 16:10:59.561167:
HysteresisQuery(85)
2017-05-15 16:10:59.561970:
SingleGeneQuery(85)
2017-05-15 16:10:59.562623:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.563980:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.564592:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.565209:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.565854:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.566737:
HysteresisQuery: Returning.
2017-05-15 16:10:59.567246:
HysteresisQuery(86)
2017-05-15 16:10:59.567879:
SingleGeneQuery(86)
2017-05-15 16:10:59.568569:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.570001:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.571067:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.571811:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.572949:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.573614:
HysteresisQuery: Returning.
2017-05-15 16:10:59.574240:
HysteresisQuery(87)
2017-05-15 16:10:59.574886:
SingleGeneQuery(87)
2017-05-15 16:10:59.575487:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.577500:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.578224:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.578874:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.579387:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.580229:
HysteresisQuery: Returning.
2017-05-15 16:10:59.580963:
HysteresisQuery(88)
2017-05-15 16:10:59.581742:
SingleGeneQuery(88)
2017-05-15 16:10:59.582606:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.583758:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.584905:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.585539:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.586188:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.586996:
HysteresisQuery: Returning.
2017-05-15 16:10:59.587922:
HysteresisQuery(89)
2017-05-15 16:10:59.588532:
SingleGeneQuery(89)
2017-05-15 16:10:59.589372:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.591928:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.592674:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.593619:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.594239:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.595042:
HysteresisQuery: Returning.
2017-05-15 16:10:59.595828:
HysteresisQuery(90)
2017-05-15 16:10:59.596596:
SingleGeneQuery(90)
2017-05-15 16:10:59.597267:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.598737:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.599847:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.600524:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.601080:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.601880:
HysteresisQuery: Returning.
2017-05-15 16:10:59.602438:
HysteresisQuery(91)
2017-05-15 16:10:59.602946:
SingleGeneQuery(91)
2017-05-15 16:10:59.603626:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.605439:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.606751:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.607481:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.608381:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.609151:
HysteresisQuery: Returning.
2017-05-15 16:10:59.610075:
HysteresisQuery(92)
2017-05-15 16:10:59.610784:
SingleGeneQuery(92)
2017-05-15 16:10:59.611995:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.613432:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.614065:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.614612:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.615507:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.616194:
HysteresisQuery: Returning.
2017-05-15 16:10:59.616997:
HysteresisQuery(93)
2017-05-15 16:10:59.617551:
SingleGeneQuery(93)
2017-05-15 16:10:59.618267:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.619719:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.620707:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.621323:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.621860:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.622852:
HysteresisQuery: Returning.
2017-05-15 16:10:59.623514:
HysteresisQuery(94)
2017-05-15 16:10:59.624207:
SingleGeneQuery(94)
2017-05-15 16:10:59.625143:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.626280:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.626920:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.627667:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.628384:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.629609:
HysteresisQuery: Returning.
2017-05-15 16:10:59.630211:
HysteresisQuery(95)
2017-05-15 16:10:59.630820:
SingleGeneQuery(95)
2017-05-15 16:10:59.631589:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.632940:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.633707:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.634380:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.634985:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.635712:
HysteresisQuery: Returning.
2017-05-15 16:10:59.636343:
HysteresisQuery(96)
2017-05-15 16:10:59.637065:
SingleGeneQuery(96)
2017-05-15 16:10:59.637804:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.639575:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.640409:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.641061:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.641702:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.643084:
HysteresisQuery: Returning.
2017-05-15 16:10:59.643682:
HysteresisQuery(97)
2017-05-15 16:10:59.644255:
SingleGeneQuery(97)
2017-05-15 16:10:59.645163:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.646754:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.647422:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.648100:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.648732:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.650359:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:59.651095:
HysteresisQuery: Alignment Graph has 10 vertices
2017-05-15 16:10:59.651681:
HysteresisQuery: Alignment Graph has 12 edges
2017-05-15 16:10:59.652311:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:59.652913:
HysteresisQuery: Returning.
2017-05-15 16:10:59.653457:
HysteresisQuery(98)
2017-05-15 16:10:59.654592:
SingleGeneQuery(98)
2017-05-15 16:10:59.655305:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.657013:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.657628:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.658566:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.659288:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.660094:
HysteresisQuery: Returning.
2017-05-15 16:10:59.660800:
HysteresisQuery(99)
2017-05-15 16:10:59.661414:
SingleGeneQuery(99)
2017-05-15 16:10:59.662272:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.663458:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.664042:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.664886:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.665777:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.666696:
HysteresisQuery: Returning.
2017-05-15 16:10:59.667328:
HysteresisQuery(100)
2017-05-15 16:10:59.667950:
SingleGeneQuery(100)
2017-05-15 16:10:59.668654:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.669414:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.670591:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.671354:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.672009:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.672929:
HysteresisQuery: Returning.
2017-05-15 16:10:59.673989:
HysteresisQuery(101)
2017-05-15 16:10:59.675009:
SingleGeneQuery(101)
2017-05-15 16:10:59.675738:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.677022:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.677949:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.678610:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.679638:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.680487:
HysteresisQuery: Returning.
2017-05-15 16:10:59.681158:
HysteresisQuery(102)
2017-05-15 16:10:59.681746:
SingleGeneQuery(102)
2017-05-15 16:10:59.682911:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.684178:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.684880:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.685749:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.686297:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.687015:
HysteresisQuery: Returning.
2017-05-15 16:10:59.687763:
HysteresisQuery(103)
2017-05-15 16:10:59.688320:
SingleGeneQuery(103)
2017-05-15 16:10:59.689069:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.690529:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.691160:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.692236:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.692858:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.693641:
HysteresisQuery: Returning.
2017-05-15 16:10:59.694334:
HysteresisQuery(104)
2017-05-15 16:10:59.695027:
SingleGeneQuery(104)
2017-05-15 16:10:59.695986:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.696888:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.697697:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.698335:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.699019:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.699939:
HysteresisQuery: Returning.
2017-05-15 16:10:59.700820:
HysteresisQuery(105)
2017-05-15 16:10:59.701475:
SingleGeneQuery(105)
2017-05-15 16:10:59.702355:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.703911:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.704601:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.705298:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.706034:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.706752:
HysteresisQuery: Returning.
2017-05-15 16:10:59.707385:
HysteresisQuery(106)
2017-05-15 16:10:59.708109:
SingleGeneQuery(106)
2017-05-15 16:10:59.708818:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.710405:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.711201:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.711865:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.712500:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.713150:
HysteresisQuery: Returning.
2017-05-15 16:10:59.714075:
HysteresisQuery(107)
2017-05-15 16:10:59.714602:
SingleGeneQuery(107)
2017-05-15 16:10:59.715356:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.717133:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.717962:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.718575:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.719224:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.720057:
HysteresisQuery: Returning.
###Markdown
DSGRN Query Functions
###Code
from DSGRN import *
database = Database("querytest.db")
database.parametergraph.dimension()
###Output
_____no_output_____
###Markdown
We show here the network being considered in this example:
###Code
database
print(database.network.specification())
###Output
X1 : (X1)(~X3)
X2 : X1
X3 : (X1)(~X2)
###Markdown
Query Overview

In order to perform queries on the database, some preprocessing is sometimes necessary. To give a uniform approach to this, we have adopted a design where each query corresponds to a python class whose name ends with the suffix `Query`. Each class has a constructor (i.e. `__init__` method) which accepts some arguments to indicate parameters of the query (e.g. which database). We currently have the following queries:

| Name | Query Parameters | Query Input | Query Output |
| ---- | ----------- | ------------ | --- |
| MonostableQuery | Database | Morse Graph Index | True/False |
| BistableQuery | Database | Morse Graph Index | True/False |
| MultistableQuery | Database | Morse Graph Index | True/False |
| SingleGeneQuery | Database, Name of Network Node | Reduced Parameter Index | Annotated Factor Graph |
| SingleFixedPointQuery | Database, Domain Bounds | Morse Graph Index | True/False |
| DoubleFixedPointQuery | Database, pair of Domain Bounds | Morse Graph Index | True/False |
| MonostableFixedPointQuery | Database, Domain Bounds | Morse Graph Index | True/False |
| InducibilityQuery | Database, Name of Network Node, pair of Domain Bounds | Reduced Parameter Index | Triple of True/False |
| HysteresisQuery | Database, Name of Network Node, pair of Domain Bounds | Reduced Parameter Index | True/False |

When the query object is constructed, it is passed the required parameters and any preprocessing required to support the query is done. In some cases the preprocessing is trivial, and in other cases it may be more extensive. After the object is constructed, it can be used to perform queries. This is accomplished by invoking the object's `__call__` operator (i.e. treating the object as a function). The call operator receives the query input and returns the query output. For example:

```
single_gene_query = SingleGeneQuery(database, "X1")
graph = single_gene_query(43)
```

In the first line, the query object is created with the query parameters `database` and `"X1"`. This results in computation being done to organize a table in the database to quickly support "Single Gene Queries". The created object `single_gene_query` has a method `__call__` which allows it to be called as a function in order to produce query results. The input of the `__call__` method is a "reduced parameter index" and what is returned is an annotated graph structure specific to this query.

In many cases the input to the query is a Morse Graph Index and the output is a boolean value which indicates whether or not the Morse graph index is in a precomputed set of matches. These query classes typically also support another method `matches` which simply returns the set of matches. This allows the following code:

```
set_of_matches = SingleFixedPointQuery(database, domain_bounds).matches()
```

In this code, a query object is created, the `matches` method is called and returns the set of matches, but no reference to the query object is kept. When using this paradigm one should be careful not to create the same query multiple times unnecessarily, or else the same preprocessing step would be repeated.

MonostableQuery, BistableQuery, and MultistableQuery
###Code
monostable_query_object = MonostableQuery(database)
bistable_query_object = BistableQuery(database)
multistable_query_object = MultistableQuery(database)
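# Hedged sketch of the usage pattern described in the overview above, reusing the objects
# just constructed so that no preprocessing is repeated. "example_mgi" is an arbitrary
# illustrative Morse graph index, not something taken from the original notebook.
example_mgi = 0
is_mono = monostable_query_object(example_mgi)   # __call__ : Morse graph index -> True/False
mono_set = monostable_query_object.matches()     # precomputed set of matching indices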
###Output
2017-10-24 13:17:41.078862:
MonostableQuery :: initializing
2017-10-24 13:17:41.080054:
MonostableQuery :: select MorseGraphIndex from (select MorseGraphIndex, count(*) as StableCount from (select MorseGraphIndex,Vertex from MorseGraphVertices except select MorseGraphIndex,Source from MorseGraphEdges) group by MorseGraphIndex) where StableCount=1;
2017-10-24 13:17:41.082888:
MonostableQuery :: constructed
###Markdown
Evaluate the query on a few Morse Graph Indices:
###Code
monostable_query_object(0)
monostable_query_object(1)
###Output
_____no_output_____
###Markdown
How many matches for each type of query?
###Code
print([len(monostable_query_object.matches()), len(bistable_query_object.matches()), len(multistable_query_object.matches())])
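# Hedged consistency check, based on the assumption (not stated explicitly here) that
# "monostable" means exactly one stable state while "multistable" means two or more; if
# so, the two sets are disjoint and every bistable match is also a multistable match.
mono = set(monostable_query_object.matches())
bi = set(bistable_query_object.matches())
multi = set(multistable_query_object.matches())
print(mono.isdisjoint(multi), bi.issubset(multi))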
###Output
[45, 98, 110]
###Markdown
Print the list of Morse graph indices which satisfy the monostable query.
###Code
print(monostable_query_object.matches())
###Output
frozenset([0, 2, 3, 6, 9, 10, 11, 130, 18, 19, 20, 21, 153, 25, 26, 28, 30, 32, 34, 36, 38, 40, 43, 49, 50, 53, 55, 56, 59, 60, 74, 75, 78, 79, 89, 96, 131, 102, 104, 146, 113, 148, 122, 123, 127])
###Markdown
Directly verify that all returned matches satisfy the corresponding query:
###Code
all( monostable_query_object(mgi) for mgi in monostable_query_object.matches() )
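# The same direct check can be repeated verbatim for the other two query objects; this is
# just a sketch following the identical pattern from the line above.
all( bistable_query_object(mgi) for mgi in bistable_query_object.matches() )
all( multistable_query_object(mgi) for mgi in multistable_query_object.matches() )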
database.DrawMorseGraph(131)
###Output
_____no_output_____
###Markdown
SingleGeneQuery

Our interest is in fixing all combinatorial parameters except for the logic parameter corresponding to a single node and considering the set of parameters corresponding to this choice. Due to the factorization of the parameter graph, this set of parameters is isomorphic to the factor graph associated to the node of interest. In order to handle repeated queries efficiently, it is necessary to prepare a table which reorders information so that it is I/O efficient for algorithms to retrieve. The following does this:
###Code
single_gene_query = SingleGeneQuery(database, "X1")
###Output
2017-05-15 16:10:57.880238:
SingleGeneQuery(querytest.db, X1)
2017-05-15 16:10:57.885737:
SingleGeneQuery: FactorGraph generated
2017-05-15 16:10:57.886477:
SingleGeneQuery: SingleGeneQuery attribute missing from python database object.
2017-05-15 16:10:57.887072:
SingleGeneQuery: SingleGeneQuery attributes created.
2017-05-15 16:10:57.887746:
SingleGeneQuery: database structure unaware of gene X1
2017-05-15 16:10:57.888523:
SingleGeneQuery: sanitized X1
2017-05-15 16:10:57.889431:
SingleGeneQuery: cursor constructed
2017-05-15 16:10:57.890141:
SingleGeneQuery: checked for table
2017-05-15 16:10:57.890790:
SingleGeneQuery: added gene to python database object.
2017-05-15 16:10:57.891593:
SingleGeneQuery: constructed
###Markdown
For a single gene query, each query result is a graph isomorphic to the factor graph, and the number of such queries corresponds to the number of "reduced parameter indices". This will be explained in more depth shortly. To help explain this, we first examine the following computation:
###Code
N = single_gene_query.number_of_gene_parameters()
M = single_gene_query.number_of_reduced_parameters()
L = database.parametergraph.size()
print([N, M, N*M, L])
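# Hedged illustration of the factorization: the 5400 parameter indices split into 50 gene
# parameter indices times 108 reduced parameter indices. The actual digit position DSGRN
# uses is technical (see the discussion below), so treating the gene digit as the least
# significant digit here is only an assumed toy reconstruction, not the library's convention.
assert N * M == L
toy_split = lambda parameter_index : (parameter_index % N, parameter_index // N)
print(toy_split(0), toy_split(L - 1))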
###Output
[50L, 108L, 5400L, 5400L]
###Markdown
Importantly, this factorization corresponds to a way to convert a parameter index (an integer) into a pair of integers, one in [0,50) and the other in [0,108), which we call the _gene parameter index_ and the _reduced parameter index_. The manner in which this is done is technical and has to do with how the integers encode combinatorial parameters using a mixed-radix system. Roughly speaking, the gene parameter index is obtained by extracting a digit from the mixed-radix representation of the parameter index, and what remains after removing the digit entirely (not just setting it to 0) is the reduced parameter index. This process can be reversed as well, so the original parameter index and the (GeneParameterIndex, ReducedParameterIndex) pair are equivalent representations. The preparation step we just performed created a table of the database's information sorted by ReducedParameterIndex first and GeneParameterIndex second. (The original database sorts by ParameterIndex.)

Performing a single-gene query

Now we perform a query. The result which the query returns is a graph. This graph contains the raw information obtained from the query in the form of a python dictionary (i.e. `{key1:value1, key2:value2,...}`) where the keys are gene parameter indices, and the values are tuples `(hexcode, parameter index, morsegraphindex)`.
###Code
graph = single_gene_query(43) # 43 is a "reduced parameter index"
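# graph.data (displayed below) maps gene parameter index -> (hex code, parameter index,
# Morse graph index); this illustrative comprehension pulls out only the Morse graph index.
mgi_by_gpi = { gpi : entry[2] for gpi, entry in graph.data.items() }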
graph.data
###Output
_____no_output_____
###Markdown
The query above returns the "MorseGraphIndex", which can be used with the database to retrieve the Morse graph. However, we might only want to know whether the Morse graph has a certain property. For example, we might want to know if it has one minimal node, or multiple (2 or more) minimal nodes. We create a function which takes a "MorseGraphIndex" and returns True if the associated Morse graph has multiple minimal nodes and False otherwise.

Visualizing the query

The above information describes a partially ordered set. In this poset each node corresponds to a parameter index. Each parameter index corresponds to a pair of sub-indices called the "GeneParameterIndex" and the "ReducedParameterIndex", which are the integers resulting from splitting out the "digit" corresponding to the logic parameter of the gene of interest. The "GeneParameterIndex" corresponds directly to the logic parameter of the gene of interest, which can also be represented with a "HexCode". Using the hex code representation we learn adjacency information (due to the GPG=CPG theorem). Since our query gives us all of this information, it can display itself as a graph of the labelled poset corresponding to the query. It also comes equipped with some methods for checking graph properties (as we demonstrate later). The nodes themselves are labelled according to their "ParameterIndex" and "MorseGraphIndex":
###Code
graph
###Output
_____no_output_____
###Markdown
Features of the graph query

In addition to being a graph, the query result has other attributes that are of use. In particular, the graph is as follows:

* The vertices of the graph (`graph.vertices`) are named according to Gene Parameter Index (gpi).
* `graph.edges` contains the directed edge p -> q iff p < q and the associated logic parameters are adjacent.
* The graph is (by default) labelled with pairs (Parameter index, Morse graph index). The default graph labelling can be changed by replacing the `label` attribute with a new function. A `label` function takes the vertex name (i.e. gpi) as input and returns a label string.
* The graph is (by default) colored blue. The default graph coloring can be changed by replacing the `color` attribute with a new function. A `color` function takes the vertex name as an input and returns a new color string.

In addition the following extra structures are provided:

* `graph.data` is a dictionary from gene parameter index to (hex code, parameter index, morse graph index)
* `graph.mgi` is a function which accepts a gpi and returns the associated Morse graph index
* `graph.num_inputs` is the number of network edges which are inputs to the gene associated with the query
* `graph.num_outputs` is the number of network edges which are outputs of the gene associated with the query
* `graph.essential` is a boolean-valued function which determines if each vertex corresponds to an essential parameter node

Changing the color to inspect node properties

In the above graph all the nodes have the same color. We can change this so that the color of the nodes reflects some property of our choosing. As an example, we might ask if a node has a Morse graph with multistability -- if so, we color the node red; otherwise we color the node blue. This is done as follows:
###Code
# Create a function which tells us if each vertex has the multistable property:
is_multistable = MultistableQuery(database)
# Change the coloring method of the graph to check for multistability:
graph.color = lambda v : "red" if is_multistable(v) else "blue"
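# Illustrative aside, not needed for the coloring itself: graph.essential (listed among the
# graph attributes above) can be used in the same way, e.g. to count vertices of this factor
# graph that correspond to essential parameter nodes. Assumes graph.vertices is iterable.
num_essential = sum(1 for v in graph.vertices if graph.essential(v))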
# Display the graph:
graph
###Output
_____no_output_____
###Markdown
Testing the query result

The above query indicates that some of the parameters associated with the query had multistability and some did not. In order to make sure everything is working properly, let's take an example of each class and draw the Morse graph. For instance, parameter index 2199 has Morse Graph 18, and is colored blue, which is supposed to correspond to a lack of multistability. We check this and find it is indeed the case:
###Code
database.DrawMorseGraph(18)
###Output
_____no_output_____
###Markdown
Similarly, our query result indicates parameter index 2180 corresponds to Morse Graph 84, which is colored red, indicating it _does_ exhibit multistability. We check this as well:
###Code
database.DrawMorseGraph(84)
###Output
_____no_output_____
###Markdown
SingleFixedPointQuery, DoubleFixedPointQuery

We have the capability to retrieve parameter indices for which an FP occurs in a certain location. We call these locations "domains". A domain can be indicated by which "bin" it corresponds to along each dimension. A bin is an interval along a given dimension bounded either by (a) two consecutive thresholds, (b) 0 below and the first threshold above, or (c) the last threshold below and unbounded above. In particular, for each dimension the number of thresholds is equal to the number of out-edges of the corresponding network node. If there are m such thresholds then there are m+1 locations (bins) along this dimension, which we label 0, 1, 2, ..., m. This allows us to describe the location of a domain by listing bin numbers for each dimension.

We can consider many domains at once, grouped together in rectangular prisms. To represent these, we create a dictionary object where for each variable we produce a key-value pair in which the key is the variable name and the value is a list of two integers [a,b], meaning that the variable can only occur in the bins between a and b (inclusive). If we omit a variable from the dictionary it is allowed to be in any bin. Also, if a=b we can simply write "a" instead of "[a,a]". For example:
###Code
bounds110 = {"X1":1,"X2":1,"X3":0} # Domain 1,1,0
bounds210 = {"X1":[2,2],"X2":[1,1],"X3":[0,1]} # Domain 2,1,0 or Domain 2,1,1
bounds311 = {"X1":[3,3],"X2":[1,1],"X3":[1,1]} # Domain 3,1,1
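# One more hypothetical bounds dictionary, not used below, illustrating the two conventions
# described above: a range [a,b] restricts a variable to bins a..b, and omitting a variable
# (here X3) leaves it unconstrained.
bounds_example = {"X1":[1,2], "X2":0}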
###Output
_____no_output_____
###Markdown
Using these "bounds" variables to represent groups of domains, we can use query functions which ask for the collection of morse graphs which have an "FP" node labelled with a domain in those bounds. For example, to find the set of Morse Graph indices corresponding to fixed points in the region specified by "bounds110":
###Code
matches110 = SingleFixedPointQuery(database, bounds110).matches()
###Output
2017-05-15 16:10:58.070742:
SingleFixedPointQuery :: initializing
2017-05-15 16:10:58.071756:
SingleFixedPointQuery :: calling MatchQuery
2017-05-15 16:10:58.072693:
MatchQuery({'X2': 1, 'X3': 0, 'X1': 1}, Matches)
2017-05-15 16:10:58.073469:
MatchQuery :: built expressions ["Label like 'FP { 1, _, _%'", "Label like 'FP { _, 1, _%'", "Label like 'FP { _, _, 0%'"]
2017-05-15 16:10:58.074192:
MatchQuery :: create temp table tmpMatches1 as select * from MorseGraphAnnotations where Label like 'FP { 1, _, _%';
2017-05-15 16:10:58.075960:
MatchQuery :: create temp table tmpMatches2 as select * from tmpMatches1 where Label like 'FP { _, 1, _%';
2017-05-15 16:10:58.076738:
MatchQuery :: create temp table Matches as select * from tmpMatches2 where Label like 'FP { _, _, 0%';
2017-05-15 16:10:58.077579:
MatchQuery :: constructed
2017-05-15 16:10:58.078225:
SingleFixedPointQuery :: select MorseGraphIndex from Matches;
2017-05-15 16:10:58.079000:
SingleFixedPointQuery :: drop table Matches;
2017-05-15 16:10:58.079720:
SingleFixedPointQuery :: constructed
###Markdown
Find set of Morse Graph indices corresponding to fixed points in the region specified by "bounds210":
###Code
matches210 = SingleFixedPointQuery(database, bounds210).matches()
###Output
2017-05-15 16:10:58.084631:
SingleFixedPointQuery :: initializing
2017-05-15 16:10:58.085543:
SingleFixedPointQuery :: calling MatchQuery
2017-05-15 16:10:58.086846:
MatchQuery({'X2': [1, 1], 'X3': [0, 1], 'X1': [2, 2]}, Matches)
2017-05-15 16:10:58.087479:
MatchQuery :: built expressions ["Label like 'FP { 2, _, _%'", "Label like 'FP { _, 1, _%'", "Label like 'FP { _, _, 0%' or Label like 'FP { _, _, 1%'"]
2017-05-15 16:10:58.088084:
MatchQuery :: create temp table tmpMatches1 as select * from MorseGraphAnnotations where Label like 'FP { 2, _, _%';
2017-05-15 16:10:58.089078:
MatchQuery :: create temp table tmpMatches2 as select * from tmpMatches1 where Label like 'FP { _, 1, _%';
2017-05-15 16:10:58.089944:
MatchQuery :: create temp table Matches as select * from tmpMatches2 where Label like 'FP { _, _, 0%' or Label like 'FP { _, _, 1%';
2017-05-15 16:10:58.090938:
MatchQuery :: constructed
2017-05-15 16:10:58.091900:
SingleFixedPointQuery :: select MorseGraphIndex from Matches;
2017-05-15 16:10:58.092890:
SingleFixedPointQuery :: drop table Matches;
2017-05-15 16:10:58.093663:
SingleFixedPointQuery :: constructed
###Markdown
Find the set of Morse Graph indices corresponding to fixed points in the region specified by "bounds311":
###Code
matches311 = SingleFixedPointQuery(database, bounds311).matches()
###Output
2017-05-15 16:10:58.098953:
SingleFixedPointQuery :: initializing
2017-05-15 16:10:58.100141:
SingleFixedPointQuery :: calling MatchQuery
2017-05-15 16:10:58.100989:
MatchQuery({'X2': [1, 1], 'X3': [1, 1], 'X1': [3, 3]}, Matches)
2017-05-15 16:10:58.101795:
MatchQuery :: built expressions ["Label like 'FP { 3, _, _%'", "Label like 'FP { _, 1, _%'", "Label like 'FP { _, _, 1%'"]
2017-05-15 16:10:58.102456:
MatchQuery :: create temp table tmpMatches1 as select * from MorseGraphAnnotations where Label like 'FP { 3, _, _%';
2017-05-15 16:10:58.103371:
MatchQuery :: create temp table tmpMatches2 as select * from tmpMatches1 where Label like 'FP { _, 1, _%';
2017-05-15 16:10:58.104231:
MatchQuery :: create temp table Matches as select * from tmpMatches2 where Label like 'FP { _, _, 1%';
2017-05-15 16:10:58.104981:
MatchQuery :: constructed
2017-05-15 16:10:58.105630:
SingleFixedPointQuery :: select MorseGraphIndex from Matches;
2017-05-15 16:10:58.106495:
SingleFixedPointQuery :: drop table Matches;
2017-05-15 16:10:58.107423:
SingleFixedPointQuery :: constructed
###Markdown
Find the set of Morse Graph indices with both a fixed point in 1,1,0 and a fixed point in 3,1,1:
###Code
matches_both = DoubleFixedPointQuery(database, bounds110,bounds311).matches()
len(matches110), len(matches210), len(matches311), len(matches_both)
matches_both
###Output
_____no_output_____
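###Markdown
As a quick consistency check (a sketch that relies only on the match lists computed above), every Morse Graph index returned by the double query should also appear in each of the single-query results, since having fixed points in both regions implies having a fixed point in each region separately.
###Code
# Hedged sanity check: the double-query matches should be contained in both single-query results.
double_query_consistent = set(matches_both) <= (set(matches110) & set(matches311))
###Output
_____no_output_____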
###Markdown
Queries on Graph PropertiesIt is possible to make queries about graph properties. If we have developed a set of queries about the vertices, we can ask several kinds of questions: 1) Does the minimal node have a certain property? 2) Does the maximal node have a certain property? 3) Must every path from the minimal node to the maximal node pass through a node with a certain property? We can even ask questions about how many paths from the minimal node to the maximal node have a certain property (or the fraction of paths). To help visualize the examples we color the graph "green", "blue", "red", and "yellow" according to each vertex's status with regard to the FP location query examples above. Specifically:
###Code
graph.color = lambda v : "green" if graph.mgi(v) in matches_both else ("blue" if graph.mgi(v) in matches210 else ( "yellow" if graph.mgi(v) in matches311 else "red"))
graph
minimum_gpi = 0
maximum_gpi = len(graph.vertices) - 1
###Output
_____no_output_____
###Markdown
Q1. Is the minimal node red?
###Code
graph.color(minimum_gpi) == "red"
###Output
_____no_output_____
###Markdown
Q2. Is the maximal node yellow?
###Code
graph.color(maximum_gpi) == "yellow"
###Output
_____no_output_____
###Markdown
Q3(a). Is there an essential green node?
###Code
any( graph.essential(v) and graph.color(v) == "green" for v in graph.vertices)
###Output
_____no_output_____
###Markdown
List all essential green nodes:
###Code
[v for v in graph.vertices if graph.essential(v) and graph.color(v) == "green"]
###Output
_____no_output_____
###Markdown
Q3(b). Does every path from min to max pass through a green node?
###Code
predicate = lambda v : graph.color(v) == "green"
graph.unavoidable(minimum_gpi,maximum_gpi,predicate)
###Output
_____no_output_____
###Markdown
No, they don't. What percentage of them pass through green?
###Code
subgraph = graph.subgraph(lambda v : not predicate(v))
number_missing_green = subgraph.numberOfPaths(minimum_gpi,maximum_gpi)
total_number = graph.numberOfPaths(minimum_gpi,maximum_gpi)
print(str((1.0 - float(number_missing_green)/float(total_number))*100.0) + "%")
###Output
11.0929853181%
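###Markdown
Since the same "remove the vertices satisfying a predicate and count the surviving paths" computation is repeated below for another color, it can be packaged into a small helper; this is a sketch built only from the graph methods already used above (subgraph and numberOfPaths).
###Code
# Hedged helper: fraction of source-to-target paths passing through at least one vertex
# that satisfies the predicate, computed by deleting those vertices and counting what remains.
def fraction_through(g, source, target, predicate):
    total = g.numberOfPaths(source, target)
    if total == 0:
        return 0.0
    missing = g.subgraph(lambda v: not predicate(v)).numberOfPaths(source, target)
    return 1.0 - float(missing) / float(total)
# e.g. fraction_through(graph, minimum_gpi, maximum_gpi, lambda v: graph.color(v) == "green")
###Output
_____no_output_____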
###Markdown
Q3(b)'. Does every path from min to max pass through a blue vertex?
###Code
predicate = lambda v : graph.color(v) == "blue"
graph.unavoidable(minimum_gpi,maximum_gpi,predicate)
###Output
_____no_output_____
###Markdown
This means there should be zero paths from the minimum to the maximum in the subgraph obtained by removing the blue vertices, correct?
###Code
subgraph = graph.subgraph(lambda v : graph.color(v) != "blue")
if subgraph.numberOfPaths(minimum_gpi,maximum_gpi) == 0: print("Correct.")
###Output
Correct.
###Markdown
Q3(c). Is there an intermediate (neither max nor min) green node?
###Code
any( v != minimum_gpi and v != maximum_gpi and graph.color(v) == "green" for v in graph.vertices)
###Output
_____no_output_____
###Markdown
Visualizing the Essential parameter nodes:
###Code
graph.color = lambda v : "red" if graph.essential(v) else "green"
graph
###Output
_____no_output_____
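###Markdown
Beyond the picture, it can be useful to quantify how common essential parameter nodes are in this factor graph; the following sketch uses only graph.vertices and graph.essential, which were already used above, and simply counts them.
###Code
# Hedged follow-up: count the essential parameter nodes and their fraction of the factor graph.
essential_nodes = [v for v in graph.vertices if graph.essential(v)]
fraction_essential = float(len(essential_nodes)) / float(len(graph.vertices))
###Output
_____no_output_____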
###Markdown
InducibilityQuery
###Code
inducibility_query_object = InducibilityQuery(database, "X1", bounds110, bounds311)
reduced_parameters = range(0, inducibility_query_object.GeneQuery.number_of_reduced_parameters())
[ inducibility_query_object(rpi) for rpi in reduced_parameters ][0:10]
###Output
2017-05-15 16:10:58.353433:
SingleGeneQuery(0)
2017-05-15 16:10:58.354894:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.355778:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.356459:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.357104:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.357759:
SingleGeneQuery(1)
2017-05-15 16:10:58.358425:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.359456:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.360496:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.361434:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.362208:
SingleGeneQuery(2)
2017-05-15 16:10:58.362863:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.363653:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.364236:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.364833:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.365628:
SingleGeneQuery(3)
2017-05-15 16:10:58.366598:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.367553:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.368145:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.368834:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.369519:
SingleGeneQuery(4)
2017-05-15 16:10:58.370149:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.371194:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.371773:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.372440:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.373106:
SingleGeneQuery(5)
2017-05-15 16:10:58.373705:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.374781:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.375421:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.376008:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.377098:
SingleGeneQuery(6)
2017-05-15 16:10:58.377754:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.378449:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.379010:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.379593:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.380218:
SingleGeneQuery(7)
2017-05-15 16:10:58.380860:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.383934:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.384591:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.385307:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.385999:
SingleGeneQuery(8)
2017-05-15 16:10:58.386715:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.387695:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.388506:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.389115:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.389772:
SingleGeneQuery(9)
2017-05-15 16:10:58.390627:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.391484:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.392165:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.392831:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.393500:
SingleGeneQuery(10)
2017-05-15 16:10:58.394136:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.394892:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.395520:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.396098:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.396770:
SingleGeneQuery(11)
2017-05-15 16:10:58.397470:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.398428:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.398969:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.399582:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.400219:
SingleGeneQuery(12)
2017-05-15 16:10:58.400857:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.401668:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.402247:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.402857:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.403605:
SingleGeneQuery(13)
2017-05-15 16:10:58.404810:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.405697:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.406392:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.407027:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.407631:
SingleGeneQuery(14)
2017-05-15 16:10:58.408333:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.409138:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.409987:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.410882:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.411985:
SingleGeneQuery(15)
2017-05-15 16:10:58.412748:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.413834:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.414470:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.415093:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.415705:
SingleGeneQuery(16)
2017-05-15 16:10:58.416423:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.417442:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.418071:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.418720:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.419324:
SingleGeneQuery(17)
2017-05-15 16:10:58.420092:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.421147:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.421757:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.422458:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.423037:
SingleGeneQuery(18)
2017-05-15 16:10:58.423805:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.424666:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.425200:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.425894:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.426518:
SingleGeneQuery(19)
2017-05-15 16:10:58.427133:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.427888:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.428439:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.429060:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.429637:
SingleGeneQuery(20)
2017-05-15 16:10:58.430324:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.431114:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.431678:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.432295:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.432865:
SingleGeneQuery(21)
2017-05-15 16:10:58.433497:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.434347:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.435186:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.435942:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.436614:
SingleGeneQuery(22)
2017-05-15 16:10:58.437431:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.438952:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.439673:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.440371:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.441083:
SingleGeneQuery(23)
2017-05-15 16:10:58.441803:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.442775:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.443372:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.443936:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.444568:
SingleGeneQuery(24)
2017-05-15 16:10:58.445166:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.445872:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.446492:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.446985:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.447541:
SingleGeneQuery(25)
2017-05-15 16:10:58.448196:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.448878:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.449483:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.450093:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.450804:
SingleGeneQuery(26)
2017-05-15 16:10:58.451410:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.452330:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.452917:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.453622:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.454402:
SingleGeneQuery(27)
2017-05-15 16:10:58.455195:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.456220:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.457031:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.457571:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.458217:
SingleGeneQuery(28)
2017-05-15 16:10:58.458951:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.459837:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.460826:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.461492:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.462125:
SingleGeneQuery(29)
2017-05-15 16:10:58.462769:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.463577:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.464131:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.464711:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.465373:
SingleGeneQuery(30)
2017-05-15 16:10:58.466197:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.467336:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.468027:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.468665:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.469309:
SingleGeneQuery(31)
2017-05-15 16:10:58.470025:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.470913:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.471761:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.472342:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.473070:
SingleGeneQuery(32)
2017-05-15 16:10:58.473719:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.474520:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.475099:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.475671:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.476242:
SingleGeneQuery(33)
2017-05-15 16:10:58.477011:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.478076:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.478669:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.479289:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.479913:
SingleGeneQuery(34)
2017-05-15 16:10:58.480506:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.481204:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.481823:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.482461:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.483081:
SingleGeneQuery(35)
2017-05-15 16:10:58.483951:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.484750:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.485376:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.486062:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.486758:
SingleGeneQuery(36)
2017-05-15 16:10:58.487703:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.488572:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.489313:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.489920:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.490572:
SingleGeneQuery(37)
2017-05-15 16:10:58.491254:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.492124:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.492742:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.493419:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.494334:
SingleGeneQuery(38)
2017-05-15 16:10:58.495108:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.495901:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.496455:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.497096:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.497756:
SingleGeneQuery(39)
2017-05-15 16:10:58.498451:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.499256:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.499804:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.500356:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.501189:
SingleGeneQuery(40)
2017-05-15 16:10:58.501786:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.502548:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.503246:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.503884:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.504697:
SingleGeneQuery(41)
2017-05-15 16:10:58.505899:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.507514:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.508202:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.508803:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.509452:
SingleGeneQuery(42)
2017-05-15 16:10:58.510125:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.512193:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.513145:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.513762:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.514322:
SingleGeneQuery(43)
2017-05-15 16:10:58.514941:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.516390:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.517170:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.517759:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.518489:
SingleGeneQuery(44)
2017-05-15 16:10:58.519243:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.520580:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.521290:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.522376:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.523243:
SingleGeneQuery(45)
2017-05-15 16:10:58.523966:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.526406:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.527367:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.528332:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.528891:
SingleGeneQuery(46)
2017-05-15 16:10:58.529581:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.530826:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.531477:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.532084:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.532742:
SingleGeneQuery(47)
2017-05-15 16:10:58.533459:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.535114:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.535745:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.536359:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.537062:
SingleGeneQuery(48)
2017-05-15 16:10:58.537714:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.538605:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.539269:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.539904:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.540650:
SingleGeneQuery(49)
2017-05-15 16:10:58.541673:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.542710:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.543357:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.544040:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.544749:
SingleGeneQuery(50)
2017-05-15 16:10:58.545301:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.546032:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.546616:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.547233:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.547795:
SingleGeneQuery(51)
2017-05-15 16:10:58.548469:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.549516:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.550217:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.550804:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.551415:
SingleGeneQuery(52)
2017-05-15 16:10:58.552114:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.553625:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.554769:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.555386:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.556166:
SingleGeneQuery(53)
2017-05-15 16:10:58.556811:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.558333:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.558978:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.559668:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.560516:
SingleGeneQuery(54)
2017-05-15 16:10:58.561684:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.563365:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.564115:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.564783:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.565422:
SingleGeneQuery(55)
2017-05-15 16:10:58.566112:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.567761:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.568507:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.569184:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.569970:
SingleGeneQuery(56)
2017-05-15 16:10:58.570616:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.572115:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.572792:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.573421:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.574216:
SingleGeneQuery(57)
2017-05-15 16:10:58.574960:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.576706:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.577388:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.578029:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.578862:
SingleGeneQuery(58)
2017-05-15 16:10:58.579583:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.581140:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.581695:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.582378:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.583083:
SingleGeneQuery(59)
2017-05-15 16:10:58.583771:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.585598:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.586277:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.587304:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.588272:
SingleGeneQuery(60)
2017-05-15 16:10:58.589508:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.591324:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.592195:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.593620:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.595310:
SingleGeneQuery(61)
2017-05-15 16:10:58.596178:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.598403:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.599319:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.599815:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.600790:
SingleGeneQuery(62)
2017-05-15 16:10:58.602121:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.604324:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.605062:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.606041:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.606976:
SingleGeneQuery(63)
2017-05-15 16:10:58.608472:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.610843:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.612144:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.612888:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.613629:
SingleGeneQuery(64)
2017-05-15 16:10:58.614322:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.615527:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.616142:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.616682:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.617365:
SingleGeneQuery(65)
2017-05-15 16:10:58.618327:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.619721:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.620366:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.621077:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.622296:
SingleGeneQuery(66)
2017-05-15 16:10:58.623091:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.625145:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.625883:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.627214:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.628380:
SingleGeneQuery(67)
2017-05-15 16:10:58.629387:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.632366:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.633353:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.634204:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.635014:
SingleGeneQuery(68)
2017-05-15 16:10:58.635831:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.637418:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.638055:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.638592:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.639299:
SingleGeneQuery(69)
2017-05-15 16:10:58.639871:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.641452:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.642080:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.643108:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.643724:
SingleGeneQuery(70)
2017-05-15 16:10:58.644431:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.645544:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.646139:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.646813:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.647413:
SingleGeneQuery(71)
2017-05-15 16:10:58.648139:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.649949:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.650646:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.651658:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.652347:
SingleGeneQuery(72)
2017-05-15 16:10:58.653343:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.654841:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.655626:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.656253:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.656885:
SingleGeneQuery(73)
2017-05-15 16:10:58.657492:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.659022:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.659997:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.660596:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.661265:
SingleGeneQuery(74)
2017-05-15 16:10:58.662147:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.664049:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.664679:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.665280:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.666030:
SingleGeneQuery(75)
2017-05-15 16:10:58.666722:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.669878:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.670590:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.671229:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.672120:
SingleGeneQuery(76)
2017-05-15 16:10:58.672799:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.674445:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.675502:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.676080:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.676843:
SingleGeneQuery(77)
2017-05-15 16:10:58.677570:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.679247:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.680036:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.681132:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.682185:
SingleGeneQuery(78)
2017-05-15 16:10:58.682908:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.684366:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.685013:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.685615:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.686488:
SingleGeneQuery(79)
2017-05-15 16:10:58.687127:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.688676:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.689675:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.690299:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.691046:
SingleGeneQuery(80)
2017-05-15 16:10:58.691664:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.693187:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.693815:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.694363:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.695383:
SingleGeneQuery(81)
2017-05-15 16:10:58.696205:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.697610:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.698326:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.698941:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.699584:
SingleGeneQuery(82)
2017-05-15 16:10:58.700268:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.702113:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.702749:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.703392:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.704074:
SingleGeneQuery(83)
2017-05-15 16:10:58.704843:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.706320:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.706904:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.707557:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.708622:
SingleGeneQuery(84)
2017-05-15 16:10:58.709401:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.710862:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.711610:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.712183:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.713206:
SingleGeneQuery(85)
2017-05-15 16:10:58.714193:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.715301:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.715841:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.716425:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.717094:
SingleGeneQuery(86)
2017-05-15 16:10:58.717803:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.719445:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.720119:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.720889:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.722009:
SingleGeneQuery(87)
2017-05-15 16:10:58.722699:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.724456:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.725247:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.725840:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.726539:
SingleGeneQuery(88)
2017-05-15 16:10:58.727481:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.729378:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.730484:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.731025:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.731637:
SingleGeneQuery(89)
2017-05-15 16:10:58.732323:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.733640:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.734436:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.735075:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.736050:
SingleGeneQuery(90)
2017-05-15 16:10:58.736777:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.738562:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.739800:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.740543:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.741208:
SingleGeneQuery(91)
2017-05-15 16:10:58.741970:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.743804:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.744602:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.745326:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.746519:
SingleGeneQuery(92)
2017-05-15 16:10:58.747200:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.748870:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.749492:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.750258:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.751169:
SingleGeneQuery(93)
2017-05-15 16:10:58.751853:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.752945:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.753639:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.754497:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.755349:
SingleGeneQuery(94)
2017-05-15 16:10:58.756373:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.757295:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.757862:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.758457:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.759202:
SingleGeneQuery(95)
2017-05-15 16:10:58.760038:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.761346:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.762378:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.763024:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.763727:
SingleGeneQuery(96)
2017-05-15 16:10:58.764401:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.765835:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.766485:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.767159:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.767860:
SingleGeneQuery(97)
2017-05-15 16:10:58.768619:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.770677:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.771728:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.772550:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.773262:
SingleGeneQuery(98)
2017-05-15 16:10:58.774016:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.775390:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.776049:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.776677:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.778012:
SingleGeneQuery(99)
2017-05-15 16:10:58.778721:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.779876:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.780431:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.780977:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.781674:
SingleGeneQuery(100)
2017-05-15 16:10:58.782267:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.783554:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.784107:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.784696:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.785435:
SingleGeneQuery(101)
2017-05-15 16:10:58.786257:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.787678:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.788295:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.789359:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.790382:
SingleGeneQuery(102)
2017-05-15 16:10:58.791029:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.792591:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.793284:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.794064:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.794982:
SingleGeneQuery(103)
2017-05-15 16:10:58.795747:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.796616:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.797160:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.797702:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.798278:
SingleGeneQuery(104)
2017-05-15 16:10:58.798957:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.800753:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.801565:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.802162:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.802922:
SingleGeneQuery(105)
2017-05-15 16:10:58.803968:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.805925:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.806638:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.807420:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.808211:
SingleGeneQuery(106)
2017-05-15 16:10:58.809041:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.811137:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.811960:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.812609:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.813409:
SingleGeneQuery(107)
2017-05-15 16:10:58.814071:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.814800:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.815366:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.816046:
SingleGeneQuery: graph attributes emplaced
###Markdown
HysteresisQuery
###Code
hysteresis_query_object = HysteresisQuery(database, "X1", bounds110, bounds311)
reduced_parameters = range(0, hysteresis_query_object.GeneQuery.number_of_reduced_parameters())
[ hysteresis_query_object(rpi) for rpi in reduced_parameters ][0:10]
###Output
2017-05-15 16:10:58.902577:
HysteresisQuery(0)
2017-05-15 16:10:58.903380:
SingleGeneQuery(0)
2017-05-15 16:10:58.904808:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.905998:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.906682:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.907365:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.907947:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.910048:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:58.911135:
HysteresisQuery: Alignment Graph has 0 vertices
2017-05-15 16:10:58.911872:
HysteresisQuery: Alignment Graph has 0 edges
2017-05-15 16:10:58.912592:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:58.913212:
HysteresisQuery: Returning.
2017-05-15 16:10:58.913848:
HysteresisQuery(1)
2017-05-15 16:10:58.914604:
SingleGeneQuery(1)
2017-05-15 16:10:58.915295:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.916995:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.917689:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.918311:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.918992:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.919739:
HysteresisQuery: Returning.
2017-05-15 16:10:58.920461:
HysteresisQuery(2)
2017-05-15 16:10:58.921137:
SingleGeneQuery(2)
2017-05-15 16:10:58.922005:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.923331:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.923920:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.924894:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.925618:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.927480:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:58.928211:
HysteresisQuery: Alignment Graph has 5 vertices
2017-05-15 16:10:58.929059:
HysteresisQuery: Alignment Graph has 5 edges
2017-05-15 16:10:58.929741:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:58.930348:
HysteresisQuery: Returning.
2017-05-15 16:10:58.931161:
HysteresisQuery(3)
2017-05-15 16:10:58.932183:
SingleGeneQuery(3)
2017-05-15 16:10:58.932949:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.934306:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.935046:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.936011:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.937113:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.937819:
HysteresisQuery: Returning.
2017-05-15 16:10:58.938517:
HysteresisQuery(4)
2017-05-15 16:10:58.939502:
SingleGeneQuery(4)
2017-05-15 16:10:58.940342:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.942338:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.943003:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.943696:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.944314:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.945090:
HysteresisQuery: Returning.
2017-05-15 16:10:58.945748:
HysteresisQuery(5)
2017-05-15 16:10:58.946478:
SingleGeneQuery(5)
2017-05-15 16:10:58.947445:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.948184:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.948733:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.949310:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.950000:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.950710:
HysteresisQuery: Returning.
2017-05-15 16:10:58.951455:
HysteresisQuery(6)
2017-05-15 16:10:58.952710:
SingleGeneQuery(6)
2017-05-15 16:10:58.953471:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.954912:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.955584:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.956507:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.957166:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.957860:
HysteresisQuery: Returning.
2017-05-15 16:10:58.958808:
HysteresisQuery(7)
2017-05-15 16:10:58.959560:
SingleGeneQuery(7)
2017-05-15 16:10:58.960658:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.961646:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.962249:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.962966:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.964020:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.965669:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:58.966247:
HysteresisQuery: Alignment Graph has 10 vertices
2017-05-15 16:10:58.966769:
HysteresisQuery: Alignment Graph has 12 edges
2017-05-15 16:10:58.967386:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:58.968052:
HysteresisQuery: Returning.
2017-05-15 16:10:58.969019:
HysteresisQuery(8)
2017-05-15 16:10:58.969605:
SingleGeneQuery(8)
2017-05-15 16:10:58.970336:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.971906:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.972574:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.973543:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.974106:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.976146:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:58.976840:
HysteresisQuery: Alignment Graph has 15 vertices
2017-05-15 16:10:58.977497:
HysteresisQuery: Alignment Graph has 17 edges
2017-05-15 16:10:58.978204:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:58.978955:
HysteresisQuery: Returning.
2017-05-15 16:10:58.979647:
HysteresisQuery(9)
2017-05-15 16:10:58.980219:
SingleGeneQuery(9)
2017-05-15 16:10:58.981026:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.982593:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.983251:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.983916:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.984679:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.985524:
HysteresisQuery: Returning.
2017-05-15 16:10:58.986477:
HysteresisQuery(10)
2017-05-15 16:10:58.987614:
SingleGeneQuery(10)
2017-05-15 16:10:58.988629:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.990807:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.991625:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.992311:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:58.993238:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:58.994017:
HysteresisQuery: Returning.
2017-05-15 16:10:58.994755:
HysteresisQuery(11)
2017-05-15 16:10:58.995484:
SingleGeneQuery(11)
2017-05-15 16:10:58.996191:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:58.998134:
SingleGeneQuery: Q constructed
2017-05-15 16:10:58.998861:
SingleGeneQuery: graph constructed
2017-05-15 16:10:58.999543:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.000169:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.000920:
HysteresisQuery: Returning.
2017-05-15 16:10:59.001702:
HysteresisQuery(12)
2017-05-15 16:10:59.002356:
SingleGeneQuery(12)
2017-05-15 16:10:59.003104:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.004450:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.005065:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.005943:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.006843:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.007595:
HysteresisQuery: Returning.
2017-05-15 16:10:59.008673:
HysteresisQuery(13)
2017-05-15 16:10:59.009242:
SingleGeneQuery(13)
2017-05-15 16:10:59.010023:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.011615:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.012278:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.013088:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.013693:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.015332:
HysteresisQuery: Alignment Graph Constructed.
2017-05-15 16:10:59.016033:
HysteresisQuery: Alignment Graph has 10 vertices
2017-05-15 16:10:59.016881:
HysteresisQuery: Alignment Graph has 12 edges
2017-05-15 16:10:59.017591:
HysteresisQuery: Reachability computed.
2017-05-15 16:10:59.018199:
HysteresisQuery: Returning.
2017-05-15 16:10:59.018818:
HysteresisQuery(14)
2017-05-15 16:10:59.019446:
SingleGeneQuery(14)
2017-05-15 16:10:59.020220:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.021133:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.021834:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.022746:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.023534:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.024399:
HysteresisQuery: Returning.
2017-05-15 16:10:59.024990:
HysteresisQuery(15)
2017-05-15 16:10:59.025591:
SingleGeneQuery(15)
2017-05-15 16:10:59.027014:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.029072:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.029813:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.030511:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.031094:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.031898:
HysteresisQuery: Returning.
2017-05-15 16:10:59.032490:
HysteresisQuery(16)
2017-05-15 16:10:59.033130:
SingleGeneQuery(16)
2017-05-15 16:10:59.033784:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.035360:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.036009:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.036657:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.037307:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.038082:
HysteresisQuery: Returning.
2017-05-15 16:10:59.038853:
HysteresisQuery(17)
2017-05-15 16:10:59.039787:
SingleGeneQuery(17)
2017-05-15 16:10:59.040417:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.041563:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.042247:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.043859:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.044520:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.045334:
HysteresisQuery: Returning.
2017-05-15 16:10:59.045903:
HysteresisQuery(18)
2017-05-15 16:10:59.046467:
SingleGeneQuery(18)
2017-05-15 16:10:59.047162:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.048473:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.049201:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.049986:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.050919:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.051778:
HysteresisQuery: Returning.
2017-05-15 16:10:59.052613:
HysteresisQuery(19)
2017-05-15 16:10:59.053291:
SingleGeneQuery(19)
2017-05-15 16:10:59.054032:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.055353:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.055966:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.057071:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.057676:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.058964:
HysteresisQuery: Returning.
2017-05-15 16:10:59.059607:
HysteresisQuery(20)
2017-05-15 16:10:59.060203:
SingleGeneQuery(20)
2017-05-15 16:10:59.061025:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.063321:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.063962:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.064618:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.065289:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.065990:
HysteresisQuery: Returning.
2017-05-15 16:10:59.066858:
HysteresisQuery(21)
2017-05-15 16:10:59.067435:
SingleGeneQuery(21)
2017-05-15 16:10:59.068095:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.069749:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.070459:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.071365:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.072181:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.073097:
HysteresisQuery: Returning.
2017-05-15 16:10:59.073673:
HysteresisQuery(22)
2017-05-15 16:10:59.074284:
SingleGeneQuery(22)
2017-05-15 16:10:59.075093:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.076762:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.077470:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.078363:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.078960:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.079646:
HysteresisQuery: Returning.
2017-05-15 16:10:59.080246:
HysteresisQuery(23)
2017-05-15 16:10:59.080846:
SingleGeneQuery(23)
2017-05-15 16:10:59.081562:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.082997:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.083616:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.084219:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.084861:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.085622:
HysteresisQuery: Returning.
2017-05-15 16:10:59.086305:
HysteresisQuery(24)
2017-05-15 16:10:59.087071:
SingleGeneQuery(24)
2017-05-15 16:10:59.087698:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.088541:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.089242:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.089970:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.090765:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.091447:
HysteresisQuery: Returning.
2017-05-15 16:10:59.092095:
HysteresisQuery(25)
2017-05-15 16:10:59.092684:
SingleGeneQuery(25)
2017-05-15 16:10:59.093507:
SingleGeneQuery: SQL statement executed
2017-05-15 16:10:59.094550:
SingleGeneQuery: Q constructed
2017-05-15 16:10:59.095327:
SingleGeneQuery: graph constructed
2017-05-15 16:10:59.096054:
SingleGeneQuery: graph attributes emplaced
2017-05-15 16:10:59.096671:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.097426:
HysteresisQuery: Returning.
[... the same HysteresisQuery / SingleGeneQuery progress messages repeat for HysteresisQuery(26) through HysteresisQuery(107), with occasional Alignment Graph vertex/edge counts; omitted for brevity ...]
2017-05-15 16:10:59.719224:
HysteresisQuery: Search Graph Constructed.
2017-05-15 16:10:59.720057:
HysteresisQuery: Returning.
###Markdown
DSGRN Query Functions
###Code
from DSGRN import *
database = Database("querytest.db")
database.parametergraph.dimension()
###Output
_____no_output_____
###Markdown
We show here the network being considered in this example:
###Code
database
print(database.network.specification())
###Output
X1 : (X1)(~X3)
X2 : X1
X3 : (X1)(~X2)
###Markdown
Query Overview

In order to perform queries on the database sometimes preprocessing is necessary. In order to give a uniform approach to this we have adopted a design where each query corresponds to a python class whose name ends with the suffix `Query`. Each class has a constructor (i.e. `__init__` method) which accepts some arguments to indicate parameters of the query (e.g. which database).

We currently have the following queries:

| Name | Query Parameters | Query Input | Query Output |
| ---- | ----------- | ------------ | --- |
| MonostableQuery | Database | Morse Graph Index | True/False |
| BistableQuery | Database | Morse Graph Index | True/False |
| MultistableQuery | Database | Morse Graph Index | True/False |
| SingleGeneQuery | Database, Name of Network Node | Reduced Parameter Index | Annotated Factor Graph |
| SingleFixedPointQuery | Database, Domain Bounds | Morse Graph Index | True/False |
| DoubleFixedPointQuery | Database, pair of Domain Bounds | Morse Graph Index | True/False |
| MonostableFixedPointQuery | Database, Domain Bounds | Morse Graph Index | True/False |
| InducibilityQuery | Database, Name of Network Node, pair of Domain Bounds | Reduced Parameter Index | Triple of True/False |
| HysteresisQuery | Database, Name of Network Node, pair of Domain Bounds | Reduced Parameter Index | True/False |

When the query object is constructed, it is passed the required parameters and any preprocessing that is required to support the query is done. In some cases the preprocessing is trivial, and in other cases it may be more extensive. After the object is constructed, it can be used to perform queries. This is accomplished by invoking the object's `__call__` operator (i.e. treating the object as a function). The call operator receives the query input and returns the query output. For example:

```
single_gene_query = SingleGeneQuery(database, "X1")
graph = single_gene_query(43)
```

In the first line, the query object is created with the query parameters `database` and `"X1"`. This results in computation being done to organize a table in the database to quickly support "Single Gene Queries". The created object `single_gene_query` has a method `__call__` which allows it to be called as a function in order to produce query results. The input of the `__call__` method is a "reduced parameter index" and what is returned will be an annotated graph structure specific to what this query does.

In many cases the input to the query is a Morse Graph Index and the output is a boolean value which indicates whether or not the morse graph index is in a precomputed set of matches. These query classes typically also support another method `matches` which simply returns the set of matches. This allows the following code:

```
set_of_matches = SingleFixedPointQuery(database, domain_bounds).matches()
```

In this code, a query object is created, the `matches` method is called and returns the set of matches, but no reference to the query object is kept. When using this paradigm one should be careful not to unnecessarily create the same query multiple times, or else the same preprocessing step would be repeated.

MonostableQuery, BistableQuery, and MultistableQuery
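Before constructing these query objects, here is a minimal sketch of the construct-once, call-many-times pattern described above (it uses `MonostableQuery`, which is built for real in the next cell; the loop bound is arbitrary and purely illustrative):

```
# Construct the query object once; its preprocessing runs a single time.
monostable_query = MonostableQuery(database)

# Reuse the same object for repeated queries on different Morse graph indices.
results = {mgi: monostable_query(mgi) for mgi in range(5)}

# Avoid re-constructing the query inside a loop, e.g.
#   [MonostableQuery(database)(mgi) for mgi in range(5)]
# since that would repeat the preprocessing step on every iteration.
```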
###Code
monostable_query_object = MonostableQuery(database)
bistable_query_object = BistableQuery(database)
multistable_query_object = MultistableQuery(database)
###Output
2021-03-26 11:38:17.836408:
MonostableQuery :: initializing
2021-03-26 11:38:17.841783:
MonostableQuery :: select MorseGraphIndex from (select MorseGraphIndex, count(*) as StableCount from (select MorseGraphIndex,Vertex from MorseGraphVertices except select MorseGraphIndex,Source from MorseGraphEdges) group by MorseGraphIndex) where StableCount=1;
2021-03-26 11:38:17.844806:
MonostableQuery :: constructed
###Markdown
Evaluate the query on a few Morse Graph Indices:
###Code
monostable_query_object(0)
monostable_query_object(1)
###Output
_____no_output_____
###Markdown
How many matches for each type of query?
###Code
print([len(monostable_query_object.matches()), len(bistable_query_object.matches()), len(multistable_query_object.matches())])
###Output
[45, 98, 110]
###Markdown
Print the list of Morse graph indices which satisfy the monostable query.
###Code
print(monostable_query_object.matches())
###Output
frozenset({0, 2, 3, 130, 131, 6, 9, 10, 11, 18, 19, 20, 21, 146, 148, 25, 26, 153, 28, 30, 32, 34, 36, 38, 40, 43, 49, 50, 53, 55, 56, 59, 60, 74, 75, 78, 79, 89, 96, 102, 104, 113, 122, 123, 127})
###Markdown
Directly verify that all returned matches satisfy the corresponding query:
###Code
all( monostable_query_object(mgi) for mgi in monostable_query_object.matches() )
database.DrawMorseGraph(131)
###Output
_____no_output_____
###Markdown
SingleGeneQuery

Our interest is in fixing all combinatorial parameters except for the logic parameter corresponding to a single node and considering the set of parameters corresponding to this choice. Due to the factorization of the parameter graph, this set of parameters is isomorphic to the factor graph associated to the node of interest. In order to handle repeated queries efficiently, it is necessary to prepare a table which reorders information so that it is I/O efficient for algorithms to retrieve. The following does this:
###Code
single_gene_query = SingleGeneQuery(database, "X1")
###Output
2021-03-26 11:38:19.277785:
SingleGeneQuery(querytest.db, X1)
2021-03-26 11:38:19.291034:
SingleGeneQuery: FactorGraph generated
2021-03-26 11:38:19.292047:
SingleGeneQuery: SingleGeneQuery attribute missing from python database object.
2021-03-26 11:38:19.293239:
SingleGeneQuery: SingleGeneQuery attributes created.
2021-03-26 11:38:19.294424:
SingleGeneQuery: database structure unaware of gene X1
2021-03-26 11:38:19.295273:
SingleGeneQuery: sanitized X1
2021-03-26 11:38:19.296485:
SingleGeneQuery: cursor constructed
2021-03-26 11:38:19.300644:
SingleGeneQuery: checked for table
2021-03-26 11:38:19.301877:
SingleGeneQuery: added gene to python database object.
2021-03-26 11:38:19.302650:
SingleGeneQuery: constructed
###Markdown
For a single gene query, the queries are graphs isomorphic to the factor graph, and the number of such queries corresponds to the number of "reduced parameter indices". This will be explained in more depth shortly. To help explain this we first examine the following computation:
###Code
N = single_gene_query.number_of_gene_parameters()
M = single_gene_query.number_of_reduced_parameters()
L = database.parametergraph.size()
print([N, M, N*M, L])
###Output
[50, 108, 5400, 5400]
###Markdown
Importantly, this factorization corresponds to a way to convert a parameter index (an integer) into a pair of integers, one in [0,50) and the other in [0,108), which we call the _gene parameter index_ and the _reduced parameter index_. The manner in which this is done is technical and has to do with how the integers encode combinatorial parameters using a mixed-radix system. Roughly speaking, the gene parameter index is obtained by extracting a digit from the mixed-radix representation of the parameter index, and what remains after removing the digit entirely (not just setting it to 0) is the reduced parameter index. This process can be reversed as well, so both the original parameter index and the (GeneParameterIndex, ReducedParameterIndex) pair are equivalent representations. What the prepare step we just accomplished did was create a table with the database's information sorted by ReducedParameterIndex first and GeneParameterIndex second. (The original database sorts by ParameterIndex.)

Performing a single-gene query

Now we perform a query. The result which the query returns is a graph. This graph carries a `data` attribute holding the raw information obtained from the query in the form of a python dictionary (i.e. `{key1:value1, key2:value2,...}`) where the keys are gene parameter indices, and the values are tuples `(hexcode, parameter index, morsegraphindex)`.
###Code
graph = single_gene_query(43) # 43 is a "reduced parameter index"
graph.data
###Output
_____no_output_____
###Markdown
The query above returns the "MorseGraphIndex" which can be used with the database to retrieve the Morse graph. However we might only want to know if the Morse graph has a certain property. For example, we might want to know if it has 1 minimal node, or multiple (2 or more) minimal nodes. We create a function which takes a "MorseGraphIndex" and returns True if the associated Morse graph has multiple minimal nodes and False otherwise (this is done below with `MultistableQuery`).

Visualizing the query

The above information describes a partially ordered set. In this poset each node corresponds to a parameter index. Each parameter index corresponds to a pair of sub-indices called the "GeneParameterIndex" and the "ReducedParameterIndex" which are the integers resulting from splitting out the "digit" corresponding to the logic parameter of the gene of interest. The "GeneParameterIndex" corresponds directly to the logic parameter of the gene of interest, which can also be represented with a "HexCode". Using the hex code representation we learn adjacency information (due to the GPG=CPG theorem). Since our query gives us all of this information, the query automatically determines adjacency and can display itself as a graph of the labelled poset corresponding to the query. It also comes equipped with some methods for checking graph properties (as we demonstrate later). The nodes themselves are labelled according to their "ParameterIndex" and "MorseGraphIndex":
###Code
graph
###Output
_____no_output_____
###Markdown
Features of the graph query

In addition to being a graph there are other attributes of the query result that are of use. In particular, the graph is as follows:

* The vertices of the graph (`graph.vertices`) are named according to Gene Parameter Index (gpi).
* `graph.edges` contains the directed edge p -> q iff p < q and the associated logic parameters are adjacent.
* The graph is (by default) labelled with pairs (Parameter index, Morse graph index). The default graph labelling can be changed by replacing the `label` attribute with a new function. A `label` function takes the vertex name (i.e. gpi) as input and returns a label string.
* The graph is (by default) colored blue. The default graph coloring can be changed by replacing the `color` attribute with a new function. A `color` function takes the vertex name as an input and returns a new color string.

In addition the following extra structures are provided:

* `graph.data` is a dictionary from gene parameter index to (hex code, parameter index, morse graph index)
* `graph.mgi` is a function which accepts a gpi and returns the associated Morse graph index
* `graph.num_inputs` is the number of network edges which are inputs to the gene associated with the query
* `graph.num_outputs` is the number of network edges which are outputs of the gene associated with the query
* `graph.essential` is a boolean-valued function which determines if each vertex corresponds to an essential parameter node

Changing the color to inspect node properties

In the above graph all the nodes have the same color. We can change this so that the color of the nodes reflects some property of our choosing. As an example, we might ask if a node has a Morse graph with multistability -- if so, we can color the node red, otherwise we can color the node blue. This is done as follows:
###Code
# Create a function which tells us if each vertex has the multistable property:
is_multistable = MultistableQuery(database)
# Change the coloring method of the graph to check for multistability:
graph.color = lambda v : "red" if is_multistable(v) else "blue"
# Display the graph:
graph
###Output
_____no_output_____
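###Markdown
The `label` attribute described above can be customized in the same way as `color`. For instance, a small sketch (assuming, as documented earlier, that `graph.data[v]` is the `(hex code, parameter index, Morse graph index)` tuple for vertex `v`, and that re-displaying `graph` picks up the new labels):

```
# Label each vertex with its gene parameter index and hex code
# instead of the default (parameter index, Morse graph index) pair.
graph.label = lambda v : "{}: {}".format(v, graph.data[v][0])
graph
```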
###Markdown
Testing the query result

The above query indicates that some of the parameters associated with the query had multistability and some did not. In order to make sure everything is working properly, let's take an example of each class and draw the Morse graph. For instance, parameter index 2199 has Morse Graph 18, and is colored blue, which is supposed to correspond to a lack of multistability. We check this and find it is indeed the case:
###Code
database.DrawMorseGraph(18)
###Output
_____no_output_____
###Markdown
Similarly, our query result indicates parameter index 2180 corresponds to Morse Graph 84, which is colored red, indicating it _does_ exhibit multistability. We check this as well:
###Code
database.DrawMorseGraph(84)
###Output
_____no_output_____
###Markdown
SingleFixedPointQuery, DoubleFixedPointQuery

We have the capability to retrieve parameter indices for which an FP occurs in a certain location. We call these locations "domains". A domain can be indicated by which "bin" it corresponds to along each dimension. A bin is an interval bounded by either (a) consecutive thresholds in a given dimension, (b) 0 and the first threshold, or (c) bounded below by the last threshold and unbounded above. In particular, for each dimension the number of thresholds is equal to the number of out-edges of the corresponding network node. If there are m such thresholds then there are m+1 locations (bins) along this dimension, which we label 0, 1, 2, ..., m. This allows us to describe the location of a domain by listing bin numbers for each dimension.

We can consider many domains at once which are grouped together in rectangular prisms. To represent these, we create a dictionary object where for each variable we produce a key-value pair: the key is the variable name and the value is a list of two integers [a,b], meaning that the variable can only occur in the bins between a and b (inclusive). If we omit a variable from the dictionary it is allowed to be in any bin. Also, if a=b we can simply write "a" instead of "[a,a]". For example:
###Code
bounds110 = {"X1":1,"X2":1,"X3":0} # Domain 1,1,0
bounds210 = {"X1":[2,2],"X2":[1,1],"X3":[0,1]} # Domain 2,1,0 or Domain 2,1,1
bounds311 = {"X1":[3,3],"X2":[1,1],"X3":[1,1]} # Domain 3,1,1
###Output
_____no_output_____
###Markdown
Using these "bounds" variables to represent groups of domains, we can use query functions which ask for the collection of morse graphs which have an "FP" node labelled with a domain in those bounds. For example, to find the set of Morse Graph indices corresponding to fixed points in the region specified by "bounds110":
###Code
matches110 = SingleFixedPointQuery(database, bounds110).matches()
###Output
2021-03-26 11:38:24.801402:
SingleFixedPointQuery :: initializing
2021-03-26 11:38:24.806637:
SingleFixedPointQuery :: calling MatchQuery
2021-03-26 11:38:24.808801:
MatchQuery({'X1': 1, 'X2': 1, 'X3': 0}, Matches)
2021-03-26 11:38:24.810941:
MatchQuery :: built expressions ["Label like 'FP { 1, _, _%'", "Label like 'FP { _, 1, _%'", "Label like 'FP { _, _, 0%'"]
2021-03-26 11:38:24.813198:
MatchQuery :: create temp table tmpMatches1 as select * from MorseGraphAnnotations where Label like 'FP { 1, _, _%';
2021-03-26 11:38:24.820971:
MatchQuery :: create temp table tmpMatches2 as select * from tmpMatches1 where Label like 'FP { _, 1, _%';
2021-03-26 11:38:24.824318:
MatchQuery :: create temp table Matches as select * from tmpMatches2 where Label like 'FP { _, _, 0%';
2021-03-26 11:38:24.828054:
MatchQuery :: constructed
2021-03-26 11:38:24.829777:
SingleFixedPointQuery :: select MorseGraphIndex from Matches;
2021-03-26 11:38:24.830890:
SingleFixedPointQuery :: drop table Matches;
2021-03-26 11:38:24.832317:
SingleFixedPointQuery :: constructed
###Markdown
Find set of Morse Graph indices corresponding to fixed points in the region specified by "bounds210":
###Code
matches210 = SingleFixedPointQuery(database, bounds210).matches()
###Output
2021-03-26 11:38:24.843469:
SingleFixedPointQuery :: initializing
2021-03-26 11:38:24.844934:
SingleFixedPointQuery :: calling MatchQuery
2021-03-26 11:38:24.846461:
MatchQuery({'X1': [2, 2], 'X2': [1, 1], 'X3': [0, 1]}, Matches)
2021-03-26 11:38:24.849297:
MatchQuery :: built expressions ["Label like 'FP { 2, _, _%'", "Label like 'FP { _, 1, _%'", "Label like 'FP { _, _, 0%' or Label like 'FP { _, _, 1%'"]
2021-03-26 11:38:24.853661:
MatchQuery :: create temp table tmpMatches1 as select * from MorseGraphAnnotations where Label like 'FP { 2, _, _%';
2021-03-26 11:38:24.855543:
MatchQuery :: create temp table tmpMatches2 as select * from tmpMatches1 where Label like 'FP { _, 1, _%';
2021-03-26 11:38:24.858092:
MatchQuery :: create temp table Matches as select * from tmpMatches2 where Label like 'FP { _, _, 0%' or Label like 'FP { _, _, 1%';
2021-03-26 11:38:24.859825:
MatchQuery :: constructed
2021-03-26 11:38:24.860900:
SingleFixedPointQuery :: select MorseGraphIndex from Matches;
2021-03-26 11:38:24.863301:
SingleFixedPointQuery :: drop table Matches;
2021-03-26 11:38:24.865939:
SingleFixedPointQuery :: constructed
###Markdown
Find set of Morse Graph indices corresponding to fixed points in the region specified by "bounds311":
###Code
matches311 = SingleFixedPointQuery(database, bounds311).matches()
###Output
2021-03-26 11:38:24.875990:
SingleFixedPointQuery :: initializing
2021-03-26 11:38:24.877216:
SingleFixedPointQuery :: calling MatchQuery
2021-03-26 11:38:24.879614:
MatchQuery({'X1': [3, 3], 'X2': [1, 1], 'X3': [1, 1]}, Matches)
2021-03-26 11:38:24.881177:
MatchQuery :: built expressions ["Label like 'FP { 3, _, _%'", "Label like 'FP { _, 1, _%'", "Label like 'FP { _, _, 1%'"]
2021-03-26 11:38:24.883614:
MatchQuery :: create temp table tmpMatches1 as select * from MorseGraphAnnotations where Label like 'FP { 3, _, _%';
2021-03-26 11:38:24.884952:
MatchQuery :: create temp table tmpMatches2 as select * from tmpMatches1 where Label like 'FP { _, 1, _%';
2021-03-26 11:38:24.886373:
MatchQuery :: create temp table Matches as select * from tmpMatches2 where Label like 'FP { _, _, 1%';
2021-03-26 11:38:24.888247:
MatchQuery :: constructed
2021-03-26 11:38:24.889070:
SingleFixedPointQuery :: select MorseGraphIndex from Matches;
2021-03-26 11:38:24.890719:
SingleFixedPointQuery :: drop table Matches;
2021-03-26 11:38:24.892467:
SingleFixedPointQuery :: constructed
###Markdown
Find the set of Morse Graph indices with both a fixed point in 1,1,0 and a fixed point in 3,1,1:
###Code
matches_both = DoubleFixedPointQuery(database, bounds110,bounds311).matches()
len(matches110), len(matches210), len(matches311), len(matches_both)
matches_both
###Output
_____no_output_____
###Markdown
Queries on Graph Properties

It is possible to make queries about graph properties. If we have developed a set of queries about the vertices, we can ask several kinds of questions:

1) Does the minimal node have a certain property?
2) Does the maximal node have a certain property?
3) Must every path from the minimal node to the maximal node pass through a node with a certain property?

We can even ask questions about how many paths from the minimal node to the maximal node have a certain property (or the fraction of paths). To help visualize the examples we color the graph "green", "blue", "red", and "yellow" according to each vertex's status with regard to the FP location query examples above. Specifically:
###Code
graph.color = lambda v : "green" if graph.mgi(v) in matches_both else ("blue" if graph.mgi(v) in matches210 else ( "yellow" if graph.mgi(v) in matches311 else "red"))
graph
minimum_gpi = 0
maximum_gpi = len(graph.vertices) - 1
###Output
_____no_output_____
###Markdown
Q1. Is the minimal node red?
###Code
graph.color(minimum_gpi) == "red"
###Output
_____no_output_____
###Markdown
Q2. Is the maximal node yellow?
###Code
graph.color(maximum_gpi) == "yellow"
###Output
_____no_output_____
###Markdown
Q3(a). Is there an essential green node?
###Code
any( graph.essential(v) and graph.color(v) == "green" for v in graph.vertices)
###Output
_____no_output_____
###Markdown
List all essential green nodes:
###Code
[v for v in graph.vertices if graph.essential(v) and graph.color(v) == "green"]
###Output
_____no_output_____
###Markdown
Q3(b). Does every path from min to max pass through green?
###Code
predicate = lambda v : graph.color(v) == "green"
graph.unavoidable(minimum_gpi,maximum_gpi,predicate)
###Output
_____no_output_____
###Markdown
No, they don't. What percentage of them pass through green?
###Code
subgraph = graph.subgraph(lambda v : not predicate(v))
number_missing_green = subgraph.numberOfPaths(minimum_gpi,maximum_gpi)
total_number = graph.numberOfPaths(minimum_gpi,maximum_gpi)
print(str((1.0 - float(number_missing_green)/float(total_number))*100.0) + "%")
###Output
11.092985318107662%
###Markdown
Q3(b)'. Does every path from min to max pass through a blue vertex?
###Code
predicate = lambda v : graph.color(v) == "blue"
graph.unavoidable(minimum_gpi,maximum_gpi,predicate)
###Output
_____no_output_____
###Markdown
Which means there are zero paths from minimum to maximum in the subgraph where we take out the blue vertices, correct?
###Code
subgraph = graph.subgraph(lambda v : graph.color(v) != "blue")
if subgraph.numberOfPaths(minimum_gpi,maximum_gpi) == 0: print("Correct.")
###Output
Correct.
###Markdown
Q3(c). Is there an intermediate (neither max nor min) green node?
###Code
any( v != minimum_gpi and v != maximum_gpi and graph.color(v) == "green" for v in graph.vertices)
###Output
_____no_output_____
###Markdown
Visualizing the Essential parameter nodes:
###Code
graph.color = lambda v : "red" if graph.essential(v) else "green"
graph
###Output
_____no_output_____
###Markdown
InducibilityQuery
###Code
inducibility_query_object = InducibilityQuery(database, "X1", bounds110, bounds311)
reduced_parameters = range(inducibility_query_object.GeneQuery.number_of_reduced_parameters())
[ inducibility_query_object(rpi) for rpi in reduced_parameters ][0:10]
###Output
2021-03-26 11:38:30.350106:
SingleGeneQuery(0)
2021-03-26 11:38:30.373098:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.376019:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.387189:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.399502:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.403822:
SingleGeneQuery(1)
2021-03-26 11:38:30.405776:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.407809:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.421185:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.423066:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.426723:
SingleGeneQuery(2)
2021-03-26 11:38:30.429211:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.430699:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.431578:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.433967:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.435796:
SingleGeneQuery(3)
2021-03-26 11:38:30.437403:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.439321:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.440789:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.442410:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.443246:
SingleGeneQuery(4)
2021-03-26 11:38:30.445277:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.447674:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.450529:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.452890:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.454125:
SingleGeneQuery(5)
2021-03-26 11:38:30.455116:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.457177:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.458231:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.459122:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.461325:
SingleGeneQuery(6)
2021-03-26 11:38:30.463664:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.466364:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.469647:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.472658:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.475511:
SingleGeneQuery(7)
2021-03-26 11:38:30.476885:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.479622:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.481635:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.483225:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.485281:
SingleGeneQuery(8)
2021-03-26 11:38:30.486518:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.491274:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.492419:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.495413:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.497081:
SingleGeneQuery(9)
2021-03-26 11:38:30.499760:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.501243:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.504215:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.507220:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.508622:
SingleGeneQuery(10)
2021-03-26 11:38:30.509400:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.510669:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.511886:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.512478:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.519548:
SingleGeneQuery(11)
2021-03-26 11:38:30.522364:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.524446:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.527173:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.528302:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.529886:
SingleGeneQuery(12)
2021-03-26 11:38:30.531211:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.535230:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.539546:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.541275:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.544133:
SingleGeneQuery(13)
2021-03-26 11:38:30.575266:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.584969:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.591960:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.600147:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.602922:
SingleGeneQuery(14)
2021-03-26 11:38:30.612159:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.615015:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.616272:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.620678:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.624429:
SingleGeneQuery(15)
2021-03-26 11:38:30.626547:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.635948:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.638910:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.640009:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.641707:
SingleGeneQuery(16)
2021-03-26 11:38:30.642940:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.644192:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.645557:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.647069:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.648248:
SingleGeneQuery(17)
2021-03-26 11:38:30.651227:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.655416:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.660705:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.662064:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.668278:
SingleGeneQuery(18)
2021-03-26 11:38:30.677882:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.678817:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.704341:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.712266:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.723666:
SingleGeneQuery(19)
2021-03-26 11:38:30.724455:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.726102:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.727788:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.729163:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.730635:
SingleGeneQuery(20)
2021-03-26 11:38:30.732785:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.736138:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.741559:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.743856:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.745455:
SingleGeneQuery(21)
2021-03-26 11:38:30.746332:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.747675:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.750838:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.752695:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.755669:
SingleGeneQuery(22)
2021-03-26 11:38:30.763536:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.765182:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.767945:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.773403:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.774812:
SingleGeneQuery(23)
2021-03-26 11:38:30.776152:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.777675:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.778547:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.781509:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.784828:
SingleGeneQuery(24)
2021-03-26 11:38:30.787031:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.789062:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.791245:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.793007:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.794590:
SingleGeneQuery(25)
2021-03-26 11:38:30.804034:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.810989:
SingleGeneQuery: Q constructed
2021-03-26 11:38:30.813586:
SingleGeneQuery: graph constructed
2021-03-26 11:38:30.824838:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:30.828820:
SingleGeneQuery(26)
2021-03-26 11:38:30.833697:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:30.837244:
SingleGeneQuery: Q constructed
###Markdown
HysteresisQuery
###Code
hysteresis_query_object = HysteresisQuery(database, "X1", bounds110, bounds311)
reduced_parameters = range(hysteresis_query_object.GeneQuery.number_of_reduced_parameters())
[ hysteresis_query_object(rpi) for rpi in reduced_parameters ][0:10]
###Output
2021-03-26 11:38:33.080333:
HysteresisQuery(0)
2021-03-26 11:38:33.083801:
SingleGeneQuery(0)
2021-03-26 11:38:33.085841:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:33.086945:
SingleGeneQuery: Q constructed
2021-03-26 11:38:33.088637:
SingleGeneQuery: graph constructed
2021-03-26 11:38:33.092945:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:33.095332:
HysteresisQuery: Search Graph Constructed.
2021-03-26 11:38:33.098977:
HysteresisQuery: Alignment Graph Constructed.
2021-03-26 11:38:33.100344:
HysteresisQuery: Alignment Graph has 0 vertices
2021-03-26 11:38:33.103879:
HysteresisQuery: Alignment Graph has 0 edges
2021-03-26 11:38:33.105305:
HysteresisQuery: Reachability computed.
2021-03-26 11:38:33.107778:
HysteresisQuery: Returning.
2021-03-26 11:38:33.109724:
HysteresisQuery(1)
2021-03-26 11:38:33.110992:
SingleGeneQuery(1)
2021-03-26 11:38:33.112525:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:33.115299:
SingleGeneQuery: Q constructed
2021-03-26 11:38:33.116398:
SingleGeneQuery: graph constructed
2021-03-26 11:38:33.117626:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:33.118530:
HysteresisQuery: Search Graph Constructed.
2021-03-26 11:38:33.119511:
HysteresisQuery: Returning.
2021-03-26 11:38:33.120557:
HysteresisQuery(2)
2021-03-26 11:38:33.121157:
SingleGeneQuery(2)
2021-03-26 11:38:33.122328:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:33.130924:
SingleGeneQuery: Q constructed
2021-03-26 11:38:33.135042:
SingleGeneQuery: graph constructed
2021-03-26 11:38:33.136447:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:33.137700:
HysteresisQuery: Search Graph Constructed.
2021-03-26 11:38:33.145372:
HysteresisQuery: Alignment Graph Constructed.
2021-03-26 11:38:33.147632:
HysteresisQuery: Alignment Graph has 5 vertices
2021-03-26 11:38:33.149153:
HysteresisQuery: Alignment Graph has 5 edges
2021-03-26 11:38:33.150960:
HysteresisQuery: Reachability computed.
2021-03-26 11:38:33.154577:
HysteresisQuery: Returning.
2021-03-26 11:38:33.155285:
HysteresisQuery(3)
2021-03-26 11:38:33.156354:
SingleGeneQuery(3)
2021-03-26 11:38:33.160102:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:33.164493:
SingleGeneQuery: Q constructed
2021-03-26 11:38:33.169171:
SingleGeneQuery: graph constructed
2021-03-26 11:38:33.170233:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:33.182343:
HysteresisQuery: Search Graph Constructed.
2021-03-26 11:38:33.187865:
HysteresisQuery: Returning.
2021-03-26 11:38:33.211485:
HysteresisQuery(4)
2021-03-26 11:38:33.212695:
SingleGeneQuery(4)
2021-03-26 11:38:33.214369:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:33.216432:
SingleGeneQuery: Q constructed
2021-03-26 11:38:33.219791:
SingleGeneQuery: graph constructed
2021-03-26 11:38:33.223336:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:33.227540:
HysteresisQuery: Search Graph Constructed.
2021-03-26 11:38:33.229143:
HysteresisQuery: Returning.
2021-03-26 11:38:33.230746:
HysteresisQuery(5)
2021-03-26 11:38:33.231865:
SingleGeneQuery(5)
2021-03-26 11:38:33.232919:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:33.234956:
SingleGeneQuery: Q constructed
2021-03-26 11:38:33.236434:
SingleGeneQuery: graph constructed
2021-03-26 11:38:33.237746:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:33.242952:
HysteresisQuery: Search Graph Constructed.
2021-03-26 11:38:33.244812:
HysteresisQuery: Returning.
2021-03-26 11:38:33.247509:
HysteresisQuery(6)
2021-03-26 11:38:33.250308:
SingleGeneQuery(6)
2021-03-26 11:38:33.253513:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:33.255722:
SingleGeneQuery: Q constructed
2021-03-26 11:38:33.259744:
SingleGeneQuery: graph constructed
2021-03-26 11:38:33.262647:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:33.264919:
HysteresisQuery: Search Graph Constructed.
2021-03-26 11:38:33.266015:
HysteresisQuery: Returning.
2021-03-26 11:38:33.267381:
HysteresisQuery(7)
2021-03-26 11:38:33.269747:
SingleGeneQuery(7)
2021-03-26 11:38:33.271474:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:33.273388:
SingleGeneQuery: Q constructed
2021-03-26 11:38:33.275394:
SingleGeneQuery: graph constructed
2021-03-26 11:38:33.279330:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:33.281004:
HysteresisQuery: Search Graph Constructed.
2021-03-26 11:38:33.283919:
HysteresisQuery: Alignment Graph Constructed.
2021-03-26 11:38:33.285458:
HysteresisQuery: Alignment Graph has 10 vertices
2021-03-26 11:38:33.286689:
HysteresisQuery: Alignment Graph has 12 edges
2021-03-26 11:38:33.289298:
HysteresisQuery: Reachability computed.
2021-03-26 11:38:33.293020:
HysteresisQuery: Returning.
2021-03-26 11:38:33.294512:
HysteresisQuery(8)
2021-03-26 11:38:33.296355:
SingleGeneQuery(8)
2021-03-26 11:38:33.297618:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:33.300863:
SingleGeneQuery: Q constructed
2021-03-26 11:38:33.301867:
SingleGeneQuery: graph constructed
2021-03-26 11:38:33.303971:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:33.308464:
HysteresisQuery: Search Graph Constructed.
2021-03-26 11:38:33.310695:
HysteresisQuery: Alignment Graph Constructed.
2021-03-26 11:38:33.313970:
HysteresisQuery: Alignment Graph has 15 vertices
2021-03-26 11:38:33.319561:
HysteresisQuery: Alignment Graph has 17 edges
2021-03-26 11:38:33.321688:
HysteresisQuery: Reachability computed.
2021-03-26 11:38:33.324313:
HysteresisQuery: Returning.
2021-03-26 11:38:33.331196:
HysteresisQuery(9)
2021-03-26 11:38:33.332343:
SingleGeneQuery(9)
2021-03-26 11:38:33.337983:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:33.340175:
SingleGeneQuery: Q constructed
2021-03-26 11:38:33.341625:
SingleGeneQuery: graph constructed
2021-03-26 11:38:33.343023:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:33.344340:
HysteresisQuery: Search Graph Constructed.
2021-03-26 11:38:33.345889:
HysteresisQuery: Returning.
2021-03-26 11:38:33.347284:
HysteresisQuery(10)
2021-03-26 11:38:33.348407:
SingleGeneQuery(10)
2021-03-26 11:38:33.353889:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:33.357213:
SingleGeneQuery: Q constructed
2021-03-26 11:38:33.358815:
SingleGeneQuery: graph constructed
2021-03-26 11:38:33.359762:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:33.362799:
HysteresisQuery: Search Graph Constructed.
2021-03-26 11:38:33.363797:
HysteresisQuery: Returning.
2021-03-26 11:38:33.366759:
HysteresisQuery(11)
2021-03-26 11:38:33.368039:
SingleGeneQuery(11)
2021-03-26 11:38:33.369876:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:33.371231:
SingleGeneQuery: Q constructed
2021-03-26 11:38:33.373950:
SingleGeneQuery: graph constructed
2021-03-26 11:38:33.375743:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:33.378674:
HysteresisQuery: Search Graph Constructed.
2021-03-26 11:38:33.384203:
HysteresisQuery: Returning.
2021-03-26 11:38:33.387707:
HysteresisQuery(12)
2021-03-26 11:38:33.388533:
SingleGeneQuery(12)
2021-03-26 11:38:33.389968:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:33.395094:
SingleGeneQuery: Q constructed
2021-03-26 11:38:33.400478:
SingleGeneQuery: graph constructed
2021-03-26 11:38:33.405713:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:33.413746:
HysteresisQuery: Search Graph Constructed.
2021-03-26 11:38:33.422112:
HysteresisQuery: Returning.
2021-03-26 11:38:33.423866:
HysteresisQuery(13)
2021-03-26 11:38:33.429181:
SingleGeneQuery(13)
2021-03-26 11:38:33.437266:
SingleGeneQuery: SQL statement executed
2021-03-26 11:38:33.444775:
SingleGeneQuery: Q constructed
2021-03-26 11:38:33.447447:
SingleGeneQuery: graph constructed
2021-03-26 11:38:33.454635:
SingleGeneQuery: graph attributes emplaced
2021-03-26 11:38:33.455412:
HysteresisQuery: Search Graph Constructed.
2021-03-26 11:38:33.458366:
HysteresisQuery: Alignment Graph Constructed.
2021-03-26 11:38:33.467791:
HysteresisQuery: Alignment Graph has 10 vertices
2021-03-26 11:38:33.469579:
HysteresisQuery: Alignment Graph has 12 edges
2021-03-26 11:38:33.471752:
HysteresisQuery: Reachability computed.
2021-03-26 11:38:33.475264:
HysteresisQuery: Returning.
|
devel/Error_Surf_Stack.ipynb | ###Markdown
Method:

1) Split a wave using set parameters, lots of noise, and variable polarisation.
2) Try to recover the splitting parameters by error surface stacking.
###Code
# 1. Generate Synthetic Data
noise_level = 0.1
fast = 0.
lag = 2.
delta = 0.1
listM = [ sw.EigenM(pol=np.random.randint(360),
noise=noise_level,
split = (fast, lag),
delta = delta,
lags=(4,)) for _ in range(40) ]
# 2. Collect in Stack
S = sw.measure.Stack(listM)
# 3. Plot
cax = plt.contourf(S.lags,S.degs,S.stack(),26,cmap='magma')
plt.colorbar(cax)
plt.show()
cax = plt.contourf(S.lags,S.degs,S.stackpdf(),26,cmap='magma')
plt.colorbar(cax)
plt.show()
cax = plt.contourf(S.lags,S.degs,S.wolfe_silver(),26,cmap='magma_r')
plt.colorbar(cax)
plt.show()
cax = plt.contourf(S.lags,S.degs,S.restivo_helffrich(),26,cmap='magma_r')
plt.colorbar(cax)
plt.show()
# 1. Generate Synthetic Data with variable noise
# noise_level = 0.1
fast = 0.
lag = 2.
delta = 0.1
listM = [ sw.EigenM(pol=np.random.randint(360),
noise=0.2*np.random.rand(1),
split = (fast,lag),
delta = delta,
lags=(4,)) for _ in range(40) ]
# 2. Collect in Stack
S = sw.eigval.Stack(listM)
# 3. Plot
cax = plt.contourf(S.lags,S.degs,S.stack(),26,cmap='magma')
plt.colorbar(cax)
plt.show()
cax = plt.contourf(S.lags,S.degs,S.stackpdf(),26,cmap='magma')
plt.colorbar(cax)
plt.show()
cax = plt.contourf(S.lags,S.degs,S.wolfe_silver(),26,cmap='magma_r')
plt.colorbar(cax)
plt.show()
cax=plt.contourf(S.lags,S.degs,S.restivo_helffrich(),26,cmap='magma_r')
plt.colorbar(cax)
plt.show()
def sigmoid (x): return 1/(1 + np.exp(-x))
x = np.linspace(0,22,300)
plt.plot(x,sigmoid(2*x-10))
a = sw.Pair()
b = a.copy()
b.rotateto(b.pol())
b.chop()
a.snrRH()
a.rotateto(a.pol())
# dat  # 'dat' is not defined in this notebook; leftover scratch reference
###Output
_____no_output_____ |
notebooks/Figure3.ipynb | ###Markdown
Clip the entire dataset to the region of interest
###Code
# Imports assumed from earlier in the analysis (not present in this notebook); input variables
# such as elevation_difference_fn, extent_clip, glac, and gdf are defined elsewhere.
import os
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import contextily as ctx
from shapely.geometry import Point
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib_scalebar.scalebar import ScaleBar
from pygeotools.lib import warplib, iolib, geolib

ds = warplib.memwarp_multi_fn([elevation_difference_fn,],extent=extent_clip)
ma = iolib.ds_getma(ds[0])
outfn = os.path.splitext(elevation_difference_fn)[0]+'_fig3_extent.tif'
iolib.writeGTiff(ma,outfn,src_ds=ds[0],ndv=-9999.0)
ds_final = warplib.memwarp_multi_fn([outfn,],t_srs='EPSG:3857',res='30')
ma = iolib.ds_getma(ds_final[0])
xmin,ymin,xmax,ymax = geolib.ds_extent(ds_final[0])
extent_fig = [xmin,xmax,ymin,ymax]
%matplotlib notebook
def point_convert(row):
geom = Point(row['lon'],row['lat'])
return geom
def cax_cbar(ax):
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
return cax
mt_everest_coord = (86.9250,27.9881) #lon,lat or x,y
Shishapangma_coord = (85.7792,28.3525)
df_point = pd.DataFrame({'name':['Mt. Everest', 'Mt. Shishapangma'], 'lon' : [86.9250,85.7792,],'lat':[27.9881,28.3525]})
df_point['geometry'] = df_point.apply(point_convert,axis=1)
gdf_point = gpd.GeoDataFrame(df_point,geometry='geometry',crs={'init':'epsg:4326'})
gdf_point = gdf_point.to_crs({'init':'epsg:3857'})
x_text = gdf_point.geometry.x.values
y_text = gdf_point.geometry.y.values
def cust_range(*args, rtol=1e-05, atol=1e-08, include=[True, False]):
#### from here https://stackoverflow.com/questions/50299172/python-range-or-numpy-arange-with-end-limit-include
"""
Combines numpy.arange and numpy.isclose to mimic
open, half-open and closed intervals.
Avoids also floating point rounding errors as with
>>> numpy.arange(1, 1.3, 0.1)
array([1. , 1.1, 1.2, 1.3])
args: [start, ]stop, [step, ]
as in numpy.arange
rtol, atol: floats
floating point tolerance as in numpy.isclose
include: boolean list-like, length 2
if start and end point are included
"""
# process arguments
if len(args) == 1:
start = 0
stop = args[0]
step = 1
elif len(args) == 2:
start, stop = args
step = 1
else:
assert len(args) == 3
start, stop, step = tuple(args)
# determine number of segments
n = (stop-start)/step + 1
# do rounding for n
if np.isclose(n, np.round(n), rtol=rtol, atol=atol):
n = np.round(n)
# correct for start/end is exluded
if not include[0]:
n -= 1
start += step
if not include[1]:
n -= 1
stop -= step
return np.linspace(start, stop, int(n))
def crange(*args, **kwargs):
return cust_range(*args, **kwargs, include=[True, True])
def orange(*args, **kwargs):
return cust_range(*args, **kwargs, include=[True, False])
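
# Quick illustrative check (not used below): crange keeps both endpoints,
# while orange drops the stop value.
print(crange(0, 1, 0.25))   # expected: [0.   0.25 0.5  0.75 1.  ]
print(orange(0, 1, 0.25))   # expected: [0.   0.25 0.5  0.75]
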
def plot_geo_grid_old(ax,lat_in,lon_in,in_proj='epsg:3857'):
"""
plot geogragriphical grid on axes with projected coordinates
Inputs are ax object, latitude interval, longitude interval, assumed input projection is epsg:3857
"""
X_LIM = ax.get_xlim()
Y_LIM = ax.get_ylim()
init_proj =Proj(init=in_proj)
out_proj = Proj(init='epsg:4326')
X0 = transform(init_proj,out_proj,X_LIM[0],Y_LIM[0])[0]
x0 = np.floor(transform(init_proj,out_proj,X_LIM[0],Y_LIM[0])[0])+lon_in
x1 = transform(init_proj,out_proj, X_LIM[1],Y_LIM[0])[0]
Y = transform(init_proj,out_proj, X_LIM[1],Y_LIM[0])[1]
x_list = np.arange(x0,x1,lon_in)
x_list = sorted([i for i in x_list if i >= X0])
transform_4326 = list(zip(x_list,[Y]*len(x_list)))
transform_3857 = [transform(out_proj,init_proj,x[0],x[1]) for x in transform_4326]
X_in = [x[0] for x in transform_3857]
X_out = [np.round(x[0],2) for x in transform_4326]
Y0 = Y
y0 = np.floor(Y)+lat_in
y1 = transform(init_proj,out_proj,X_LIM[0],Y_LIM[1])[1]
X = x0
y_list = np.arange(y0,y1,lat_in)
y_list = sorted([i for i in y_list if i >= Y0])
transform_4326_y = list(zip([X]*len(y_list),y_list))
transform_3857_y = [transform(out_proj,init_proj,x[0],x[1]) for x in transform_4326_y]
Y_in = [x[1] for x in transform_3857_y]
Y_out = [np.round(x[1],2) for x in transform_4326_y]
ax.set_xticks(X_in)
ax.set_xticklabels(X_out, minor=False)
ax.set_yticks(Y_in)
ax.set_yticklabels(Y_out, minor=False)
return X_in,X_out,Y_in,Y_out
from pyproj import Proj, transform
from matplotlib.ticker import FormatStrFormatter
## modified by David
def plot_geo_grid(ax,lat_in=5.0,lon_in=5.0,in_proj={'init':'epsg:3857'}, fmt='%0.1f', grid=False,ne_label=True,maintain_extent = True, verbose=False):
"""
plot geogragriphical grid on axes with projected coordinates
Inputs are ax object, latitude interval, longitude interval, assumed input projection is epsg:3857
"""
#Get input axes limits
xlim = ax.get_xlim()
ylim = ax.get_ylim()
#Define input and output projections
in_proj = Proj(in_proj)
out_proj = Proj(init='epsg:4326')
#Get lat/lon coord for lower left and upper right mapped coords
ll = transform(in_proj, out_proj, xlim[0], ylim[0])
lr = transform(in_proj, out_proj, xlim[1], ylim[0])
ul = transform(in_proj, out_proj, xlim[0], ylim[1])
ur = transform(in_proj, out_proj, xlim[1], ylim[1])
#Get number of expected lat or lon intervals
nx = np.floor((lr[0] - ll[0])/lon_in)
ny = np.floor((ul[1] - ll[1])/lat_in)
#Determine rounded lower left
ll_r = np.zeros(2)
ll_r[0] = lon_in * np.ceil(ll[0]/lon_in)
ll_r[1] = lat_in * np.ceil(ll[1]/lat_in)
#Calculate rounded upper right
ur_r = ((ll_r[0] + nx * lon_in), (ll_r[1] + ny * lat_in))
#Prepare lists of rounded coordinates at given intervals
x_list = np.arange(ll_r[0],ur_r[0]+lon_in,lon_in)
y_list = np.arange(ll_r[1],ur_r[1]+lat_in,lat_in)
if maintain_extent:
x_list = crange(ll_r[0],ur_r[0],lon_in)
y_list = crange(ll_r[1],ur_r[1],lat_in)
print(ur_r[0])
print(x_list)
x_tick_loc_out = list(zip(x_list, np.repeat(ll[1],x_list.size)))
y_tick_loc_out = list(zip(np.repeat(ll[0],y_list.size), y_list))
#Determine tick locations (in input crs) for the desired lat/lon coords
x_tick_loc_init = np.array([transform(out_proj, in_proj, xy[0], xy[1])[0] for xy in x_tick_loc_out])
y_tick_loc_init = np.array([transform(out_proj, in_proj, xy[0], xy[1])[1] for xy in y_tick_loc_out])
# verbose = False
if verbose:
print(x_tick_loc_out)
print(x_tick_loc_init)
print(y_tick_loc_out)
print(y_tick_loc_init)
#Set formatter
#ax.xaxis.set_major_formatter(FormatStrFormatter(fmt))
#ax.yaxis.set_major_formatter(FormatStrFormatter(fmt))
#Prepare tick labels with desired format
if ne_label:
x_tick_labels = [fmt % x +'$^\circ$E' for x in x_list]
y_tick_labels = [fmt % y +'$^\circ$N' for y in y_list]
x_label = 'Longitude'
y_label = 'Latitude'
else:
x_tick_labels = [fmt % x for x in x_list]
y_tick_labels = [fmt % y for y in y_list]
x_label = 'Longitude ($^\circ$E)'
y_label = 'Latitude ($^\circ$N)'
ax.set_xticks(x_tick_loc_init)
ax.set_xticklabels(x_tick_labels, minor=False)
ax.set_yticks(y_tick_loc_init)
ax.set_yticklabels(y_tick_labels, minor=False)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
if grid:
ax.grid(ls=':')
fig,axa = plt.subplots(figsize=(7.5,5))
im = axa.imshow(np.ma.filled(np.ma.masked_where(ma.data==0,ma.data),np.nan),clim=(-3,3),cmap='RdBu',interpolation='bilinear',extent=extent_fig,aspect='equal')
#gdf2 = gdf.geometry.buffer(-10000)
#gdf2.to_crs({'init':'epsg:3857'}).plot(ax=axa,edgecolor='None',facecolor='None')
ctx.add_basemap(axa,zorder=-1)
plt.colorbar(im,ax=axa,extend='both',cax=cax_cbar(axa),label='elevation change (m/yr)')
#gdf_point.plot(ax=axa,marker="^",c='k')
scalebar = ScaleBar(1.0)
axa.add_artist(scalebar)
#axa.set_xticks([])
#axa.set_yticks([])
x_ax_min,x_ax_max = axa.get_xlim()
y_ax_min,y_ax_max = axa.get_ylim()
fac = x_ax_min-xmin
xmax_new = xmax+fac
ymin_new =ymin-fac
ymax_new = ymax+fac
axa.set_xlim((xmin,xmax_new-9000))
axa.set_ylim((ymin_new,ymax_new))
glac.plot(ax=axa,edgecolor='k',facecolor='None',linewidth=0.5,alpha=0.6)
#axa.text(x_text[0]+1000,y_text[0]-500,'Mt. Everest')
#axa.text(x_text[1]+1000,y_text[1]-200,'Shishapangma')
plt.tight_layout()
plot_geo_grid(axa,0.2,0.2,ne_label=False,verbose=False,maintain_extent=True)
#axa.grid()
#axa.set_xlabel('Longitude ($^\circ$E)')
#axa.set_ylabel('Latitude ($^\circ$N)')
#axa.text(9680000,3320000,'Tibet/China',color='k',weight='bold')
#axa.text(9564000,3220000,'Nepal',color='k',weight='bold')
axa.annotate('Mt. Everest', xy=(x_text[0],y_text[0]), xytext=(x_text[0]-24000,y_text[0]+38000),
arrowprops=dict(facecolor='black',arrowstyle="->"))
axa.annotate('Shishapangma', xy=(x_text[1],y_text[1]), xytext=(x_text[1]+20000,y_text[1]+15000),
arrowprops=dict(facecolor='black',arrowstyle="->"))
#arrowprops=dict(facecolor='black', shrink=0.05))
fig.savefig('Figure3_Shean_etal_HMA19_geogrid_final.jpg',fig_opt={'dpi':300, 'bbox_inches':'tight', 'pad_inches':0})
#list(axa.get_yticklabels())
###Output
_____no_output_____
###Markdown
Figure 3: Joint probability density distributions of vorticity vs. strain rate and vorticity vs. laplacian of SSHa
###Code
# Imports assumed (not present in this notebook); the pdfs data, cpdf contour levels,
# and __dest__ output path are provided elsewhere in the workflow.
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

fig = plt.figure(figsize=(12,4))
ax = fig.add_subplot(241)
plt.contourf(pdfs['vorticity'][:],pdfs['strain'][:],pdfs['april/hourly']['pdf_vorticity_strain'][:],cpdf,vmin=1.e-5,vmax=10.,norm = LogNorm())
plt.plot(pdfs['vorticity'][100:],pdfs['vorticity'][100:],'k--')
plt.plot(pdfs['vorticity'][:100],-pdfs['vorticity'][:100],'k--')
plt.xlim(-4.,4.)
plt.ylim(0,4.)
plt.xticks([])
plt.yticks([])
plt.ylim(0.,4.)
plt.ylabel(r'Strain $\alpha/f$')
ticks=[0,2,4]
plt.yticks(ticks)
plt.text(-4.,4.15,'(a)',fontsize=14)
plt.title('Hourly',fontsize=11)
ax = fig.add_subplot(242)
plt.contourf(pdfs['vorticity'][:],pdfs['strain'][:],pdfs['april/daily-averaged']['pdf_vorticity_strain'][:],cpdf,vmin=1.e-5,vmax=10.,norm = LogNorm())
plt.plot(pdfs['vorticity'][100:],pdfs['vorticity'][100:],'k--')
plt.plot(pdfs['vorticity'][:100],-pdfs['vorticity'][:100],'k--')
plt.xlim(-4.,4.)
plt.ylim(0,4.)
plt.xticks([])
plt.yticks([])
plt.text(-4.,4.15,'(b)',fontsize=14)
plt.title('Daily-averaged',fontsize=11)
plt.text(-6,5.5,'April',fontsize=14)
ax = fig.add_subplot(2,4,3)
plt.contourf(pdfs['vorticity'][:],pdfs['strain'][:],pdfs['october/hourly']['pdf_vorticity_strain'][:],cpdf,vmin=1.e-5,vmax=10.,norm = LogNorm())
plt.plot(pdfs['vorticity'][100:],pdfs['vorticity'][100:],'k--')
plt.plot(pdfs['vorticity'][:100],-pdfs['vorticity'][:100],'k--')
plt.xlim(-4.,4.)
plt.ylim(0.,4.)
plt.yticks([])
xticks=[-4,-2,0,2,4]
plt.xticks([])
plt.text(-4.,4.15,'(c)',fontsize=14)
plt.title('Hourly',fontsize=11)
ax = fig.add_subplot(2,4,4)
plt.contourf(pdfs['vorticity'][:],pdfs['strain'][:],pdfs['october/daily-averaged']['pdf_vorticity_strain'][:],cpdf,vmin=1.e-5,vmax=10.,norm = LogNorm())
plt.plot(pdfs['vorticity'][100:],pdfs['vorticity'][100:],'k--')
plt.plot(pdfs['vorticity'][:100],-pdfs['vorticity'][:100],'k--')
plt.xlim(-4.,4.)
plt.ylim(0.,4.)
plt.xticks([])
plt.yticks([])
plt.text(-4.,4.15,'(d)',fontsize=14)
plt.title('Daily-averaged',fontsize=11)
plt.text(-6.75,5.5,'October',fontsize=14)
ax = fig.add_subplot(2,4,5)
plt.contourf(pdfs['vorticity'][:],pdfs['vorticity'][:],pdfs['april/hourly']['pdf_vorticity_lapssh'][:],cpdf,vmin=1.e-5,vmax=10.,norm = LogNorm())
plt.plot(pdfs['vorticity'][:],pdfs['vorticity'][:],'k--')
plt.xlim(-4.,4.)
plt.ylim(-4.,4.)
plt.ylabel(r'$(g/f^2) \, \nabla^2 \eta$')
ticks=[-4,-2,0,2,4]
plt.xticks(ticks)
plt.yticks(ticks)
plt.text(-4.,4.15,'(e)',fontsize=14)
#plt.xlabel(r'Vorticity $\zeta/f$')
ax = fig.add_subplot(2,4,6)
plt.contourf(pdfs['vorticity'][:],pdfs['vorticity'][:],pdfs['april/daily-averaged']['pdf_vorticity_lapssh'][:],cpdf,vmin=1.e-5,vmax=10.,norm = LogNorm())
plt.plot(pdfs['vorticity'][:],pdfs['vorticity'][:],'k--')
plt.xlim(-4.,4.)
plt.ylim(-4.,4.)
ticks=[-4,-2,0,2,4]
plt.xticks(ticks)
plt.yticks([])
plt.text(-4.,4.15,'(f)',fontsize=14)
#plt.xlabel(r'Vorticity $\zeta/f$')
ax = fig.add_subplot(2,4,7)
plt.contourf(pdfs['vorticity'][:],pdfs['vorticity'][:],pdfs['october/hourly']['pdf_vorticity_lapssh'][:],cpdf,vmin=1.e-5,vmax=10.,norm = LogNorm())
plt.plot(pdfs['vorticity'][:],pdfs['vorticity'][:],'k--')
plt.xlim(-4.,4.)
plt.ylim(-4.,4.)
plt.xticks(ticks)
ticks=[-4,-2,0,2,4]
plt.yticks([])
plt.text(-4.,4.15,'(g)',fontsize=14)
#plt.xlabel(r'Vorticity $\zeta/f$')
ax = fig.add_subplot(2,4,8)
cbs = plt.contourf(pdfs['vorticity'][:],pdfs['vorticity'][:],pdfs['october/daily-averaged']['pdf_vorticity_lapssh'][:],cpdf,vmin=1.e-5,vmax=10.,norm = LogNorm())
plt.plot(pdfs['vorticity'][:],pdfs['vorticity'][:],'k--')
plt.xlim(-4.,4.)
plt.ylim(-4.,4.)
plt.xticks(ticks)
plt.yticks([])
plt.text(-4.,4.15,'(h)',fontsize=14)
#plt.xlabel(r'Vorticity $\zeta/f$')
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.825, 0.16, 0.01, 0.7])
fig.colorbar(cbs, cax=cbar_ax,label=r'Probability density',extend='both',ticks=[1.e-5,1e-4,1.e-3,1e-2,1.e-1,1,10.])
plt.savefig(__dest__[0],dpi=150,bbox_inches='tight')
###Output
/Users/crocha/anaconda3/lib/python3.5/site-packages/matplotlib/contour.py:1538: UserWarning: Log scale: values of z <= 0 have been masked
warnings.warn('Log scale: values of z <= 0 have been masked')
###Markdown
Figure 3
###Code
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib
import scipy.stats as sp
import pandas as pd
from scipy.special import logsumexp
import replay_structure.metadata as meta
import replay_structure.read_write as read_write
import replay_structure.metadata as meta
import importlib
###Output
_____no_output_____
###Markdown
Model comparison results
###Code
ratday_data = dict()
model_comparison_results = dict()
deviance_explained_results = dict()
for session in meta.Session_List:
ratday_data[session.rat, session.day] = read_write.load_ratday_data(session)
model_comparison_results[session.rat, session.day] = read_write.load_model_comparison_results(
session, meta.Ripple_Data.default_time_window_ms, meta.Ripple_Data.name, meta.Ripple_Data.default_likelihood_function)
deviance_explained_results[session.rat, session.day] = read_write.load_deviance_explained_results(
session, meta.Ripple_Data.default_time_window_ms, meta.Ripple_Data.name, meta.Ripple_Data.default_likelihood_function)
# calculate mean/sd p(M|X) across sessions
p_models = np.zeros((meta.N_SESSIONS, meta.N_MODELS))
for i, session in enumerate(meta.Session_List):
p_models[i] = model_comparison_results[session.rat,session.day].random_effects_results["p_models"]
p_models_mean = np.mean(p_models, axis=0)
p_models_sd = np.std(p_models, axis=0)
# print p(M|X) mean and sd
for i, model in enumerate(meta.MODELS_AS_STR):
print(f"{model} model: {np.round(p_models_mean[i]*100,1)}% +/- {np.round(p_models_sd[i]*100,1)}%")
# print p(M|X) grouped by trajectory/non-trajectory models
print("\n")
sum_trajectory = np.sum(p_models[:,:2], axis=1)*100
sum_nontrajectory = np.sum(p_models[:,2:], axis=1)*100
print(f"trajectory models: {np.round(np.mean(sum_trajectory), 1)}% +/- {np.round(np.std(sum_trajectory),1)}")
print(f"non-trajecotry models: {np.round(np.mean(sum_nontrajectory), 1)}% +/- {np.round(np.std(sum_nontrajectory),1)}")
print(f"exceedance probabilities for models {meta.MODELS_AS_STR}")
for i, session in enumerate(meta.Session_List):
print(f"{session}: {model_comparison_results[session.rat,session.day].random_effects_results['p_exceedance']}")
###Output
exceedance probabilities for models ['diffusion', 'momentum', 'stationary', 'stationary_gaussian', 'random']
rat1day1: [0. 1. 0. 0. 0.]
rat1day2: [0. 1. 0. 0. 0.]
rat2day1: [0. 1. 0. 0. 0.]
rat2day2: [0. 1. 0. 0. 0.]
rat3day1: [0. 1. 0. 0. 0.]
rat3day2: [0. 1. 0. 0. 0.]
rat4day1: [0. 1. 0. 0. 0.]
rat4day2: [0. 1. 0. 0. 0.]
###Markdown
Comparison to previous classification results
###Code
percent_significant_ripples = np.zeros(meta.N_SESSIONS)
percent_trajectory_model = np.zeros(meta.N_SESSIONS)
for i, session in enumerate(meta.Session_List):
percent_significant_ripples[i] = len(ratday_data[session.rat, session.day].data['significant_ripples'])/ratday_data[session.rat, session.day].data['n_ripples']
percent_trajectory_model[i] = model_comparison_results[session.rat,session.day].random_effects_results["p_models"][[0,1]].sum()
percent_significant_ripples_mean = np.mean(percent_significant_ripples, axis=0)
percent_significant_ripples_sd = np.std(percent_significant_ripples, axis=0)
percent_trajectory_model_mean = np.mean(percent_trajectory_model, axis=0)
percent_trajectory_model_sd = np.std(percent_trajectory_model, axis=0)
print(f"previously classified: {np.round(np.mean(percent_significant_ripples)*100,1)}% +/- {np.round(np.std(percent_significant_ripples)*100,1)}")
###Output
previously classified: 22.8% +/- 8.4
###Markdown
Model recovery results
###Code
model_recovery_mc_results = dict()
model_recovery_random_effects = np.zeros((5, 5))
model_recovery_best_fit = np.zeros((5,5))
for i, session in enumerate(meta.Simulated_Session_List):
model_recovery_mc_results[str(session.model.name)] = read_write.load_model_comparison_results(
session, meta.Poisson_Simulated_Ripple_Data.default_time_window_ms, meta.Poisson_Simulated_Ripple_Data.name, meta.Poisson_Simulated_Ripple_Data.default_likelihood_function)
model_recovery_random_effects[i] = model_recovery_mc_results[str(session.model.name)].random_effects_results["p_models"]
model_recovery_best_fit[i] = model_recovery_mc_results[str(session.model.name)].max_ll_counts / model_recovery_mc_results[str(session.model.name)].max_ll_counts.sum()
# F1 score trajectory vs non_trajectory
cm_trajectory = np.zeros((2,2))
cm_trajectory[0,0] = model_recovery_random_effects[:2,:2].sum()
cm_trajectory[0,1] = model_recovery_random_effects[:2,2:].sum()
cm_trajectory[1,0] = model_recovery_random_effects[2:,:2].sum()
cm_trajectory[1,1] = model_recovery_random_effects[2:,2:].sum()
cm_trajectory = (cm_trajectory.T/cm_trajectory.sum(axis=1)).T
print("Trajectory vs non_trajectory")
trajectory_accuracy = (cm_trajectory[0,0] + cm_trajectory[1,1])/2
trajectory_precision = cm_trajectory[0,0]/(cm_trajectory[0,0] + cm_trajectory[1,0])
trajectory_recall = cm_trajectory[0,0]/(cm_trajectory[0,0] + cm_trajectory[0,1])
trajectory_fscore = (2*trajectory_precision*trajectory_recall)/(trajectory_precision + trajectory_recall)
print("F-score: ", trajectory_fscore.round(2))
print("diffusion misclassified as momentum: ", (model_recovery_random_effects[0,1]*100).round(1), "%")
print("momentum misclassified as diffusion: ", (model_recovery_random_effects[1,0]*100).round(1), "%")
###Output
diffusion misclassified as momentum: 6.0 %
momentum misclassified as diffusion: 25.5 %
###Markdown
Deviance explained results
###Code
all_deviance_explained = deviance_explained_results[1,1].results
for session in meta.Session_List[1:]:
all_deviance_explained = all_deviance_explained.append(deviance_explained_results[session.rat,session.day].results)
all_deviance_explained_with_popburst = all_deviance_explained[~np.any(np.isnan(all_deviance_explained), axis=1)]
total_SWRs_with_popburst = len(all_deviance_explained_with_popburst)
deviance_explained_x_best_fit = np.sort(all_deviance_explained_with_popburst.max(axis=1))
deviance_explained_x_random = np.sort(all_deviance_explained_with_popburst['random'])
deviance_explained_y = np.arange(total_SWRs_with_popburst)/total_SWRs_with_popburst
print(f"Deviance explained best fit: {deviance_explained_x_best_fit.mean().round(3)} +/- {deviance_explained_x_best_fit.std().round(3)}")
print(f"Deviance explained random: {deviance_explained_x_random.mean().round(3)} +/- {deviance_explained_x_random.std().round(3)}")
print(sp.ttest_rel(deviance_explained_x_best_fit, deviance_explained_x_random), f"deg of freedom={total_SWRs_with_popburst}")
###Output
Deviance explained best fit: 0.234 +/- 0.07
Deviance explained random: 0.099 +/- 0.05
Ttest_relResult(statistic=339.87712364258266, pvalue=0.0) deg of freedom=2883
###Markdown
Make figure
###Code
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
# From https://stackoverflow.com/a/18926541
new_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
cmap_use = truncate_colormap(matplotlib.cm.magma, minval=.1, maxval=1)
axis_label_fontsize = 6
letter_ticks_fontsize = 6
number_ticks_fontsize = 5
legend_fontsize=5
model_names_dict = {'diffusion': 'Diffusion',
'momentum': 'Momentum',
'stationary': 'Stationary',
'stationary_gaussian': 'Gaussian',
'random': 'Random'}
model_names = [model_names_dict[model] for model in meta.MODELS_AS_STR]
temporal_model_colors = {True: '#9e1f63', False: '#fbb040'}
model_colors = [temporal_model_colors[model.trajectory] for model in meta.MODELS]
previous_classification_colors = {'classified': 'seagreen', 'not_classified': 'darkgray'}
figure_width = 7.2
figure_height = 2.1
margin = 0.06
header_height = 0.1
footer_height = 0.45
w_spacing = 0.09
# define plot dimensions (relative to a "1x1" plot for a marginal)
panel1_width = .17
panel2_width = .12
panel3_width = .17#.2
panel4_width = .14
panel_height = 1 - header_height - footer_height
panel1_left = margin
panel2_left = margin + panel1_width + w_spacing
panel3_left = margin + panel1_width + panel2_width + 2*w_spacing + .04
panel4_left = margin + panel1_width + panel2_width + panel3_width + 3*w_spacing + .04
panel_bottom = footer_height
fig = plt.figure(
figsize=(figure_width, figure_height), dpi=200, facecolor="w", edgecolor="k"
)
ax = dict()
# model comparison summary
x_axis = np.arange(meta.N_MODELS)+.5
i = 0
ax[i] = fig.add_axes([panel1_left, panel_bottom, panel1_width, panel_height])
ax[i].spines["right"].set_visible(False)
ax[i].spines["top"].set_visible(False)
ax[i].set_xticks(x_axis)
ax[i].set_xticklabels(model_names, rotation=45, ha='right', fontsize=letter_ticks_fontsize)
ax[i].set_xlim([0,meta.N_MODELS-.05])
ax[i].tick_params(axis='y', labelsize=number_ticks_fontsize)
ax[i].set_ylabel("p($M$ | all SWRs)", fontsize=axis_label_fontsize)
ax[i].set_xlabel("Dynamics Model", fontsize=axis_label_fontsize)
ax[i].set_ylim([0,1])
ax[i].tick_params(axis='x', pad=.7)
custom_patches = [matplotlib.patches.Patch(facecolor=temporal_model_colors[True], alpha=.9,label='Trajectory\nmodel'),
matplotlib.patches.Patch(facecolor=temporal_model_colors[False], alpha=.9,label='Non-trajectory\nmodel'),
]
ax[i].legend(
handles=custom_patches,
fontsize=legend_fontsize,
bbox_to_anchor=(.5, .95, 0, 0),
ncol=1,
frameon=False)
ax[i].bar(x_axis, p_models_mean, yerr=p_models_sd,
width=.5, color=model_colors, alpha=.9, error_kw=dict(lw=.7, zorder=101))
for s in range(meta.N_SESSIONS):
ax[i].scatter(x_axis+(np.random.rand()-.5)/8, p_models[s], s=5, color='dimgray', zorder=100, marker='.', linewidths=0)
# trajectory v. non-trajectory
x_axis = np.arange(2) + .5
i = 1
ax[i] = fig.add_axes([panel2_left, panel_bottom, panel2_width, panel_height])
ax[i].spines["right"].set_visible(False)
ax[i].spines["top"].set_visible(False)
ax[i].tick_params(axis='y', labelsize=number_ticks_fontsize)
ax[i].set_xticks(x_axis)
ax[i].set_xticklabels(["Our\nMethod", "Traditional\nMethod"], ha='center', fontsize=letter_ticks_fontsize)
ax[i].set_xlim([0,2])
ax[i].set_ylabel("Fraction SWRs", fontsize=axis_label_fontsize)
ax[i].set_ylim([0,1])
bar_width = .35
alpha=.9
ax[i].bar(x_axis, [percent_trajectory_model_mean, percent_significant_ripples_mean], yerr=[percent_trajectory_model_sd, percent_significant_ripples_sd],
width=bar_width, color=[temporal_model_colors[True], previous_classification_colors['classified']], alpha=alpha, error_kw=dict(lw=.8, zorder=101))
ax[i].bar(x_axis, [1-percent_trajectory_model_mean, 1-percent_significant_ripples_mean], bottom=[percent_trajectory_model_mean, percent_significant_ripples_mean],
width=bar_width, color=[temporal_model_colors[False], previous_classification_colors['not_classified']], alpha=alpha)
for s in range(meta.N_SESSIONS):
ax[i].scatter(x_axis+(np.random.rand()-.5)/8, [percent_trajectory_model[s], percent_significant_ripples[s]], s=5, color='dimgray', zorder=100, marker='.', linewidths=0)
custom_lines_1 = [matplotlib.lines.Line2D([0], [0], color=temporal_model_colors[True], lw=4, linestyle='--', alpha=alpha),
matplotlib.lines.Line2D([0], [0], color=temporal_model_colors[False], lw=4, linestyle='--', alpha=alpha)]
custom_lines_2 = [matplotlib.lines.Line2D([0], [0], color=previous_classification_colors['classified'], lw=4, linestyle='--', alpha=alpha),
matplotlib.lines.Line2D([0], [0], color=previous_classification_colors['not_classified'], lw=4, linestyle='--', alpha=alpha)]
leg_1 = ax[i].legend(
handles=custom_lines_2,
labels=['Previously\nclassified\ntrajectory', 'Not\nclassified'],
bbox_to_anchor=(1.3, -.3, 0, 0), fontsize=legend_fontsize,
ncol=1,
frameon=False,
labelspacing=.8)
leg_2 = ax[i].legend(
handles=custom_lines_1,
labels=['Trajectory\nmodel', 'Non-trajectory\nmodel'],
bbox_to_anchor=(.6, -.35, 0, 0), fontsize=legend_fontsize,
ncol=1,
frameon=False,
labelspacing=.8)
ax[i].add_artist(leg_1)
# confusion matrix from simulated data
xy_axis = np.arange(meta.N_MODELS)
i = 2
ax[i] = fig.add_axes([panel3_left, panel_bottom, panel3_width, panel_height])
ax[i].set_xticks(xy_axis)
ax[i].set_xticklabels(model_names, rotation=45, ha='right', fontsize=letter_ticks_fontsize-1)
ax[i].set_yticks(xy_axis)
ax[i].set_yticklabels(model_names, rotation=0, ha='right', fontsize=letter_ticks_fontsize-1)
ax[i].set_ylabel("Generative dynamics", fontsize=axis_label_fontsize)
ax[i].set_xlabel("Inferred dynamics", fontsize=axis_label_fontsize)
im = ax[i].imshow(model_recovery_random_effects, vmin=0, vmax=1, cmap=cmap_use)
cbar = fig.colorbar(im, ax=ax[i], location='right', shrink=.95, pad=.07)
cbar.ax.set_title(' p($M$|X$_{sim}$)', fontsize=6, pad=4)
cbar.ax.tick_params(labelsize=5)
# deviance explained
i = 3
ax[i] = fig.add_axes([panel4_left, panel_bottom, panel4_width, panel_height])
ax[i].spines["right"].set_visible(False)
ax[i].spines["top"].set_visible(False)
ax[i].tick_params(axis='both', labelsize=number_ticks_fontsize)
ax[i].set_ylabel("Fraction SWRs", fontsize=axis_label_fontsize)
ax[i].set_xlabel("Deviance explained", fontsize=axis_label_fontsize)
ax[i].set_ylim([0,1])
ax[i].set_xlim([0,.5])
ax[i].plot(deviance_explained_x_best_fit[deviance_explained_x_best_fit >0],
deviance_explained_y[deviance_explained_x_best_fit >0],
color='crimson', alpha=.8, label='Best fit model', clip_on=False)
ax[i].plot(deviance_explained_x_random[deviance_explained_x_random >0],
deviance_explained_y[deviance_explained_x_random >0],
color='gray', label='Random model', clip_on=False)
ax[i].legend(frameon=False, fontsize=legend_fontsize, loc=4, bbox_to_anchor=(1.25,0,0,0))
ax[0].text(-.3,1.1, "a", size=8, transform=ax[0].transAxes, fontweight='bold')
ax[1].text(-.45,1.1, "b", size=8, transform=ax[1].transAxes, fontweight='bold')
ax[2].text(-.7,1.1, "c", size=8, transform=ax[2].transAxes, fontweight='bold')
ax[3].text(-.4,1.1, "d", size=8, transform=ax[3].transAxes, fontweight='bold')
plt.savefig(os.path.join(meta.FIGURES_PATH, "Figure3.svg"), dpi=500, transparent=True)
rect = plt.Rectangle(
# (lower-left corner), width, height
(0, 0), 1, 1, fill=False, color="k", lw=.5, alpha=.2,
zorder=1000, transform=fig.transFigure, figure=fig
)
fig.patches.extend([rect])
summary_df = read_write.load_descriptive_stats(meta.Ripple_Data.default_time_window_ms, meta.Ripple_Data.name, meta.Ripple_Data.default_likelihood_function)
SWRs_with_spiking_data = summary_df[~np.isnan(summary_df['current_location_x'])]
n_SWRs_with_spiking_data = len(SWRs_with_spiking_data)
print("# total SWRs: ", n_SWRs_with_spiking_data)
SWRs_without_popburst = SWRs_with_spiking_data[np.isnan(SWRs_with_spiking_data['avg_fr'])]
SWRs_with_popburst = SWRs_with_spiking_data[~np.isnan(SWRs_with_spiking_data['avg_fr'])]
n_SWRs_without_popburst = len(SWRs_without_popburst)
n_SWRs_with_popburst = len(SWRs_with_popburst)
print("total # SWRs with popburst: ", n_SWRs_with_popburst)
print("# SWRs without population bursts: ", n_SWRs_without_popburst)
print("% SWRs without population bursts: ",np.round(n_SWRs_without_popburst/n_SWRs_with_spiking_data*100,1), "%")
SWRs_previously_classified = SWRs_with_spiking_data[SWRs_with_spiking_data['map_classified_PF']==1]
nontrajectory_previously_classified = SWRs_previously_classified[SWRs_previously_classified['trajectory_model'] == 0]
n_previously_classified = (SWRs_with_spiking_data['map_classified_PF']==1).sum()
n_nontrajectory_previously_classified = len(nontrajectory_previously_classified)
print('% previously classified trajectories not classified as trajectory model: ', np.round(n_nontrajectory_previously_classified/n_previously_classified*100,1), "%")
print('# previously classified trajectories not classified as trajectory model: ', n_nontrajectory_previously_classified)
print('# previously classified trajectories that do not meet popburst criteria: ', (np.isnan(nontrajectory_previously_classified['avg_fr']).sum()))
nontrajectory_previously_classified['best_fit_model'].value_counts()
n_trajectory_SWRs = (SWRs_with_popburst['trajectory_model'] ==1).sum()
print("# SWRs classified as trajectory-model: ", n_trajectory_SWRs)
print("% of # SWRs with popburst classified as trajectory-model: ", np.round(n_trajectory_SWRs/n_SWRs_with_popburst*100, 1), "%")
###Output
# SWRs classified as trajectory-model: 2366
% of # SWRs with popburst classified as trajectory-model: 82.1 %
|
dgl_intro.ipynb | ###Markdown
Graph Neural Networks

By David Rose

The TL;DR

What?

A graph is a method of representing network data and the connections within it. In the most basic form you have two parts that make up a graph: **nodes** and **edges**. The nodes represent the samples of data and the edges represent some sort of link between them. Sometimes the link (edge) can be a single obvious property connecting multiple objects such as:

- Distances between cities in miles
- Friendships on Facebook
- Citations between papers on Arxiv

Or sometimes the network can be connected by multiple relevant attributes. With a supply chain network you will have routes that can be described by:

- The trucks that deliver on routes between warehouses
- The type of route (sea, land, air)
- The average time or cost to transfer between locations

Why?

Compared to tabular datasets there is no assumption of IID; rather, graphs are used for the express purpose of modeling samples of data that are related in some way or another.

How?

We can transition a graph to the form of a typical machine learning problem by giving both the nodes and the edges their own features, and then performing the task of classifying a label on a specific node or on the graph as a whole. Maybe for COVID contact tracing we want to find who may have had contact with a specific person (node), but we only have partial knowledge of contact so far (the labels true/false), based on the edge features (distance between where they live) and node features (demographics, job type, common locations visited). Features can be anything you would normally use in a dataset:

- numerical embeddings of words on a webpage
- pixel values of images
- one-hot encoding of categories

Some examples of graph networks

| Type of Network | Nodes | Node Features | Edges | Edge Features |
|-----------------|-----------------|-----------------------------------------------------------------------|--------------|----------------------------------------------------------------------|
| Traffic Flow | addresses | residential/business, amount of parking, size of lot | roads | lane count, road width, average traffic flow |
| Flights | airports | airlines, terminals, geography, city population, connecting transit | routes | distance, planes, frequency, costs, passengers |
| Banking | account holders | name, demographics, products, balances, credit report | transactions | type, amount, timestamp, location, device |
| Social | users | name, demographics, photos, connections, comments | interactions | interaction type, timestamp, content |
| Physicians | doctors | name, demographics, specialty, licenses, affiliations | patients | names, demographics, medical history, referred by, insurance, health |
| Supply Chain | warehouses | location, size, capacity, products stored, connections, geography | trucks | size, year, model, permits, driver experience, load capacity |

The Task

Node Classification

One of the most common tasks performed with GNNs. The basic idea is that we can take a specific reference node, in this case node A, and perform neighborhood aggregation on the surrounding nodes and the edges between them.

What are the network layers?

- **Nodes**: Recurrent networks
- **Edges**: feed-forward networks

What is the process?

**Message passing**: In performing the neighborhood aggregation, we pass messages (or embeddings) between the surrounding nodes with regard to our reference node A. This in effect causes the information embedded in the nodes and edges to begin filtering through the network, where they begin to learn from their neighbors.

![]()
|  ||:--:| | *Source: https://web.stanford.edu/class/cs224w/slides/08-GNN.pdf* | Simple steps:1. Neighboring nodes pass their messages (embeddings) through the edge networks into the recurrent network on the reference node.2. The new embedding of the reference recurrent unit is updated by applying the recurrent function on the current embedding and a summation of the edge network outputs from neighboring nodes. |  ||:--:| | *Source: https://medium.com/dair-ai/an-illustrated-guide-to-graph-neural-networks-d5564a551783* Then what?Once you have performed this step a few times, we now have a set of new embeddings, different from than we began. So the nodes now have their original information, along with an aggregation of the information contained within their surrounding nodes. We can then take this information and and send them further along other layers in a pipeline, or sum up all the embeddings to get a vector **H** that represents the whole graph. |  ||:--:| | *Source: https://medium.com/dair-ai/an-illustrated-guide-to-graph-neural-networks-d5564a551783* Math Notation State of each nodeEach node is represented by the state of it's neighborhood below  - **x_v**: The node feature- **x_co[v]**: Features of the edges connecting with v- **h_ne[v]**: Embeddings of the neighboring nodes of v- **x_nv[v]**: Features of the neighboring nodes of v- **f**: The transition function that projects these inputs into a d-dimensional space State of aggregate nodesH and X denote the concatenation of all the h and x values as an iterative update process  Typical Sampling Process |  ||:--:| | *Source: https://web.stanford.edu/class/cs224w/slides/08-GNN.pdf* | ------------- Code Example
###Code
!pip install dgl torch > logs/bash_log.txt
###Output
_____no_output_____
###Markdown
PyTorch or TensorFlow? When importing *dgl* we can specify the backend to use with the environment variable `DGLBACKEND`.
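If the `%env` magic is not available (for example in a plain Python script), the same selection can presumably be made with `os.environ` before `dgl` is imported; note the value should be the bare backend name, without extra quotes:

```python
import os
os.environ["DGLBACKEND"] = "pytorch"   # assumption: dgl reads this at import time
import dgl
```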
###Code
#%env DGLBACKEND='tensorflow'
%env DGLBACKEND='pytorch'
import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.data
USE_GPU = False
###Output
env: DGLBACKEND='pytorch'
###Markdown
Dataset (Reddit) Using the Reddit dataset, which has already been processed and is ready for download. It is a collection of posts made during the month of September 2014. The label is the subreddit name for the node (post), and two nodes are connected if the same user comments on both posts. Sampling We use the first 20 days for training and the remaining days for testing (with 30% of those used for validation).
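Once the dataset cell below has run, a quick sanity check on the split sizes is to sum the per-node masks (mask names taken from the printout below):

```python
# g is the DGLGraph loaded in the cell below; each mask is a per-node tensor.
for name in ("train_mask", "val_mask", "test_mask"):
    print(name, int(g.ndata[name].sum()))
```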
###Code
dataset = dgl.data.RedditDataset()
print('Number of categories:', dataset.num_classes)
g = dataset[0]
print('\nNode features')
print(g.ndata.keys())
print('\nEdge features')
print(g.edata.keys())
print(f"\nTotal nodes: {g.num_nodes():,}")
print(f"Total edges: {g.num_edges():,}")
###Output
Number of categories: 41
Node features
dict_keys(['label', 'feat', 'test_mask', 'train_mask', 'val_mask'])
Edge features
dict_keys([])
Total nodes: 232,965
Total edges: 114,615,892
###Markdown
The Model Here we put together a simple two-layer Graph Convolutional Network (GCN). Each layer computes new node representations by aggregating neighbor information. DGL layers work easily within PyTorch and can be stacked along with standard PyTorch layers.
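For reference, the layer stacked here is modeled on the standard GCN propagation rule (the symmetric normalization is my understanding of `GraphConv`'s default, stated here as an assumption rather than verified):

$$H^{(l+1)} = \sigma\!\left(\tilde{D}^{-\frac{1}{2}}\,\tilde{A}\,\tilde{D}^{-\frac{1}{2}}\,H^{(l)}\,W^{(l)}\right), \qquad \tilde{A} = A + I,$$

where $H^{(l)}$ holds the node embeddings at layer $l$, $W^{(l)}$ is the learned weight matrix, and $\tilde{D}$ is the degree matrix of $\tilde{A}$.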
###Code
from dgl.nn import GraphConv
class GCN(nn.Module):
def __init__(self, in_feats, h_feats, num_classes):
super(GCN, self).__init__()
self.conv1 = GraphConv(in_feats, h_feats)
self.conv2 = GraphConv(h_feats, num_classes)
def forward(self, g, in_feat):
h = self.conv1(g, in_feat)
h = F.relu(h)
h = self.conv2(g, h)
return h
# Create the model with given dimensions
model = GCN(g.ndata['feat'].shape[1], 16, dataset.num_classes)
###Output
_____no_output_____
###Markdown
Training The training process is similar to any other PyTorch training loop.
###Code
def train(g, model):
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
best_val_acc = 0
best_test_acc = 0
features = g.ndata['feat']
labels = g.ndata['label']
train_mask = g.ndata['train_mask']
val_mask = g.ndata['val_mask']
test_mask = g.ndata['test_mask']
for e in range(50):
# Forward
logits = model(g, features)
# Compute prediction
pred = logits.argmax(1)
# Compute loss
# Only compute the losses of the nodes in the training set
loss = F.cross_entropy(logits[train_mask], labels[train_mask])
# Compute accuracy on training/validation/test
train_acc = (pred[train_mask] == labels[train_mask]).float().mean()
val_acc = (pred[val_mask] == labels[val_mask]).float().mean()
test_acc = (pred[test_mask] == labels[test_mask]).float().mean()
# Save best validation accuracy and corresponding test accuracy
if best_val_acc < val_acc:
best_val_acc = val_acc
best_test_acc = test_acc
# Backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
if e % 5 == 0:
print(
'epoch {}, loss: {:.3f}, val acc: {:.3f} (best {:.3f}),\
test acc: {:.3f} (best {:.3f})'.format(
e, loss, val_acc, best_val_acc, test_acc, best_test_acc))
if USE_GPU:
g = g.to('cuda')
model = GCN(g.ndata['feat'].shape[1], 16, dataset.num_classes).to('cuda')
else:
model = GCN(g.ndata['feat'].shape[1], 16, dataset.num_classes)
train(g, model)
###Output
epoch 0, loss: 3.755, val acc: 0.011 (best 0.011), test acc: 0.011 (best 0.011)
epoch 5, loss: 2.773, val acc: 0.362 (best 0.362), test acc: 0.362 (best 0.362)
epoch 10, loss: 2.177, val acc: 0.598 (best 0.598), test acc: 0.595 (best 0.595)
epoch 15, loss: 1.716, val acc: 0.665 (best 0.665), test acc: 0.660 (best 0.660)
epoch 20, loss: 1.355, val acc: 0.759 (best 0.759), test acc: 0.754 (best 0.754)
epoch 25, loss: 1.083, val acc: 0.824 (best 0.824), test acc: 0.820 (best 0.820)
epoch 30, loss: 0.888, val acc: 0.857 (best 0.857), test acc: 0.853 (best 0.853)
epoch 35, loss: 0.756, val acc: 0.894 (best 0.894), test acc: 0.891 (best 0.891)
epoch 40, loss: 0.661, val acc: 0.902 (best 0.902), test acc: 0.899 (best 0.899)
epoch 45, loss: 0.593, val acc: 0.912 (best 0.912), test acc: 0.909 (best 0.909)
###Markdown
Save the trained graph Once the model is trained, we can easily save the graph and load it back later with DGL's built-in functions.
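The built-in functions below persist the *graph*; if the trained *weights* should survive a restart too, ordinary PyTorch serialization works (the file name here is just an example):

```python
torch.save(model.state_dict(), 'gcn_weights.pt')        # save trained parameters
# ...later, after rebuilding the same architecture:
model = GCN(g.ndata['feat'].shape[1], 16, dataset.num_classes)
model.load_state_dict(torch.load('gcn_weights.pt'))
```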
###Code
# Save graphs
dgl.save_graphs('graph.dgl', g)
# Load graphs
(g,), _ = dgl.load_graphs('graph.dgl')
print(g)
###Output
Graph(num_nodes=232965, num_edges=114615892,
ndata_schemes={'val_mask': Scheme(shape=(), dtype=torch.uint8), 'train_mask': Scheme(shape=(), dtype=torch.uint8), 'test_mask': Scheme(shape=(), dtype=torch.uint8), 'label': Scheme(shape=(), dtype=torch.int64), 'feat': Scheme(shape=(602,), dtype=torch.float32)}
edata_schemes={})
|
Data Science Michigan/Introduction to Data Science/Week2/week2.ipynb | ###Markdown
---_You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._--- The Series Data Structure
###Code
import pandas as pd
number = [1,2,3]
pd.Series(number)
numbers = [1, 2, 3]
pd.Series(numbers)
animals = ['Tiger', 'Bear', None]
pd.Series(animals)
numbers = [1, 2, None]
pd.Series(numbers)
import numpy as np
np.nan == None
np.nan == np.nan
np.isnan(np.nan)
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports)
s
s.index
s = pd.Series(['Tiger', 'Bear', 'Moose'], index=['India', 'America', 'Canada'])
s
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports, index=['Golf', 'Sumo', 'Hockey'])
s
###Output
_____no_output_____
###Markdown
Querying a Series
###Code
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports)
s
s.iloc[3]
s.loc['Sumo']
s[3]
s['Golf']
sports = {99: 'Bhutan',
100: 'Scotland',
101: 'Japan',
102: 'South Korea'}
s = pd.Series(sports)
s[0] #This won't call s.iloc[0] as one might expect, it generates an error instead
s = pd.Series([100.00, 120.00, 101.00, 3.00])
# s
total = 0
for item in s:
total+=item
print(total)
import numpy as np
total = np.sum(s)
print(total)
a = pd.Series([100,200])
print(a)
np.sum(a)
len(s)
%%timeit -n 100
summary = 0
for item in s:
summary+=item
%%timeit -n 100
summary = np.sum(s)
s+=2 #adds two to each item in s using broadcasting
s.head(n = 1)
for label, value in s.iteritems():
s.set_value(label, value+2)
s.head()
%%timeit -n 10
s = pd.Series(np.random.randint(0,1000,10000))
for label, value in s.iteritems():
s.loc[label]= value+2
%%timeit -n 10
s = pd.Series(np.random.randint(0,1000,10000))
s+=2
s = pd.Series([1, 2, 3])
s.loc['Animal'] = 'Bears'
s
original_sports = pd.Series({'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'})
cricket_loving_countries = pd.Series(['Australia',
'Barbados',
'Pakistan',
'England'],
index=['Cricket',
'Cricket',
'Cricket',
'Cricket'])
all_countries = original_sports.append(cricket_loving_countries)
original_sports
cricket_loving_countries
all_countries
all_countries.loc['Cricket']
###Output
_____no_output_____
###Markdown
The DataFrame Data Structure
###Code
import pandas as pd
purchase_1 = pd.Series({'Name': 'Chris',
'Item Purchased': 'Dog Food',
'Cost': 22.50})
purchase_2 = pd.Series({'Name': 'Kevyn',
'Item Purchased': 'Kitty Litter',
'Cost': 2.50})
purchase_3 = pd.Series({'Name': 'Vinod',
'Item Purchased': 'Bird Seed',
'Cost': 5.00})
df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])
df.head()
df.loc['Store 2']
type(df.loc['Store 2'])
df.loc['Store 1']
df.loc['Store 1', 'Cost']
df.T
df.T.loc['Cost']
df['Cost']
df.loc['Store 1']['Cost']
df.loc[:,['Name', 'Cost']]
df.drop('Store 1')
df
copy_df = df.copy()
copy_df = copy_df.drop('Store 1')
copy_df
copy_df.drop?
del copy_df['Name']
copy_df
df['Location'] = None
df
###Output
_____no_output_____
###Markdown
Dataframe Indexing and Loading
###Code
costs = df['Cost']
costs
costs+=2
costs
df
!cat olympics.csv
df = pd.read_csv('olympics.csv')
df.head()
df = pd.read_csv('olympics.csv', index_col = 0, skiprows=1)
df.head()
df.columns
for col in df.columns:
if col[:2]=='01':
df.rename(columns={col:'Gold' + col[4:]}, inplace=True)
if col[:2]=='02':
df.rename(columns={col:'Silver' + col[4:]}, inplace=True)
if col[:2]=='03':
df.rename(columns={col:'Bronze' + col[4:]}, inplace=True)
if col[:1]=='№':
df.rename(columns={col:'#' + col[1:]}, inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
Querying a DataFrame
###Code
df['Gold'] > 0
only_gold = df.where(df['Gold'] > 0)
only_gold.head()
only_gold['Gold'].count()
df['Gold'].count()
only_gold = only_gold.dropna()
only_gold.head()
only_gold = df[df['Gold'] > 0]
only_gold.head()
len(df[(df['Gold'] > 0) | (df['Gold.1'] > 0)])
df[(df['Gold.1'] > 0) & (df['Gold'] == 0)]
###Output
_____no_output_____
###Markdown
Indexing Dataframes
###Code
df.head()
df['country'] = df.index
df = df.set_index('Gold')
df.head()
df = df.reset_index()
df.head()
df = pd.read_csv('census.csv')
df.head()
df['SUMLEV'].unique()
df=df[df['SUMLEV'] == 50]
df.head()
columns_to_keep = ['STNAME',
'CTYNAME',
'BIRTHS2010',
'BIRTHS2011',
'BIRTHS2012',
'BIRTHS2013',
'BIRTHS2014',
'BIRTHS2015',
'POPESTIMATE2010',
'POPESTIMATE2011',
'POPESTIMATE2012',
'POPESTIMATE2013',
'POPESTIMATE2014',
'POPESTIMATE2015']
df = df[columns_to_keep]
df.head()
df = df.set_index(['STNAME', 'CTYNAME'])
df.head()
df.loc['Michigan', 'Washtenaw County']
df.loc[ [('Michigan', 'Washtenaw County'),
('Michigan', 'Wayne County')] ]
###Output
_____no_output_____
###Markdown
Missing values
###Code
df = pd.read_csv('log.csv')
df
df.fillna?
df = df.set_index('time')
df = df.sort_index()
df
df = df.reset_index()
df = df.set_index(['time', 'user'])
df
df = df.fillna(method='ffill')
df.head()
###Output
_____no_output_____ |
CNN_TensorFLow.ipynb | ###Markdown
Melanoma detection using TensorFlow
###Code
print("Hi")  # placeholder cell
###Output
_____no_output_____
###Markdown
Melanoma detection using TensorFlow
###Code
###Output
_____no_output_____ |
wikipedia-table.ipynb | ###Markdown
Retrieve tables from Wikipedia Setup
###Code
__author__ = "Admin GGCS"
__copyright__ = "Copyright 2019"
__license__ = "MIT"
__version__ = "2.0.1"
__maintainer__ = "Admin GGCS"
__website__ = "ggcs.io"
import requests
import pandas as pd
import os
###Output
_____no_output_____
###Markdown
Main Part Enter the target URL and convert it to a requests.models.Response
###Code
# pd.read_html() can usually read the page (myUrl) directly,
# but it sometimes errors out depending on the remote site's encoding, so convert to a requests.models.Response just in case.
try:
myUrl = input('URL? (ex. https://www.example.com) >> ')
response = requests.get(myUrl)
except Exception as exc_msg:
print("Error!{}".format(exc_msg))
# Example input: https://ja.wikipedia.org/wiki/生物の分類
###Output
URL? (ex. https://www.example.com) >> https://ja.wikipedia.org/wiki/生物の分類
###Markdown
🐼 I love Pandas!
###Code
# response.text would also work inside the parentheses, but response.content seems better.
# Honestly, I only found out two nights ago that something as handy as pd.read_html() exists. Someone should have told me sooner 😭
myList = pd.read_html(response.content)
# Oddly, myList's type is list, yet each element you slice out is a DataFrame.
# myList is a list of DataFrames, like myList=[df_0, df_1, ..., df_n].
# Like this:
print('myList : ', type(myList), '\nmyList[0]: ', type(myList[0]))
###Output
myList : <class 'list'>
myList[0]: <class 'pandas.core.frame.DataFrame'>
###Markdown
Print the result to see whether this looks right.
###Code
# The output below is an example from scraping 'https://ja.wikipedia.org/wiki/生物の分類'
for i,j in enumerate(myList):
print('[Table ' + str(i).zfill(3) + ']\n', j)
###Output
[Table 000]
0 1
0 NaN ウィキペディアにおける生物の分類の扱いについては、Wikipedia:ウィキプロジェクト 生...
[Table 001]
0 1 2 3 \
0 リンネ(1735年)2界説 ヘッケル(1894年)3界説 ホイタッカー(1969年)5界説 ウーズ(1977年)6界説
1 NaN 原生生物界 モネラ界 真正細菌界
2 NaN 原生生物界 モネラ界 古細菌界
3 NaN 原生生物界 原生生物界 原生生物界
4 植物界 植物界 菌界 菌界
5 植物界 植物界 植物界 植物界
6 動物界 動物界 動物界 動物界
4 5
0 ウーズ(1990年)3ドメイン説[6] 具体例[7]
1 細菌 大腸菌、放線菌、藍色細菌
2 古細菌(アーキア) メタン生成菌、好熱好酸菌
3 真核生物 藻類、原生動物、変形菌類
4 真核生物 キノコ、カビ、地衣植物
5 真核生物 コケ類、シダ類、種子植物
6 真核生物 無脊椎動物、脊椎動物
[Table 002]
Unnamed: 0 クラスター スーパーグループ \
0 真 核 生 物 アモルフェア Amorphea オピストコンタOpisthokonta
1 真 核 生 物 アモルフェア Amorphea オピストコンタOpisthokonta
2 真 核 生 物 アモルフェア Amorphea アメーボゾアAmoebozoa
3 真 核 生 物 エクスカバータ Excavata エクスカバータ Excavata
4 真 核 生 物 エクスカバータ Excavata エクスカバータ Excavata
5 真 核 生 物 エクスカバータ Excavata エクスカバータ Excavata
6 真 核 生 物 ディアフォレティケスDiaphoretickes アーケプラスチダArchaeplastida
7 真 核 生 物 ディアフォレティケスDiaphoretickes アーケプラスチダArchaeplastida
8 真 核 生 物 ディアフォレティケスDiaphoretickes アーケプラスチダArchaeplastida
9 真 核 生 物 ディアフォレティケスDiaphoretickes SAR Sar
10 真 核 生 物 ディアフォレティケスDiaphoretickes SAR Sar
11 真 核 生 物 ディアフォレティケスDiaphoretickes SAR Sar
スーパーグループ.1 下位分類、具体例
0 オピストコンタOpisthokonta ホロゾア Holozoa(動物、襟鞭毛虫 など)
1 オピストコンタOpisthokonta Nucletmycea(菌類 など)
2 アメーボゾアAmoebozoa ツブリネア Tubulinea、古アメーバ類 Archamoebae、原生粘菌(プロトステリ...
3 エクスカバータ Excavata メタモナス類 Metamonada(フォルニカータ、パラバサリア、プレアクソスチラ)
4 エクスカバータ Excavata Discoba(ユーグレノゾア、ヘテロロボサ、ジャコバ類 など)
5 エクスカバータ Excavata マラウィモナス Malawimonas
6 アーケプラスチダArchaeplastida 緑色植物 Chloroplastida(緑藻植物、陸上植物など)
7 アーケプラスチダArchaeplastida 紅藻 Rhodophyceae
8 アーケプラスチダArchaeplastida 灰色藻 Glaucophyta
9 ストラメノパイル Stramenopiles 不等毛植物(褐藻、珪藻 、ラフィド藻、黄金色藻、黄緑藻など)、 オパリナ類、ビコソエカ類、ラ...
10 アルベオラータ Alveolata 渦鞭毛藻、アピコンプレクサ、繊毛虫 など
11 リザリア Rhizaria クロララクニオン藻、有孔虫、放散虫 など
[Table 003]
Empty DataFrame
Columns: [生物 ドメイン 界(かい) 門(もん) 綱(こう) 目(もく) 科(か) 属(ぞく) 種(しゅ)]
Index: []
[Table 004]
0 1 2 3 \
0 和名 英名 ラテン語名 例:ヒト
1 ドメイン: domain: regio: 真核生物
2 界: kingdom: regnum: 動物界
3 門: phylum/division: phylum/divisio: 脊索動物門(脊椎動物亜門)
4 綱: class: classis: 哺乳綱
5 目: order: ordo: サル目
6 科: family: familia: ヒト科
7 属: genus: genus: ヒト属Homo
8 種: species: species: H. sapiens
4 5 6 7
0 例:ローズマリー 例:エノキタケ 例:大腸菌 例:A. ペルニクス
1 真核生物 真核生物 細菌 古細菌
2 植物界 菌界 なし プロテオ古細菌界[10]
3 被子植物門 担子菌門 プロテオバクテリア門 クレン古細菌門
4 双子葉植物綱 菌蕈綱 γプロテオバクテリア綱 テルモプロテウス綱
5 シソ目 ハラタケ目 腸内細菌目 デスルフロコックス目
6 シソ科 キシメジ科 腸内細菌科 デスルフロコックス科
7 ローズマリー属Rosemarinus エノキタケ属Flammulina エスケリキア属Escherichia アエロピュルム属Aeropyrum
8 R. officinalis F. velutipes E. coli A. pernix
[Table 005]
0 1 2 3 4 5 \
0 NaN 分類単位Taxon 植物Plants 藻Algae 菌Fungi 動物Animals
1 門 Division/Phylum -phyta -phyta -mycota NaN
2 亜門 Subdivision/Subphylum -phytina -phytina -mycotina NaN
3 綱 Class -opsida -phyceae -mycetes NaN
4 亜綱 Subclass -idae -phycidae -mycetidae NaN
5 目 Order -ales -ales -ales NaN
6 亜目 Suborder -ineae -ineae -ineae NaN
7 上科 Superfamily -acea -acea -acea -oidea
8 科 Family -aceae -aceae -aceae -idae
9 亜科 Subfamily -oideae -oideae -oideae -inae
10 族(連) Tribe -eae -eae -eae -ini
11 亜族(亜連) Subtribe -inae -inae -inae -ina
6
0 細菌、古細菌Bacteria, Archaea
1 NaN
2 NaN
3 (-ia)
4 (-idae)
5 -ales
6 -ineae
7 NaN
8 -aceae
9 -oideae(現在使用されていない)
10 -eae(同上)
11 -inae(同上)
[Table 006]
0 1
0 NaN この節の加筆が望まれています。 (2018年8月)
###Markdown
This will do. Write the tables out to CSV files, and we're done.
###Code
try:
goornogo = input(str(len(myList))+' csv files will be generated. OK? ([y]/n) >> ').lower()
if goornogo != 'n':
if os.path.isfile('output_000.csv'):
            print('Oops! output_000.csv already exists!\n*** ABORTED ***')
            os.system('open .')  # For Unix-like systems; adjust for Windows as needed.
else:
for i,j in enumerate(myList):
j.to_csv('output_'+str(i).zfill(3)+'.csv')
print('Done!')
            os.system('open .')  # For Unix-like systems; adjust for Windows as needed.
else: print('Bye!')
except Exception as exc_msg:
print("Error!{}".format(exc_msg))
###Output
7 csv files will be generated. OK? ([y]/n) >> y
Done!
###Markdown
What? You'd rather have Excel?
###Code
try:
goornogo = input(str(len(myList))+' xlsx files will be generated. OK? ([y]/n) >> ').lower()
if goornogo != 'n':
if os.path.isfile('output_000.xlsx'):
            print('Oops! output_000.xlsx already exists!\n*** ABORTED ***')
            os.system('open .')  # For Unix-like systems; adjust for Windows as needed.
else:
for i,j in enumerate(myList):
j.to_excel('output_'+str(i).zfill(3)+'.xlsx')
print('Done!')
            os.system('open .')  # For Unix-like systems; adjust for Windows as needed.
else: print('Bye!')
except Exception as exc_msg:
print("Error!{}".format(exc_msg))
###Output
7 xlsx files will be generated. OK? ([y]/n) >> y
Done!
|
Notebooks/ConvRecur_113.ipynb | ###Markdown
ORF recognition by Convolutional/Recurrent Test CNN+LSTM 32 on simulated RNA of length 128. Uses the restructured codebase from notebook 105.
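The inputs below are length-by-alphabet arrays (see `INPUT_SHAPE`); presumably each base is one-hot encoded, with the real logic living in `SimTools.RNA_prep.prepare_inputs_len_x_alphabet`. A hedged sketch of that layout:

```python
import numpy as np

BASES = "ACGT"
def one_hot(seq):
    """Encode an RNA/DNA string as a (len(seq), 4) one-hot array."""
    x = np.zeros((len(seq), len(BASES)), dtype=np.float32)
    for i, base in enumerate(seq):
        x[i, BASES.index(base)] = 1.0
    return x

print(one_hot("ATGCCC").shape)   # (6, 4) -> (RNA_LEN, ALPHABET) for full-length sequences
```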
###Code
import time
t = time.time()
time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t))
PC_SEQUENCES=32000 # how many protein-coding sequences
NC_SEQUENCES=32000 # how many non-coding sequences
PC_TESTS=1000
NC_TESTS=1000
RNA_LEN=128 # how long is each sequence
CDS_LEN=64 # min CDS len to be coding
ALPHABET=4 # how many different letters are possible
INPUT_SHAPE_2D = (RNA_LEN,ALPHABET,1) # Conv2D needs 3D inputs
INPUT_SHAPE = (RNA_LEN,ALPHABET) # Conv1D needs 2D inputs
FILTERS = 64 # how many different patterns the model looks for
CELLS = 64
NEURONS = 64
DROP_RATE = 0.4
WIDTH = 3 # how wide each pattern is, in bases
STRIDE_2D = (1,1) # For Conv2D how far in each direction
STRIDE = 1 # For Conv1D, how far between pattern matches, in bases
EPOCHS=100 # how many times to train on all the data
SPLITS=3 # SPLITS=3 means train on 2/3 and validate on 1/3
FOLDS=3 # train the model this many times (range 1 to SPLITS)
import sys
IN_COLAB = False
try:
from google.colab import drive
IN_COLAB = True
except:
pass
if IN_COLAB:
print("On Google CoLab, mount cloud-local file, get our code from GitHub.")
PATH='/content/drive/'
#drive.mount(PATH,force_remount=True) # hardly ever need this
#drive.mount(PATH) # Google will require login credentials
DATAPATH=PATH+'My Drive/data/' # must end in "/"
import requests
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_describe.py')
with open('RNA_describe.py', 'w') as f:
f.write(r.text)
from RNA_describe import ORF_counter
from RNA_describe import Random_Base_Oracle
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_prep.py')
with open('RNA_prep.py', 'w') as f:
f.write(r.text)
from RNA_prep import prepare_inputs_len_x_alphabet
else:
print("CoLab not working. On my PC, use relative paths.")
DATAPATH='data/' # must end in "/"
sys.path.append("..") # append parent dir in order to use sibling dirs
from SimTools.RNA_describe import ORF_counter,Random_Base_Oracle
from SimTools.RNA_prep import prepare_inputs_len_x_alphabet
MODELPATH="BestModel" # saved on cloud instance and lost after logout
#MODELPATH=DATAPATH+MODELPATH # saved on Google Drive but requires login
from os import listdir
import csv
from zipfile import ZipFile
import numpy as np
import pandas as pd
from scipy import stats # mode
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Dense,Embedding,Dropout
from keras.layers import Conv1D,Conv2D
from keras.layers import GRU,LSTM
from keras.layers import Flatten,TimeDistributed
from keras.layers import MaxPooling1D,MaxPooling2D
from keras.losses import BinaryCrossentropy
# tf.keras.losses.BinaryCrossentropy
import matplotlib.pyplot as plt
from matplotlib import colors
mycmap = colors.ListedColormap(['red','blue']) # list color for label 0 then 1
np.set_printoptions(precision=2)
rbo=Random_Base_Oracle(RNA_LEN,True)
pc_all,nc_all = rbo.get_partitioned_sequences(CDS_LEN,10) # just testing
pc_all,nc_all = rbo.get_partitioned_sequences(CDS_LEN,PC_SEQUENCES+PC_TESTS)
print("Use",len(pc_all),"PC seqs")
print("Use",len(nc_all),"NC seqs")
# Describe the sequences
def describe_sequences(list_of_seq):
oc = ORF_counter()
num_seq = len(list_of_seq)
rna_lens = np.zeros(num_seq)
orf_lens = np.zeros(num_seq)
for i in range(0,num_seq):
rna_len = len(list_of_seq[i])
rna_lens[i] = rna_len
oc.set_sequence(list_of_seq[i])
orf_len = oc.get_max_orf_len()
orf_lens[i] = orf_len
print ("Average RNA length:",rna_lens.mean())
print ("Average ORF length:",orf_lens.mean())
print("Simulated sequences prior to adjustment:")
print("PC seqs")
describe_sequences(pc_all)
print("NC seqs")
describe_sequences(nc_all)
pc_train=pc_all[:PC_SEQUENCES]
nc_train=nc_all[:NC_SEQUENCES]
pc_test=pc_all[PC_SEQUENCES:]
nc_test=nc_all[NC_SEQUENCES:]
# Use code from our SimTools library.
X,y = prepare_inputs_len_x_alphabet(pc_train,nc_train,ALPHABET) # shuffles
print("Data ready.")
def make_DNN():
print("make_DNN")
print("input shape:",INPUT_SHAPE)
dnn = Sequential()
#dnn.add(Embedding(input_dim=INPUT_SHAPE,output_dim=INPUT_SHAPE))
dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same",
input_shape=INPUT_SHAPE))
dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same"))
dnn.add(MaxPooling1D())
#dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same"))
#dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same"))
#dnn.add(MaxPooling1D())
#dnn.add(TimeDistributed(Flatten()))
dnn.add(LSTM(CELLS,return_sequences=True))
dnn.add(LSTM(CELLS,return_sequences=False))
dnn.add(Dense(NEURONS,activation="sigmoid",dtype=np.float32))
dnn.add(Dropout(DROP_RATE))
dnn.add(Dense(1,activation="sigmoid",dtype=np.float32))
dnn.compile(optimizer='adam',
loss=BinaryCrossentropy(from_logits=False),
metrics=['accuracy']) # add to default metrics=loss
dnn.build(input_shape=INPUT_SHAPE)
#ln_rate = tf.keras.optimizers.Adam(learning_rate = LN_RATE)
#bc=tf.keras.losses.BinaryCrossentropy(from_logits=False)
#model.compile(loss=bc, optimizer=ln_rate, metrics=["accuracy"])
return dnn
model = make_DNN()
print(model.summary())
from keras.callbacks import ModelCheckpoint
def do_cross_validation(X,y):
cv_scores = []
fold=0
mycallbacks = [ModelCheckpoint(
filepath=MODELPATH, save_best_only=True,
monitor='val_accuracy', mode='max')]
splitter = KFold(n_splits=SPLITS) # this does not shuffle
for train_index,valid_index in splitter.split(X):
if fold < FOLDS:
fold += 1
X_train=X[train_index] # inputs for training
y_train=y[train_index] # labels for training
X_valid=X[valid_index] # inputs for validation
y_valid=y[valid_index] # labels for validation
print("MODEL")
# Call constructor on each CV. Else, continually improves the same model.
            model = make_DNN()
print("FIT") # model.fit() implements learning
start_time=time.time()
history=model.fit(X_train, y_train,
epochs=EPOCHS,
verbose=1, # ascii art while learning
callbacks=mycallbacks, # called at end of each epoch
validation_data=(X_valid,y_valid))
end_time=time.time()
elapsed_time=(end_time-start_time)
print("Fold %d, %d epochs, %d sec"%(fold,EPOCHS,elapsed_time))
# print(history.history.keys()) # all these keys will be shown in figure
pd.DataFrame(history.history).plot(figsize=(8,5))
plt.grid(True)
plt.gca().set_ylim(0,1) # any losses > 1 will be off the scale
plt.show()
do_cross_validation(X,y)
from keras.models import load_model
X,y = prepare_inputs_len_x_alphabet(pc_test,nc_test,ALPHABET)
best_model=load_model(MODELPATH)
scores = best_model.evaluate(X, y, verbose=0)
print("The best model parameters were saved during cross-validation.")
print("Best was defined as maximum validation accuracy at end of any epoch.")
print("Now re-load the best model and test it on previously unseen data.")
print("Test on",len(pc_test),"PC seqs")
print("Test on",len(nc_test),"NC seqs")
print("%s: %.2f%%" % (best_model.metrics_names[1], scores[1]*100))
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
ns_probs = [0 for _ in range(len(y))]
bm_probs = best_model.predict(X)
ns_auc = roc_auc_score(y, ns_probs)
bm_auc = roc_auc_score(y, bm_probs)
ns_fpr, ns_tpr, _ = roc_curve(y, ns_probs)
bm_fpr, bm_tpr, _ = roc_curve(y, bm_probs)
plt.plot(ns_fpr, ns_tpr, linestyle='--', label='Guess, auc=%.4f'%ns_auc)
plt.plot(bm_fpr, bm_tpr, marker='.', label='Model, auc=%.4f'%bm_auc)
plt.title('ROC')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.show()
print("%s: %.2f%%" %('AUC',bm_auc*100.0))
t = time.time()
time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t))
###Output
_____no_output_____ |
Exercise_1_House_Prices_Question.ipynb | ###Markdown
GRADED FUNCTION: house_model
###Code
import numpy as np
import tensorflow as tf
from tensorflow import keras

def house_model(y_new):
    # Pricing rule from the prompt: 50k base + 50k per bedroom (values kept in units of 1k).
    xs = np.array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
    ys = np.array([50, 100, 150, 200, 250], dtype=float)
    # A single linear unit is enough to fit y = 50 + 50x.
    model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
    model.compile(optimizer='sgd', loss='mean_squared_error')
    model.fit(xs, ys, epochs=1000)
    return model.predict(y_new)[0]
def main():
print('hi')
prediction = house_model([7.0])
print(prediction)
if __name__ == '__main__':
main()
###Output
_____no_output_____
###Markdown
In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula.So, imagine if house pricing was as easy as a house costs 50k + 50k per bedroom, so that a 1 bedroom house costs 100k, a 2 bedroom house costs 150k etc.How would you create a neural network that learns this relationship so that it would predict a 7 bedroom house as costing close to 400k etc.Hint: Your network might work better if you scale the house price down. You don't have to give the answer 400...it might be better to create something that predicts the number 4, and then your answer is in the 'hundreds of thousands' etc.
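The pricing rule in the prompt is linear, so it is worth writing out directly before handing it to a network; scaled to units of 100k, as the hint suggests:

```python
def house_price_scaled(bedrooms):
    """50k base + 50k per bedroom, expressed in hundreds of thousands."""
    return 0.5 + 0.5 * bedrooms

print(house_price_scaled(7))   # 4.0, i.e. about 400k
```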
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float)
ys = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=float)
model.fit(xs, ys, epochs=500)
print(model.predict([7.0]))
###Output
_____no_output_____
###Markdown
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula.So, imagine if house pricing was as easy as a house costs 50k + 50k per bedroom, so that a 1 bedroom house costs 100k, a 2 bedroom house costs 150k etc.How would you create a neural network that learns this relationship so that it would predict a 7 bedroom house as costing close to 400k etc.Hint: Your network might work better if you scale the house price down. You don't have to give the answer 400...it might be better to create something that predicts the number 4, and then your answer is in the 'hundreds of thousands' etc.
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
model = keras.Sequential([keras.layers.Dense(units = 1, input_shape = [1])])
model.compile(optimizer = 'sgd', loss = 'mean_squared_error')
xs = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
ys = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5])
model.fit(xs, ys, epochs = 500)
print(model.predict([7.0]))
###Output
Epoch 1/500
1/1 [==============================] - 0s 1ms/step - loss: 45.4652
Epoch 2/500
1/1 [==============================] - 0s 2ms/step - loss: 23.9537
Epoch 3/500
1/1 [==============================] - 0s 1ms/step - loss: 12.6207
Epoch 4/500
1/1 [==============================] - 0s 2ms/step - loss: 6.6500
Epoch 5/500
1/1 [==============================] - 0s 2ms/step - loss: 3.5044
Epoch 6/500
1/1 [==============================] - 0s 2ms/step - loss: 1.8471
Epoch 7/500
1/1 [==============================] - 0s 2ms/step - loss: 0.9740
Epoch 8/500
1/1 [==============================] - 0s 2ms/step - loss: 0.5140
Epoch 9/500
1/1 [==============================] - 0s 2ms/step - loss: 0.2717
Epoch 10/500
1/1 [==============================] - 0s 2ms/step - loss: 0.1440
Epoch 11/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0767
Epoch 12/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0413
Epoch 13/500
1/1 [==============================] - 0s 1ms/step - loss: 0.0226
Epoch 14/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0127
Epoch 15/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0075
Epoch 16/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0048
Epoch 17/500
1/1 [==============================] - 0s 1ms/step - loss: 0.0033
Epoch 18/500
1/1 [==============================] - 0s 1ms/step - loss: 0.0025
Epoch 19/500
1/1 [==============================] - 0s 1ms/step - loss: 0.0021
Epoch 20/500
1/1 [==============================] - 0s 7ms/step - loss: 0.0019
Epoch 21/500
1/1 [==============================] - 0s 1ms/step - loss: 0.0017
Epoch 22/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0017
Epoch 23/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0016
Epoch 24/500
1/1 [==============================] - 0s 1ms/step - loss: 0.0016
Epoch 25/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0016
Epoch 26/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0015
Epoch 27/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0015
Epoch 28/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0015
Epoch 29/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0015
Epoch 30/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0015
Epoch 31/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0014
Epoch 32/500
1/1 [==============================] - 0s 1ms/step - loss: 0.0014
Epoch 33/500
1/1 [==============================] - 0s 1ms/step - loss: 0.0014
Epoch 34/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0014
Epoch 35/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0014
Epoch 36/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0014
Epoch 37/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0013
Epoch 38/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0013
Epoch 39/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0013
Epoch 40/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0013
Epoch 41/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0013
Epoch 42/500
1/1 [==============================] - 0s 1ms/step - loss: 0.0013
Epoch 43/500
1/1 [==============================] - 0s 1ms/step - loss: 0.0013
Epoch 44/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0012
Epoch 45/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0012
Epoch 46/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0012
Epoch 47/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0012
Epoch 48/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0012
Epoch 49/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0012
Epoch 50/500
1/1 [==============================] - 0s 1ms/step - loss: 0.0012
Epoch 51/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0011
Epoch 52/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0011
Epoch 53/500
1/1 [==============================] - 0s 1ms/step - loss: 0.0011
Epoch 54/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0011
Epoch 55/500
1/1 [==============================] - 0s 1ms/step - loss: 0.0011
Epoch 56/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0011
Epoch 57/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0011
Epoch 58/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0011
Epoch 59/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0010
Epoch 60/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0010
Epoch 61/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0010
Epoch 62/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0010
Epoch 63/500
1/1 [==============================] - 0s 2ms/step - loss: 9.9248e-04
Epoch 64/500
1/1 [==============================] - 0s 2ms/step - loss: 9.8093e-04
Epoch 65/500
1/1 [==============================] - 0s 2ms/step - loss: 9.6952e-04
Epoch 66/500
1/1 [==============================] - 0s 2ms/step - loss: 9.5823e-04
Epoch 67/500
1/1 [==============================] - 0s 3ms/step - loss: 9.4708e-04
Epoch 68/500
1/1 [==============================] - 0s 2ms/step - loss: 9.3606e-04
Epoch 69/500
1/1 [==============================] - 0s 2ms/step - loss: 9.2517e-04
Epoch 70/500
1/1 [==============================] - 0s 3ms/step - loss: 9.1440e-04
Epoch 71/500
1/1 [==============================] - 0s 2ms/step - loss: 9.0376e-04
Epoch 72/500
1/1 [==============================] - 0s 2ms/step - loss: 8.9324e-04
Epoch 73/500
1/1 [==============================] - 0s 2ms/step - loss: 8.8284e-04
Epoch 74/500
1/1 [==============================] - 0s 2ms/step - loss: 8.7257e-04
Epoch 75/500
1/1 [==============================] - 0s 2ms/step - loss: 8.6241e-04
Epoch 76/500
1/1 [==============================] - 0s 3ms/step - loss: 8.5238e-04
Epoch 77/500
1/1 [==============================] - 0s 2ms/step - loss: 8.4246e-04
Epoch 78/500
1/1 [==============================] - 0s 2ms/step - loss: 8.3265e-04
Epoch 79/500
1/1 [==============================] - 0s 2ms/step - loss: 8.2296e-04
Epoch 80/500
1/1 [==============================] - 0s 2ms/step - loss: 8.1339e-04
Epoch 81/500
1/1 [==============================] - 0s 2ms/step - loss: 8.0392e-04
Epoch 82/500
1/1 [==============================] - 0s 1ms/step - loss: 7.9456e-04
Epoch 83/500
1/1 [==============================] - 0s 1ms/step - loss: 7.8532e-04
Epoch 84/500
1/1 [==============================] - 0s 2ms/step - loss: 7.7618e-04
Epoch 85/500
1/1 [==============================] - 0s 1ms/step - loss: 7.6714e-04
Epoch 86/500
1/1 [==============================] - 0s 2ms/step - loss: 7.5822e-04
Epoch 87/500
1/1 [==============================] - 0s 1ms/step - loss: 7.4939e-04
Epoch 88/500
1/1 [==============================] - 0s 1ms/step - loss: 7.4067e-04
Epoch 89/500
1/1 [==============================] - 0s 4ms/step - loss: 7.3205e-04
Epoch 90/500
1/1 [==============================] - 0s 3ms/step - loss: 7.2353e-04
Epoch 91/500
1/1 [==============================] - 0s 2ms/step - loss: 7.1511e-04
Epoch 92/500
1/1 [==============================] - 0s 2ms/step - loss: 7.0679e-04
Epoch 93/500
1/1 [==============================] - 0s 2ms/step - loss: 6.9856e-04
Epoch 94/500
1/1 [==============================] - 0s 2ms/step - loss: 6.9043e-04
Epoch 95/500
1/1 [==============================] - 0s 2ms/step - loss: 6.8240e-04
Epoch 96/500
1/1 [==============================] - 0s 3ms/step - loss: 6.7446e-04
Epoch 97/500
1/1 [==============================] - 0s 3ms/step - loss: 6.6661e-04
Epoch 98/500
1/1 [==============================] - 0s 2ms/step - loss: 6.5885e-04
Epoch 99/500
1/1 [==============================] - 0s 1ms/step - loss: 6.5118e-04
Epoch 100/500
1/1 [==============================] - 0s 2ms/step - loss: 6.4361e-04
Epoch 101/500
1/1 [==============================] - 0s 1ms/step - loss: 6.3611e-04
Epoch 102/500
1/1 [==============================] - 0s 1ms/step - loss: 6.2871e-04
Epoch 103/500
1/1 [==============================] - 0s 2ms/step - loss: 6.2140e-04
Epoch 104/500
1/1 [==============================] - 0s 2ms/step - loss: 6.1416e-04
Epoch 105/500
1/1 [==============================] - 0s 2ms/step - loss: 6.0702e-04
Epoch 106/500
1/1 [==============================] - 0s 2ms/step - loss: 5.9995e-04
Epoch 107/500
1/1 [==============================] - 0s 2ms/step - loss: 5.9297e-04
Epoch 108/500
1/1 [==============================] - 0s 2ms/step - loss: 5.8607e-04
Epoch 109/500
1/1 [==============================] - 0s 2ms/step - loss: 5.7925e-04
Epoch 110/500
1/1 [==============================] - 0s 2ms/step - loss: 5.7251e-04
Epoch 111/500
1/1 [==============================] - 0s 2ms/step - loss: 5.6585e-04
Epoch 112/500
1/1 [==============================] - 0s 2ms/step - loss: 5.5926e-04
Epoch 113/500
1/1 [==============================] - 0s 2ms/step - loss: 5.5275e-04
Epoch 114/500
1/1 [==============================] - 0s 2ms/step - loss: 5.4632e-04
Epoch 115/500
1/1 [==============================] - 0s 2ms/step - loss: 5.3996e-04
Epoch 116/500
1/1 [==============================] - 0s 2ms/step - loss: 5.3368e-04
Epoch 117/500
1/1 [==============================] - 0s 1ms/step - loss: 5.2747e-04
Epoch 118/500
1/1 [==============================] - 0s 2ms/step - loss: 5.2133e-04
Epoch 119/500
1/1 [==============================] - 0s 1ms/step - loss: 5.1526e-04
Epoch 120/500
1/1 [==============================] - 0s 1ms/step - loss: 5.0926e-04
Epoch 121/500
1/1 [==============================] - 0s 1ms/step - loss: 5.0334e-04
Epoch 122/500
1/1 [==============================] - 0s 1ms/step - loss: 4.9748e-04
Epoch 123/500
1/1 [==============================] - 0s 2ms/step - loss: 4.9169e-04
Epoch 124/500
1/1 [==============================] - 0s 2ms/step - loss: 4.8597e-04
Epoch 125/500
1/1 [==============================] - 0s 2ms/step - loss: 4.8031e-04
Epoch 126/500
1/1 [==============================] - 0s 2ms/step - loss: 4.7472e-04
Epoch 127/500
1/1 [==============================] - 0s 1ms/step - loss: 4.6920e-04
Epoch 128/500
1/1 [==============================] - 0s 2ms/step - loss: 4.6374e-04
Epoch 129/500
1/1 [==============================] - 0s 2ms/step - loss: 4.5834e-04
Epoch 130/500
1/1 [==============================] - 0s 1ms/step - loss: 4.5301e-04
Epoch 131/500
1/1 [==============================] - 0s 1ms/step - loss: 4.4774e-04
Epoch 132/500
1/1 [==============================] - 0s 1ms/step - loss: 4.4253e-04
Epoch 133/500
1/1 [==============================] - 0s 2ms/step - loss: 4.3738e-04
Epoch 134/500
1/1 [==============================] - 0s 2ms/step - loss: 4.3228e-04
Epoch 135/500
1/1 [==============================] - 0s 2ms/step - loss: 4.2725e-04
Epoch 136/500
1/1 [==============================] - 0s 2ms/step - loss: 4.2228e-04
Epoch 137/500
1/1 [==============================] - 0s 2ms/step - loss: 4.1737e-04
Epoch 138/500
1/1 [==============================] - 0s 2ms/step - loss: 4.1251e-04
Epoch 139/500
1/1 [==============================] - 0s 2ms/step - loss: 4.0771e-04
Epoch 140/500
1/1 [==============================] - 0s 2ms/step - loss: 4.0296e-04
Epoch 141/500
1/1 [==============================] - 0s 1ms/step - loss: 3.9827e-04
Epoch 142/500
1/1 [==============================] - 0s 1ms/step - loss: 3.9364e-04
Epoch 143/500
1/1 [==============================] - 0s 1ms/step - loss: 3.8906e-04
Epoch 144/500
1/1 [==============================] - 0s 1ms/step - loss: 3.8453e-04
Epoch 145/500
1/1 [==============================] - 0s 2ms/step - loss: 3.8006e-04
Epoch 146/500
1/1 [==============================] - 0s 2ms/step - loss: 3.7563e-04
Epoch 147/500
1/1 [==============================] - 0s 2ms/step - loss: 3.7126e-04
Epoch 148/500
1/1 [==============================] - 0s 2ms/step - loss: 3.6694e-04
Epoch 149/500
1/1 [==============================] - 0s 3ms/step - loss: 3.6267e-04
Epoch 150/500
1/1 [==============================] - 0s 2ms/step - loss: 3.5845e-04
Epoch 151/500
1/1 [==============================] - 0s 1ms/step - loss: 3.5428e-04
Epoch 152/500
1/1 [==============================] - 0s 1ms/step - loss: 3.5015e-04
Epoch 153/500
1/1 [==============================] - 0s 1ms/step - loss: 3.4608e-04
Epoch 154/500
1/1 [==============================] - 0s 1ms/step - loss: 3.4205e-04
Epoch 155/500
1/1 [==============================] - 0s 1ms/step - loss: 3.3807e-04
Epoch 156/500
1/1 [==============================] - 0s 2ms/step - loss: 3.3414e-04
Epoch 157/500
1/1 [==============================] - 0s 2ms/step - loss: 3.3025e-04
Epoch 158/500
1/1 [==============================] - 0s 2ms/step - loss: 3.2641e-04
Epoch 159/500
1/1 [==============================] - 0s 1ms/step - loss: 3.2261e-04
Epoch 160/500
1/1 [==============================] - 0s 1ms/step - loss: 3.1885e-04
Epoch 161/500
1/1 [==============================] - 0s 1ms/step - loss: 3.1514e-04
Epoch 162/500
1/1 [==============================] - 0s 1ms/step - loss: 3.1147e-04
Epoch 163/500
1/1 [==============================] - 0s 2ms/step - loss: 3.0785e-04
Epoch 164/500
1/1 [==============================] - 0s 2ms/step - loss: 3.0427e-04
Epoch 165/500
1/1 [==============================] - 0s 1ms/step - loss: 3.0073e-04
Epoch 166/500
1/1 [==============================] - 0s 2ms/step - loss: 2.9723e-04
Epoch 167/500
1/1 [==============================] - 0s 3ms/step - loss: 2.9377e-04
Epoch 168/500
1/1 [==============================] - 0s 1ms/step - loss: 2.9035e-04
Epoch 169/500
1/1 [==============================] - 0s 1ms/step - loss: 2.8697e-04
Epoch 170/500
1/1 [==============================] - 0s 2ms/step - loss: 2.8363e-04
Epoch 171/500
1/1 [==============================] - 0s 2ms/step - loss: 2.8033e-04
Epoch 172/500
1/1 [==============================] - 0s 2ms/step - loss: 2.7707e-04
Epoch 173/500
1/1 [==============================] - 0s 2ms/step - loss: 2.7384e-04
Epoch 174/500
1/1 [==============================] - 0s 2ms/step - loss: 2.7065e-04
Epoch 175/500
1/1 [==============================] - 0s 1ms/step - loss: 2.6750e-04
Epoch 176/500
1/1 [==============================] - 0s 2ms/step - loss: 2.6439e-04
Epoch 177/500
1/1 [==============================] - 0s 2ms/step - loss: 2.6132e-04
Epoch 178/500
1/1 [==============================] - 0s 2ms/step - loss: 2.5827e-04
Epoch 179/500
1/1 [==============================] - 0s 5ms/step - loss: 2.5527e-04
Epoch 180/500
1/1 [==============================] - 0s 1ms/step - loss: 2.5230e-04
Epoch 181/500
1/1 [==============================] - 0s 2ms/step - loss: 2.4936e-04
Epoch 182/500
1/1 [==============================] - 0s 2ms/step - loss: 2.4646e-04
Epoch 183/500
1/1 [==============================] - 0s 2ms/step - loss: 2.4359e-04
Epoch 184/500
1/1 [==============================] - 0s 2ms/step - loss: 2.4076e-04
Epoch 185/500
1/1 [==============================] - 0s 2ms/step - loss: 2.3795e-04
Epoch 186/500
1/1 [==============================] - 0s 2ms/step - loss: 2.3519e-04
Epoch 187/500
1/1 [==============================] - 0s 2ms/step - loss: 2.3245e-04
Epoch 188/500
1/1 [==============================] - 0s 1ms/step - loss: 2.2974e-04
Epoch 189/500
1/1 [==============================] - 0s 2ms/step - loss: 2.2707e-04
Epoch 190/500
1/1 [==============================] - 0s 1ms/step - loss: 2.2443e-04
Epoch 191/500
1/1 [==============================] - 0s 1ms/step - loss: 2.2182e-04
Epoch 192/500
1/1 [==============================] - 0s 2ms/step - loss: 2.1923e-04
Epoch 193/500
1/1 [==============================] - 0s 2ms/step - loss: 2.1668e-04
Epoch 194/500
1/1 [==============================] - 0s 2ms/step - loss: 2.1416e-04
Epoch 195/500
1/1 [==============================] - 0s 2ms/step - loss: 2.1167e-04
Epoch 196/500
1/1 [==============================] - 0s 2ms/step - loss: 2.0920e-04
Epoch 197/500
1/1 [==============================] - 0s 2ms/step - loss: 2.0677e-04
Epoch 198/500
1/1 [==============================] - 0s 2ms/step - loss: 2.0436e-04
Epoch 199/500
1/1 [==============================] - 0s 2ms/step - loss: 2.0198e-04
Epoch 200/500
1/1 [==============================] - 0s 3ms/step - loss: 1.9963e-04
Epoch 201/500
1/1 [==============================] - 0s 2ms/step - loss: 1.9731e-04
Epoch 202/500
1/1 [==============================] - 0s 2ms/step - loss: 1.9501e-04
Epoch 203/500
1/1 [==============================] - 0s 2ms/step - loss: 1.9274e-04
Epoch 204/500
1/1 [==============================] - 0s 3ms/step - loss: 1.9050e-04
Epoch 205/500
1/1 [==============================] - 0s 2ms/step - loss: 1.8828e-04
Epoch 206/500
1/1 [==============================] - 0s 3ms/step - loss: 1.8609e-04
Epoch 207/500
1/1 [==============================] - 0s 2ms/step - loss: 1.8393e-04
Epoch 208/500
1/1 [==============================] - 0s 2ms/step - loss: 1.8179e-04
Epoch 209/500
1/1 [==============================] - 0s 2ms/step - loss: 1.7967e-04
Epoch 210/500
1/1 [==============================] - 0s 2ms/step - loss: 1.7758e-04
Epoch 211/500
1/1 [==============================] - 0s 2ms/step - loss: 1.7552e-04
Epoch 212/500
1/1 [==============================] - 0s 2ms/step - loss: 1.7347e-04
Epoch 213/500
1/1 [==============================] - 0s 3ms/step - loss: 1.7145e-04
Epoch 214/500
1/1 [==============================] - 0s 2ms/step - loss: 1.6946e-04
Epoch 215/500
1/1 [==============================] - 0s 2ms/step - loss: 1.6749e-04
Epoch 216/500
1/1 [==============================] - 0s 998us/step - loss: 1.6554e-04
Epoch 217/500
1/1 [==============================] - 0s 2ms/step - loss: 1.6361e-04
Epoch 218/500
1/1 [==============================] - 0s 1ms/step - loss: 1.6171e-04
Epoch 219/500
1/1 [==============================] - 0s 2ms/step - loss: 1.5982e-04
Epoch 220/500
1/1 [==============================] - 0s 2ms/step - loss: 1.5796e-04
Epoch 221/500
1/1 [==============================] - 0s 2ms/step - loss: 1.5613e-04
Epoch 222/500
1/1 [==============================] - 0s 5ms/step - loss: 1.5431e-04
Epoch 223/500
1/1 [==============================] - 0s 2ms/step - loss: 1.5251e-04
Epoch 224/500
1/1 [==============================] - 0s 2ms/step - loss: 1.5074e-04
Epoch 225/500
1/1 [==============================] - 0s 2ms/step - loss: 1.4898e-04
Epoch 226/500
1/1 [==============================] - 0s 2ms/step - loss: 1.4725e-04
Epoch 227/500
1/1 [==============================] - 0s 2ms/step - loss: 1.4554e-04
Epoch 228/500
1/1 [==============================] - 0s 2ms/step - loss: 1.4384e-04
Epoch 229/500
1/1 [==============================] - 0s 2ms/step - loss: 1.4217e-04
Epoch 230/500
1/1 [==============================] - 0s 1ms/step - loss: 1.4051e-04
Epoch 231/500
1/1 [==============================] - 0s 1ms/step - loss: 1.3888e-04
Epoch 232/500
1/1 [==============================] - 0s 1ms/step - loss: 1.3726e-04
Epoch 233/500
1/1 [==============================] - 0s 4ms/step - loss: 1.3567e-04
Epoch 234/500
1/1 [==============================] - 0s 2ms/step - loss: 1.3409e-04
Epoch 235/500
1/1 [==============================] - 0s 1ms/step - loss: 1.3253e-04
Epoch 236/500
1/1 [==============================] - 0s 2ms/step - loss: 1.3098e-04
Epoch 237/500
1/1 [==============================] - 0s 1ms/step - loss: 1.2946e-04
Epoch 238/500
1/1 [==============================] - 0s 2ms/step - loss: 1.2795e-04
Epoch 239/500
1/1 [==============================] - 0s 2ms/step - loss: 1.2646e-04
Epoch 240/500
1/1 [==============================] - 0s 2ms/step - loss: 1.2499e-04
Epoch 241/500
1/1 [==============================] - 0s 2ms/step - loss: 1.2354e-04
Epoch 242/500
1/1 [==============================] - 0s 2ms/step - loss: 1.2210e-04
Epoch 243/500
1/1 [==============================] - 0s 2ms/step - loss: 1.2068e-04
Epoch 244/500
1/1 [==============================] - 0s 2ms/step - loss: 1.1927e-04
Epoch 245/500
1/1 [==============================] - 0s 2ms/step - loss: 1.1789e-04
Epoch 246/500
1/1 [==============================] - 0s 2ms/step - loss: 1.1651e-04
Epoch 247/500
1/1 [==============================] - 0s 3ms/step - loss: 1.1516e-04
Epoch 248/500
1/1 [==============================] - 0s 2ms/step - loss: 1.1382e-04
Epoch 249/500
1/1 [==============================] - 0s 2ms/step - loss: 1.1249e-04
Epoch 250/500
1/1 [==============================] - 0s 2ms/step - loss: 1.1118e-04
Epoch 251/500
1/1 [==============================] - 0s 2ms/step - loss: 1.0989e-04
Epoch 252/500
1/1 [==============================] - 0s 2ms/step - loss: 1.0861e-04
Epoch 253/500
1/1 [==============================] - 0s 2ms/step - loss: 1.0735e-04
Epoch 254/500
1/1 [==============================] - 0s 2ms/step - loss: 1.0610e-04
Epoch 255/500
1/1 [==============================] - 0s 2ms/step - loss: 1.0486e-04
Epoch 256/500
1/1 [==============================] - 0s 2ms/step - loss: 1.0364e-04
Epoch 257/500
1/1 [==============================] - 0s 1ms/step - loss: 1.0244e-04
Epoch 258/500
1/1 [==============================] - 0s 1ms/step - loss: 1.0124e-04
Epoch 259/500
1/1 [==============================] - 0s 2ms/step - loss: 1.0007e-04
Epoch 260/500
1/1 [==============================] - 0s 2ms/step - loss: 9.8902e-05
Epoch 261/500
1/1 [==============================] - 0s 1ms/step - loss: 9.7751e-05
Epoch 262/500
1/1 [==============================] - 0s 1ms/step - loss: 9.6613e-05
Epoch 263/500
1/1 [==============================] - 0s 1ms/step - loss: 9.5489e-05
Epoch 264/500
1/1 [==============================] - 0s 2ms/step - loss: 9.4378e-05
Epoch 265/500
1/1 [==============================] - 0s 1ms/step - loss: 9.3279e-05
Epoch 266/500
1/1 [==============================] - 0s 2ms/step - loss: 9.2193e-05
Epoch 267/500
1/1 [==============================] - 0s 2ms/step - loss: 9.1120e-05
Epoch 268/500
1/1 [==============================] - 0s 2ms/step - loss: 9.0060e-05
Epoch 269/500
1/1 [==============================] - 0s 2ms/step - loss: 8.9012e-05
Epoch 270/500
1/1 [==============================] - 0s 2ms/step - loss: 8.7976e-05
Epoch 271/500
1/1 [==============================] - 0s 2ms/step - loss: 8.6952e-05
Epoch 272/500
1/1 [==============================] - 0s 2ms/step - loss: 8.5940e-05
Epoch 273/500
1/1 [==============================] - 0s 1ms/step - loss: 8.4940e-05
Epoch 274/500
1/1 [==============================] - 0s 2ms/step - loss: 8.3951e-05
Epoch 275/500
1/1 [==============================] - 0s 2ms/step - loss: 8.2974e-05
Epoch 276/500
1/1 [==============================] - 0s 1ms/step - loss: 8.2009e-05
Epoch 277/500
1/1 [==============================] - 0s 2ms/step - loss: 8.1054e-05
Epoch 278/500
1/1 [==============================] - 0s 1ms/step - loss: 8.0111e-05
Epoch 279/500
1/1 [==============================] - 0s 1ms/step - loss: 7.9178e-05
Epoch 280/500
1/1 [==============================] - 0s 1ms/step - loss: 7.8257e-05
Epoch 281/500
1/1 [==============================] - 0s 1ms/step - loss: 7.7346e-05
Epoch 282/500
1/1 [==============================] - 0s 2ms/step - loss: 7.6446e-05
Epoch 283/500
1/1 [==============================] - 0s 1ms/step - loss: 7.5557e-05
Epoch 284/500
1/1 [==============================] - 0s 1ms/step - loss: 7.4677e-05
Epoch 285/500
1/1 [==============================] - 0s 1ms/step - loss: 7.3808e-05
Epoch 286/500
1/1 [==============================] - 0s 1ms/step - loss: 7.2949e-05
Epoch 287/500
1/1 [==============================] - 0s 2ms/step - loss: 7.2100e-05
Epoch 288/500
1/1 [==============================] - 0s 1ms/step - loss: 7.1261e-05
Epoch 289/500
1/1 [==============================] - 0s 1ms/step - loss: 7.0432e-05
Epoch 290/500
1/1 [==============================] - 0s 1ms/step - loss: 6.9612e-05
Epoch 291/500
1/1 [==============================] - 0s 1ms/step - loss: 6.8802e-05
Epoch 292/500
1/1 [==============================] - 0s 2ms/step - loss: 6.8002e-05
Epoch 293/500
1/1 [==============================] - 0s 1ms/step - loss: 6.7210e-05
Epoch 294/500
1/1 [==============================] - 0s 2ms/step - loss: 6.6428e-05
Epoch 295/500
1/1 [==============================] - 0s 2ms/step - loss: 6.5655e-05
Epoch 296/500
1/1 [==============================] - 0s 9ms/step - loss: 6.4890e-05
Epoch 297/500
1/1 [==============================] - 0s 2ms/step - loss: 6.4136e-05
Epoch 298/500
1/1 [==============================] - 0s 2ms/step - loss: 6.3389e-05
Epoch 299/500
1/1 [==============================] - 0s 1ms/step - loss: 6.2652e-05
Epoch 300/500
1/1 [==============================] - 0s 998us/step - loss: 6.1922e-05
Epoch 301/500
1/1 [==============================] - 0s 1ms/step - loss: 6.1202e-05
Epoch 302/500
1/1 [==============================] - 0s 2ms/step - loss: 6.0490e-05
Epoch 303/500
1/1 [==============================] - 0s 1ms/step - loss: 5.9786e-05
Epoch 304/500
1/1 [==============================] - 0s 1ms/step - loss: 5.9090e-05
Epoch 305/500
1/1 [==============================] - 0s 925us/step - loss: 5.8402e-05
Epoch 306/500
1/1 [==============================] - 0s 1ms/step - loss: 5.7723e-05
Epoch 307/500
1/1 [==============================] - 0s 1ms/step - loss: 5.7051e-05
Epoch 308/500
1/1 [==============================] - 0s 997us/step - loss: 5.6386e-05
Epoch 309/500
1/1 [==============================] - 0s 1ms/step - loss: 5.5730e-05
Epoch 310/500
1/1 [==============================] - 0s 1ms/step - loss: 5.5082e-05
Epoch 311/500
1/1 [==============================] - 0s 1ms/step - loss: 5.4441e-05
Epoch 312/500
1/1 [==============================] - 0s 1ms/step - loss: 5.3807e-05
Epoch 313/500
1/1 [==============================] - 0s 1ms/step - loss: 5.3181e-05
Epoch 314/500
1/1 [==============================] - 0s 996us/step - loss: 5.2563e-05
Epoch 315/500
1/1 [==============================] - 0s 1ms/step - loss: 5.1950e-05
Epoch 316/500
1/1 [==============================] - 0s 994us/step - loss: 5.1346e-05
Epoch 317/500
1/1 [==============================] - 0s 1ms/step - loss: 5.0748e-05
Epoch 318/500
1/1 [==============================] - 0s 1ms/step - loss: 5.0158e-05
Epoch 319/500
1/1 [==============================] - 0s 1ms/step - loss: 4.9574e-05
Epoch 320/500
1/1 [==============================] - 0s 1ms/step - loss: 4.8997e-05
Epoch 321/500
1/1 [==============================] - 0s 991us/step - loss: 4.8427e-05
Epoch 322/500
1/1 [==============================] - 0s 1ms/step - loss: 4.7863e-05
Epoch 323/500
1/1 [==============================] - 0s 1ms/step - loss: 4.7306e-05
Epoch 324/500
1/1 [==============================] - 0s 2ms/step - loss: 4.6755e-05
Epoch 325/500
1/1 [==============================] - 0s 2ms/step - loss: 4.6211e-05
Epoch 326/500
1/1 [==============================] - 0s 2ms/step - loss: 4.5674e-05
Epoch 327/500
1/1 [==============================] - 0s 2ms/step - loss: 4.5143e-05
Epoch 328/500
1/1 [==============================] - 0s 2ms/step - loss: 4.4617e-05
Epoch 329/500
1/1 [==============================] - 0s 2ms/step - loss: 4.4098e-05
Epoch 330/500
1/1 [==============================] - 0s 7ms/step - loss: 4.3584e-05
Epoch 331/500
1/1 [==============================] - 0s 2ms/step - loss: 4.3077e-05
Epoch 332/500
1/1 [==============================] - 0s 1ms/step - loss: 4.2576e-05
Epoch 333/500
1/1 [==============================] - 0s 2ms/step - loss: 4.2081e-05
Epoch 334/500
1/1 [==============================] - 0s 2ms/step - loss: 4.1591e-05
Epoch 335/500
1/1 [==============================] - 0s 1ms/step - loss: 4.1107e-05
Epoch 336/500
1/1 [==============================] - 0s 1ms/step - loss: 4.0628e-05
Epoch 337/500
1/1 [==============================] - 0s 2ms/step - loss: 4.0155e-05
Epoch 338/500
1/1 [==============================] - 0s 2ms/step - loss: 3.9688e-05
Epoch 339/500
1/1 [==============================] - 0s 2ms/step - loss: 3.9226e-05
Epoch 340/500
1/1 [==============================] - 0s 1ms/step - loss: 3.8770e-05
Epoch 341/500
1/1 [==============================] - 0s 2ms/step - loss: 3.8318e-05
Epoch 342/500
1/1 [==============================] - 0s 3ms/step - loss: 3.7872e-05
Epoch 343/500
1/1 [==============================] - 0s 2ms/step - loss: 3.7432e-05
Epoch 344/500
1/1 [==============================] - 0s 2ms/step - loss: 3.6996e-05
Epoch 345/500
1/1 [==============================] - 0s 1ms/step - loss: 3.6565e-05
Epoch 346/500
1/1 [==============================] - 0s 1ms/step - loss: 3.6140e-05
Epoch 347/500
1/1 [==============================] - 0s 2ms/step - loss: 3.5720e-05
Epoch 348/500
1/1 [==============================] - 0s 1ms/step - loss: 3.5303e-05
Epoch 349/500
1/1 [==============================] - 0s 1ms/step - loss: 3.4893e-05
Epoch 350/500
1/1 [==============================] - 0s 1ms/step - loss: 3.4487e-05
Epoch 351/500
1/1 [==============================] - 0s 1ms/step - loss: 3.4086e-05
Epoch 352/500
1/1 [==============================] - 0s 3ms/step - loss: 3.3689e-05
Epoch 353/500
1/1 [==============================] - 0s 2ms/step - loss: 3.3297e-05
Epoch 354/500
1/1 [==============================] - 0s 2ms/step - loss: 3.2909e-05
Epoch 355/500
1/1 [==============================] - 0s 2ms/step - loss: 3.2526e-05
Epoch 356/500
1/1 [==============================] - 0s 2ms/step - loss: 3.2147e-05
Epoch 357/500
1/1 [==============================] - 0s 2ms/step - loss: 3.1773e-05
Epoch 358/500
1/1 [==============================] - 0s 2ms/step - loss: 3.1404e-05
Epoch 359/500
1/1 [==============================] - 0s 2ms/step - loss: 3.1038e-05
Epoch 360/500
1/1 [==============================] - 0s 2ms/step - loss: 3.0677e-05
Epoch 361/500
1/1 [==============================] - 0s 2ms/step - loss: 3.0320e-05
Epoch 362/500
1/1 [==============================] - 0s 2ms/step - loss: 2.9967e-05
Epoch 363/500
1/1 [==============================] - 0s 2ms/step - loss: 2.9618e-05
Epoch 364/500
1/1 [==============================] - 0s 2ms/step - loss: 2.9274e-05
Epoch 365/500
1/1 [==============================] - 0s 2ms/step - loss: 2.8933e-05
Epoch 366/500
1/1 [==============================] - 0s 2ms/step - loss: 2.8596e-05
Epoch 367/500
1/1 [==============================] - 0s 2ms/step - loss: 2.8263e-05
Epoch 368/500
1/1 [==============================] - 0s 3ms/step - loss: 2.7935e-05
Epoch 369/500
1/1 [==============================] - 0s 2ms/step - loss: 2.7609e-05
Epoch 370/500
1/1 [==============================] - 0s 1ms/step - loss: 2.7288e-05
Epoch 371/500
1/1 [==============================] - 0s 2ms/step - loss: 2.6970e-05
Epoch 372/500
1/1 [==============================] - 0s 2ms/step - loss: 2.6656e-05
Epoch 373/500
1/1 [==============================] - 0s 2ms/step - loss: 2.6346e-05
Epoch 374/500
1/1 [==============================] - 0s 1ms/step - loss: 2.6040e-05
Epoch 375/500
1/1 [==============================] - 0s 2ms/step - loss: 2.5736e-05
Epoch 376/500
1/1 [==============================] - 0s 2ms/step - loss: 2.5437e-05
Epoch 377/500
1/1 [==============================] - 0s 2ms/step - loss: 2.5141e-05
Epoch 378/500
1/1 [==============================] - 0s 2ms/step - loss: 2.4848e-05
Epoch 379/500
1/1 [==============================] - 0s 2ms/step - loss: 2.4559e-05
Epoch 380/500
1/1 [==============================] - 0s 2ms/step - loss: 2.4273e-05
Epoch 381/500
1/1 [==============================] - 0s 2ms/step - loss: 2.3991e-05
Epoch 382/500
1/1 [==============================] - 0s 2ms/step - loss: 2.3712e-05
Epoch 383/500
1/1 [==============================] - 0s 2ms/step - loss: 2.3436e-05
Epoch 384/500
1/1 [==============================] - 0s 2ms/step - loss: 2.3163e-05
Epoch 385/500
1/1 [==============================] - 0s 2ms/step - loss: 2.2893e-05
Epoch 386/500
1/1 [==============================] - 0s 2ms/step - loss: 2.2627e-05
Epoch 387/500
1/1 [==============================] - 0s 2ms/step - loss: 2.2364e-05
Epoch 388/500
1/1 [==============================] - 0s 2ms/step - loss: 2.2104e-05
Epoch 389/500
1/1 [==============================] - 0s 3ms/step - loss: 2.1847e-05
Epoch 390/500
1/1 [==============================] - 0s 3ms/step - loss: 2.1592e-05
Epoch 391/500
1/1 [==============================] - 0s 2ms/step - loss: 2.1341e-05
Epoch 392/500
1/1 [==============================] - 0s 2ms/step - loss: 2.1093e-05
Epoch 393/500
1/1 [==============================] - 0s 2ms/step - loss: 2.0847e-05
Epoch 394/500
1/1 [==============================] - 0s 2ms/step - loss: 2.0605e-05
Epoch 395/500
1/1 [==============================] - 0s 4ms/step - loss: 2.0365e-05
Epoch 396/500
1/1 [==============================] - 0s 3ms/step - loss: 2.0128e-05
Epoch 397/500
1/1 [==============================] - 0s 2ms/step - loss: 1.9894e-05
Epoch 398/500
1/1 [==============================] - 0s 3ms/step - loss: 1.9662e-05
Epoch 399/500
1/1 [==============================] - 0s 2ms/step - loss: 1.9433e-05
Epoch 400/500
1/1 [==============================] - 0s 2ms/step - loss: 1.9207e-05
Epoch 401/500
1/1 [==============================] - 0s 3ms/step - loss: 1.8983e-05
Epoch 402/500
1/1 [==============================] - 0s 2ms/step - loss: 1.8763e-05
Epoch 403/500
1/1 [==============================] - 0s 2ms/step - loss: 1.8544e-05
Epoch 404/500
1/1 [==============================] - 0s 2ms/step - loss: 1.8329e-05
Epoch 405/500
1/1 [==============================] - 0s 3ms/step - loss: 1.8115e-05
Epoch 406/500
1/1 [==============================] - 0s 2ms/step - loss: 1.7904e-05
Epoch 407/500
1/1 [==============================] - 0s 3ms/step - loss: 1.7696e-05
Epoch 408/500
1/1 [==============================] - 0s 3ms/step - loss: 1.7490e-05
Epoch 409/500
1/1 [==============================] - 0s 2ms/step - loss: 1.7287e-05
Epoch 410/500
1/1 [==============================] - 0s 1ms/step - loss: 1.7085e-05
Epoch 411/500
1/1 [==============================] - 0s 3ms/step - loss: 1.6887e-05
Epoch 412/500
1/1 [==============================] - 0s 2ms/step - loss: 1.6690e-05
Epoch 413/500
1/1 [==============================] - 0s 2ms/step - loss: 1.6496e-05
Epoch 414/500
1/1 [==============================] - 0s 2ms/step - loss: 1.6304e-05
Epoch 415/500
1/1 [==============================] - 0s 2ms/step - loss: 1.6114e-05
Epoch 416/500
1/1 [==============================] - 0s 2ms/step - loss: 1.5927e-05
Epoch 417/500
1/1 [==============================] - 0s 2ms/step - loss: 1.5742e-05
Epoch 418/500
1/1 [==============================] - 0s 2ms/step - loss: 1.5558e-05
Epoch 419/500
1/1 [==============================] - 0s 3ms/step - loss: 1.5377e-05
Epoch 420/500
1/1 [==============================] - 0s 1ms/step - loss: 1.5198e-05
Epoch 421/500
1/1 [==============================] - 0s 2ms/step - loss: 1.5021e-05
Epoch 422/500
1/1 [==============================] - 0s 2ms/step - loss: 1.4846e-05
Epoch 423/500
1/1 [==============================] - 0s 1ms/step - loss: 1.4674e-05
Epoch 424/500
1/1 [==============================] - 0s 2ms/step - loss: 1.4503e-05
Epoch 425/500
1/1 [==============================] - 0s 2ms/step - loss: 1.4334e-05
Epoch 426/500
1/1 [==============================] - 0s 1ms/step - loss: 1.4167e-05
Epoch 427/500
1/1 [==============================] - 0s 2ms/step - loss: 1.4003e-05
Epoch 428/500
1/1 [==============================] - 0s 2ms/step - loss: 1.3840e-05
Epoch 429/500
1/1 [==============================] - 0s 2ms/step - loss: 1.3678e-05
Epoch 430/500
1/1 [==============================] - 0s 2ms/step - loss: 1.3519e-05
Epoch 431/500
1/1 [==============================] - 0s 2ms/step - loss: 1.3362e-05
Epoch 432/500
1/1 [==============================] - 0s 2ms/step - loss: 1.3207e-05
Epoch 433/500
1/1 [==============================] - 0s 2ms/step - loss: 1.3053e-05
Epoch 434/500
1/1 [==============================] - 0s 1ms/step - loss: 1.2901e-05
Epoch 435/500
1/1 [==============================] - 0s 2ms/step - loss: 1.2751e-05
Epoch 436/500
1/1 [==============================] - 0s 1ms/step - loss: 1.2602e-05
Epoch 437/500
1/1 [==============================] - 0s 1ms/step - loss: 1.2456e-05
Epoch 438/500
1/1 [==============================] - 0s 2ms/step - loss: 1.2310e-05
Epoch 439/500
1/1 [==============================] - 0s 2ms/step - loss: 1.2167e-05
Epoch 440/500
1/1 [==============================] - 0s 1ms/step - loss: 1.2026e-05
Epoch 441/500
1/1 [==============================] - 0s 1ms/step - loss: 1.1886e-05
Epoch 442/500
1/1 [==============================] - 0s 2ms/step - loss: 1.1747e-05
Epoch 443/500
1/1 [==============================] - 0s 2ms/step - loss: 1.1611e-05
Epoch 444/500
1/1 [==============================] - 0s 1ms/step - loss: 1.1475e-05
Epoch 445/500
1/1 [==============================] - 0s 1ms/step - loss: 1.1342e-05
Epoch 446/500
1/1 [==============================] - 0s 1ms/step - loss: 1.1210e-05
Epoch 447/500
1/1 [==============================] - 0s 1ms/step - loss: 1.1080e-05
Epoch 448/500
1/1 [==============================] - 0s 2ms/step - loss: 1.0951e-05
Epoch 449/500
1/1 [==============================] - 0s 2ms/step - loss: 1.0823e-05
Epoch 450/500
1/1 [==============================] - 0s 2ms/step - loss: 1.0697e-05
Epoch 451/500
1/1 [==============================] - 0s 2ms/step - loss: 1.0573e-05
Epoch 452/500
1/1 [==============================] - 0s 2ms/step - loss: 1.0450e-05
Epoch 453/500
1/1 [==============================] - 0s 1ms/step - loss: 1.0328e-05
Epoch 454/500
1/1 [==============================] - 0s 2ms/step - loss: 1.0208e-05
Epoch 455/500
1/1 [==============================] - 0s 2ms/step - loss: 1.0089e-05
Epoch 456/500
1/1 [==============================] - 0s 2ms/step - loss: 9.9718e-06
Epoch 457/500
1/1 [==============================] - 0s 2ms/step - loss: 9.8557e-06
Epoch 458/500
1/1 [==============================] - 0s 2ms/step - loss: 9.7409e-06
Epoch 459/500
1/1 [==============================] - 0s 2ms/step - loss: 9.6276e-06
Epoch 460/500
1/1 [==============================] - 0s 2ms/step - loss: 9.5155e-06
Epoch 461/500
1/1 [==============================] - 0s 2ms/step - loss: 9.4049e-06
Epoch 462/500
1/1 [==============================] - 0s 2ms/step - loss: 9.2954e-06
Epoch 463/500
1/1 [==============================] - 0s 2ms/step - loss: 9.1873e-06
Epoch 464/500
1/1 [==============================] - 0s 1ms/step - loss: 9.0802e-06
Epoch 465/500
1/1 [==============================] - 0s 2ms/step - loss: 8.9746e-06
Epoch 466/500
1/1 [==============================] - 0s 2ms/step - loss: 8.8700e-06
Epoch 467/500
1/1 [==============================] - 0s 2ms/step - loss: 8.7669e-06
Epoch 468/500
1/1 [==============================] - 0s 2ms/step - loss: 8.6648e-06
Epoch 469/500
1/1 [==============================] - 0s 2ms/step - loss: 8.5639e-06
Epoch 470/500
1/1 [==============================] - 0s 1ms/step - loss: 8.4641e-06
Epoch 471/500
1/1 [==============================] - 0s 2ms/step - loss: 8.3657e-06
Epoch 472/500
1/1 [==============================] - 0s 2ms/step - loss: 8.2683e-06
Epoch 473/500
1/1 [==============================] - 0s 2ms/step - loss: 8.1721e-06
Epoch 474/500
1/1 [==============================] - 0s 2ms/step - loss: 8.0770e-06
Epoch 475/500
1/1 [==============================] - 0s 2ms/step - loss: 7.9831e-06
Epoch 476/500
1/1 [==============================] - 0s 2ms/step - loss: 7.8901e-06
Epoch 477/500
1/1 [==============================] - 0s 1ms/step - loss: 7.7982e-06
Epoch 478/500
1/1 [==============================] - 0s 2ms/step - loss: 7.7074e-06
Epoch 479/500
1/1 [==============================] - 0s 1ms/step - loss: 7.6179e-06
Epoch 480/500
1/1 [==============================] - 0s 1ms/step - loss: 7.5290e-06
Epoch 481/500
1/1 [==============================] - 0s 2ms/step - loss: 7.4414e-06
Epoch 482/500
1/1 [==============================] - 0s 2ms/step - loss: 7.3551e-06
Epoch 483/500
1/1 [==============================] - 0s 2ms/step - loss: 7.2693e-06
Epoch 484/500
1/1 [==============================] - 0s 2ms/step - loss: 7.1847e-06
Epoch 485/500
1/1 [==============================] - 0s 7ms/step - loss: 7.1010e-06
Epoch 486/500
1/1 [==============================] - 0s 2ms/step - loss: 7.0185e-06
Epoch 487/500
1/1 [==============================] - 0s 3ms/step - loss: 6.9368e-06
Epoch 488/500
1/1 [==============================] - 0s 2ms/step - loss: 6.8560e-06
Epoch 489/500
1/1 [==============================] - 0s 2ms/step - loss: 6.7762e-06
Epoch 490/500
1/1 [==============================] - 0s 2ms/step - loss: 6.6974e-06
Epoch 491/500
1/1 [==============================] - 0s 1ms/step - loss: 6.6194e-06
Epoch 492/500
1/1 [==============================] - 0s 2ms/step - loss: 6.5424e-06
Epoch 493/500
1/1 [==============================] - 0s 2ms/step - loss: 6.4664e-06
Epoch 494/500
1/1 [==============================] - 0s 2ms/step - loss: 6.3911e-06
Epoch 495/500
1/1 [==============================] - 0s 2ms/step - loss: 6.3168e-06
Epoch 496/500
1/1 [==============================] - 0s 2ms/step - loss: 6.2432e-06
Epoch 497/500
1/1 [==============================] - 0s 2ms/step - loss: 6.1705e-06
Epoch 498/500
1/1 [==============================] - 0s 2ms/step - loss: 6.0988e-06
Epoch 499/500
1/1 [==============================] - 0s 2ms/step - loss: 6.0278e-06
Epoch 500/500
1/1 [==============================] - 0s 1ms/step - loss: 5.9577e-06
[[4.0028524]]
###Markdown
Exercise 1 - House Prices. In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula. So, imagine if house pricing was as easy as a house costs 50k + 50k per bedroom, so that a 1 bedroom house costs 100k, a 2 bedroom house costs 150k etc. How would you create a neural network that learns this relationship so that it would predict a 7 bedroom house as costing close to 400k etc. Hint: Your network might work better if you scale the house price down. You don't have to give the answer 400... it might be better to create something that predicts the number 4, and then your answer is in the 'hundreds of thousands' etc. Import Dependencies
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
###Output
_____no_output_____
###Markdown
Utility Functions
###Code
def get_y(x_vals):
    """Return the scaled house price for each bedroom count: 0.5 + 0.5 per bedroom, i.e. price in hundreds of thousands."""
    y_vals = [0.5 * (x + 1) for x in x_vals]
    return np.array(y_vals, dtype=float)
###Output
_____no_output_____
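###Markdown
A quick sanity check of the scaled pricing rule before training: `get_y` returns the price in hundreds of thousands (0.5 plus 0.5 per bedroom), so 1 bedroom maps to 1.0 and 7 bedrooms to 4.0, i.e. about $400k. A minimal check, using the `get_y` helper and the `np` import from the cells above:
###Code
# Sanity check of the scaled pricing formula; expect [1. 4.] for 1 and 7 bedrooms.
print(get_y(np.array([1.0, 7.0])))
###Output
_____no_output_____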
###Markdown
Solution Code
###Code
model = keras.Sequential([keras.layers.Dense(units = 1, input_shape = [1])])
model.compile(optimizer="sgd", loss="mean_squared_error")
xs = np.array([1, 2, 3, 4, 5, 6], dtype=float) # number of bedrooms
ys = get_y(xs) # 0.5(1 + x); will multiply by 100 later
model.fit(xs,ys, epochs=550)
print(model.predict([7.0]))
###Output
[[4.0409164]]
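###Markdown
After training on y = 0.5 + 0.5x, the single Dense layer's weight and bias should both end up close to 0.5, which is why the prediction for 7 bedrooms lands near 4.0 (about $400k). A small sketch to confirm this, assuming the `model` variable from the solution cell above:
###Code
# Inspect the fitted kernel and bias of the single Dense layer; both should be near 0.5.
w, b = model.layers[0].get_weights()
print("weight:", w.flatten()[0], "bias:", b[0])
###Output
_____no_output_____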
###Markdown
In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula. So, imagine if house pricing was as easy as a house costs 50k + 50k per bedroom, so that a 1 bedroom house costs 100k, a 2 bedroom house costs 150k etc. How would you create a neural network that learns this relationship so that it would predict a 7 bedroom house as costing close to 400k etc. Hint: Your network might work better if you scale the house price down. You don't have to give the answer 400... it might be better to create something that predicts the number 4, and then your answer is in the 'hundreds of thousands' etc.
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float)
ys = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=float)
model.fit(xs, ys, epochs=500)
print(model.predict([7.0]))
###Output
_____no_output_____
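###Markdown
Why the scaling hint matters: with plain SGD and mean squared error, targets in the hundreds (100, 150, ..., 350) produce very large gradients, so training can diverge or converge slowly. That is why the solutions here predict the price in hundreds of thousands (the expected answer for 7 bedrooms is about 4, not 400) and rescale afterwards if dollars are needed. A minimal sketch of that pattern (the variable names below are illustrative, not from the original cells):
###Code
# Fit on scaled targets (price / 100k), then convert the prediction back to thousands.
bedrooms = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float)
price_in_100k = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=float)
scaled_model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
scaled_model.compile(optimizer='sgd', loss='mean_squared_error')
scaled_model.fit(bedrooms, price_in_100k, epochs=500, verbose=0)
print(scaled_model.predict([7.0]) * 100.0)  # roughly 400, i.e. about $400k
###Output
_____no_output_____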
###Markdown
In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula. So, imagine if house pricing was as easy as a house costs 50k + 50k per bedroom, so that a 1 bedroom house costs 100k, a 2 bedroom house costs 150k etc. How would you create a neural network that learns this relationship so that it would predict a 7 bedroom house as costing close to 400k etc. Hint: Your network might work better if you scale the house price down. You don't have to give the answer 400... it might be better to create something that predicts the number 4, and then your answer is in the 'hundreds of thousands' etc.
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
# GRADED FUNCTION: house_model
def house_model(y_new):
xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
ys = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5], dtype=float)
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(xs,ys,epochs=500)
return model.predict(y_new)[0]
prediction = house_model([7.0])
print(prediction)
# Now click the 'Submit Assignment' button above.
# Once that is complete, please run the following two cells to save your work and close the notebook
%%javascript
<!-- Save the notebook -->
IPython.notebook.save_checkpoint();
%%javascript
IPython.notebook.session.delete();
window.onbeforeunload = null
setTimeout(function() { window.close(); }, 1000);
###Output
_____no_output_____
###Markdown
In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula. So, imagine if house pricing was as easy as a house costs 50k + 50k per bedroom, so that a 1 bedroom house costs 100k, a 2 bedroom house costs 150k etc. How would you create a neural network that learns this relationship so that it would predict a 7 bedroom house as costing close to 400k etc. Hint: Your network might work better if you scale the house price down. You don't have to give the answer 400... it might be better to create something that predicts the number 4, and then your answer is in the 'hundreds of thousands' etc.
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float)
ys = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=float)
model.fit(xs, ys, epochs=1000)
print(model.predict([7.0]))
###Output
_____no_output_____
###Markdown
In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula. So, imagine if house pricing was as easy as a house costs 50k + 50k per bedroom, so that a 1 bedroom house costs 100k, a 2 bedroom house costs 150k etc. How would you create a neural network that learns this relationship so that it would predict a 7 bedroom house as costing close to 400k etc. Hint: Your network might work better if you scale the house price down. You don't have to give the answer 400... it might be better to create something that predicts the number 4, and then your answer is in the 'hundreds of thousands' etc.
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
xs = np.array([1, 2, 3, 4, 5, 6], dtype=float)
ys = np.array([1, 1.5, 2, 2.5, 3, 3.5], dtype=float)
model.fit(xs, ys, epochs=500)
print(model.predict([7.0]))
###Output
_____no_output_____
###Markdown
In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula. So, imagine if house pricing was as easy as a house costs 50k + 50k per bedroom, so that a 1 bedroom house costs 100k, a 2 bedroom house costs 150k etc. How would you create a neural network that learns this relationship so that it would predict a 7 bedroom house as costing close to 400k etc. Hint: Your network might work better if you scale the house price down. You don't have to give the answer 400... it might be better to create something that predicts the number 4, and then your answer is in the 'hundreds of thousands' etc.
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
def house_model(y_new):
xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float)
ys = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=float)
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(xs, ys, epochs=500)
return model.predict(y_new)[0]
prediction = house_model([7.0])
print(prediction)
# Now click the 'Submit Assignment' button above.
# Once that is complete, please run the following two cells to save your work and close the notebook
%%javascript
<!-- Save the notebook -->
IPython.notebook.save_checkpoint();
%%javascript
IPython.notebook.session.delete();
window.onbeforeunload = null
setTimeout(function() { window.close(); }, 1000);
###Output
_____no_output_____
###Markdown
###Code
import tensorflow as tf
from tensorflow import keras
import numpy as np
xs = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5])
ys = np.array([0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0, 3.25, 3.5, 3.75])
model = keras.Sequential([keras.layers.Dense(units = 1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(xs, ys, epochs=3000)
print(model.predict([7.0]))
###Output
_____no_output_____
###Markdown
Predicting Housing Prices. In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula. So, imagine if house pricing was as easy as a house costs 50k + 50k per bedroom, so that a 1 bedroom house costs 100k, a 2 bedroom house costs 150k etc. How would you create a neural network that learns this relationship so that it would predict a 7 bedroom house as costing close to 400k etc. Hint: Your network might work better if you scale the house price down. You don't have to give the answer 400... it might be better to create something that predicts the number 4, and then your answer is in the 'hundreds of thousands' etc.
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
model = tf.keras.Sequential([keras.layers.Dense(units = 1, input_shape = [1])])
model.compile(optimizer = 'sgd', loss = 'mean_squared_error')
xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype = float)
ys = np.array([100.0, 150.0, 200.0, 250.0, 300.0, 350.0], dtype = float)
model.fit(xs, ys/100.0, epochs = 1000)
print(model.predict([7.0]))
###Output
Epoch 1/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.4425
Epoch 2/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.2340
Epoch 3/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.1373
Epoch 4/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0924
Epoch 5/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0713
Epoch 6/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0614
Epoch 7/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0566
Epoch 8/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0542
Epoch 9/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0528
Epoch 10/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0520
Epoch 11/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0514
Epoch 12/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0510
Epoch 13/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0506
Epoch 14/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0502
Epoch 15/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0498
Epoch 16/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0494
Epoch 17/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0491
Epoch 18/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0487
Epoch 19/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0484
Epoch 20/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0480
Epoch 21/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0477
Epoch 22/1000
1/1 [==============================] - 0s 3ms/step - loss: 0.0473
Epoch 23/1000
1/1 [==============================] - 0s 3ms/step - loss: 0.0470
Epoch 24/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0466
Epoch 25/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0463
Epoch 26/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0459
Epoch 27/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0456
Epoch 28/1000
1/1 [==============================] - 0s 3ms/step - loss: 0.0453
Epoch 29/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0449
Epoch 30/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0446
Epoch 31/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0443
Epoch 32/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0440
Epoch 33/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0437
Epoch 34/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0433
Epoch 35/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0430
Epoch 36/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0427
Epoch 37/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0424
Epoch 38/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0421
Epoch 39/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0418
Epoch 40/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0415
Epoch 41/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0412
Epoch 42/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0409
Epoch 43/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0406
Epoch 44/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0403
Epoch 45/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0400
Epoch 46/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0397
Epoch 47/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0394
Epoch 48/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0391
Epoch 49/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0388
Epoch 50/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0386
Epoch 51/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0383
Epoch 52/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0380
Epoch 53/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0377
Epoch 54/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0374
Epoch 55/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0372
Epoch 56/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0369
Epoch 57/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0366
Epoch 58/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0364
Epoch 59/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0361
Epoch 60/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0358
Epoch 61/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0356
Epoch 62/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0353
Epoch 63/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0351
Epoch 64/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0348
Epoch 65/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0345
Epoch 66/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0343
Epoch 67/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0340
Epoch 68/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0338
Epoch 69/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0335
Epoch 70/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0333
Epoch 71/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0331
Epoch 72/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0328
Epoch 73/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0326
Epoch 74/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0323
Epoch 75/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0321
Epoch 76/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0319
Epoch 77/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0316
Epoch 78/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0314
Epoch 79/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0312
Epoch 80/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0310
Epoch 81/1000
1/1 [==============================] - 0s 978us/step - loss: 0.0307
Epoch 82/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0305
Epoch 83/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0303
Epoch 84/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0301
Epoch 85/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0298
Epoch 86/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0296
Epoch 87/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0294
Epoch 88/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0292
Epoch 89/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0290
Epoch 90/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0288
Epoch 91/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0286
Epoch 92/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0284
Epoch 93/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0281
Epoch 94/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0279
Epoch 95/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0277
Epoch 96/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0275
Epoch 97/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0273
Epoch 98/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0271
Epoch 99/1000
1/1 [==============================] - 0s 3ms/step - loss: 0.0269
Epoch 100/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0267
Epoch 101/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0266
Epoch 102/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0264
Epoch 103/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0262
Epoch 104/1000
1/1 [==============================] - 0s 6ms/step - loss: 0.0260
Epoch 105/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0258
Epoch 106/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0256
Epoch 107/1000
1/1 [==============================] - 0s 8ms/step - loss: 0.0254
Epoch 108/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0252
Epoch 109/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0250
Epoch 110/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0249
Epoch 111/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0247
Epoch 112/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0245
Epoch 113/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0243
Epoch 114/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0241
Epoch 115/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0240
Epoch 116/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0238
Epoch 117/1000
1/1 [==============================] - 0s 3ms/step - loss: 0.0236
Epoch 118/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0234
Epoch 119/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0233
Epoch 120/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0231
Epoch 121/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0229
Epoch 122/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0228
Epoch 123/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0226
Epoch 124/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0224
Epoch 125/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0223
Epoch 126/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0221
Epoch 127/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0220
Epoch 128/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0218
Epoch 129/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0216
Epoch 130/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0215
Epoch 131/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0213
Epoch 132/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0212
Epoch 133/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0210
Epoch 134/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0209
Epoch 135/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0207
Epoch 136/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0206
Epoch 137/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0204
Epoch 138/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0203
Epoch 139/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0201
Epoch 140/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0200
Epoch 141/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0198
Epoch 142/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0197
Epoch 143/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0195
Epoch 144/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0194
Epoch 145/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0192
Epoch 146/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0191
Epoch 147/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0190
Epoch 148/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0188
Epoch 149/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0187
Epoch 150/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0186
Epoch 151/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0184
Epoch 152/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0183
Epoch 153/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0182
Epoch 154/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0180
Epoch 155/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0179
Epoch 156/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0178
Epoch 157/1000
1/1 [==============================] - 0s 4ms/step - loss: 0.0176
Epoch 158/1000
1/1 [==============================] - 0s 5ms/step - loss: 0.0175
Epoch 159/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0174
Epoch 160/1000
1/1 [==============================] - 0s 3ms/step - loss: 0.0172
Epoch 161/1000
1/1 [==============================] - 0s 3ms/step - loss: 0.0171
Epoch 162/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0170
Epoch 163/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0169
Epoch 164/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0167
Epoch 165/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0166
Epoch 166/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0165
Epoch 167/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0164
Epoch 168/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0163
Epoch 169/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0161
Epoch 170/1000
1/1 [==============================] - 0s 3ms/step - loss: 0.0160
Epoch 171/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0159
Epoch 172/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0158
Epoch 173/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0157
Epoch 174/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0156
Epoch 175/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0155
Epoch 176/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0153
Epoch 177/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0152
Epoch 178/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0151
Epoch 179/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0150
Epoch 180/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0149
Epoch 181/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0148
Epoch 182/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0147
Epoch 183/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0146
Epoch 184/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0145
Epoch 185/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0144
Epoch 186/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0143
Epoch 187/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0142
Epoch 188/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0141
Epoch 189/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0140
Epoch 190/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0138
Epoch 191/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0137
Epoch 192/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0136
Epoch 193/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0135
Epoch 194/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0135
Epoch 195/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0134
Epoch 196/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0133
Epoch 197/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0132
Epoch 198/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0131
Epoch 199/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0130
Epoch 200/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0129
Epoch 201/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0128
Epoch 202/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0127
Epoch 203/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0126
Epoch 204/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0125
Epoch 205/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0124
Epoch 206/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0123
Epoch 207/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0122
Epoch 208/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0121
Epoch 209/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0121
Epoch 210/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0120
Epoch 211/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0119
Epoch 212/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0118
Epoch 213/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0117
Epoch 214/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0116
Epoch 215/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0115
Epoch 216/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0115
Epoch 217/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0114
Epoch 218/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0113
Epoch 219/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0112
Epoch 220/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0111
Epoch 221/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0110
Epoch 222/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0110
Epoch 223/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0109
Epoch 224/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0108
Epoch 225/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0107
Epoch 226/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0106
Epoch 227/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0106
Epoch 228/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0105
Epoch 229/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0104
Epoch 230/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0103
Epoch 231/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0103
Epoch 232/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0102
Epoch 233/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0101
Epoch 234/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0100
Epoch 235/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0100
Epoch 236/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0099
Epoch 237/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0098
Epoch 238/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0098
Epoch 239/1000
1/1 [==============================] - 0s 4ms/step - loss: 0.0097
Epoch 240/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0096
Epoch 241/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0095
Epoch 242/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0095
Epoch 243/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0094
Epoch 244/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0093
Epoch 245/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0093
Epoch 246/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0092
Epoch 247/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0091
Epoch 248/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0091
Epoch 249/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0090
Epoch 250/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0089
Epoch 251/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0089
Epoch 252/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0088
Epoch 253/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0087
Epoch 254/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0087
Epoch 255/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0086
Epoch 256/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0085
Epoch 257/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0085
Epoch 258/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0084
Epoch 259/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0084
Epoch 260/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0083
Epoch 261/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0082
Epoch 262/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0082
Epoch 263/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0081
Epoch 264/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0081
Epoch 265/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0080
Epoch 266/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0079
Epoch 267/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0079
Epoch 268/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0078
Epoch 269/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0078
Epoch 270/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0077
Epoch 271/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0077
Epoch 272/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0076
Epoch 273/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0075
Epoch 274/1000
1/1 [==============================] - 0s 3ms/step - loss: 0.0075
Epoch 275/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0074
Epoch 276/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0074
Epoch 277/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0073
Epoch 278/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0073
Epoch 279/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0072
Epoch 280/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0072
Epoch 281/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0071
Epoch 282/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0071
Epoch 283/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0070
Epoch 284/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0070
Epoch 285/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0069
Epoch 286/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0069
Epoch 287/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0068
Epoch 288/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0068
Epoch 289/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0067
Epoch 290/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0067
Epoch 291/1000
1/1 [==============================] - 0s 997us/step - loss: 0.0066
Epoch 292/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0066
Epoch 293/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0065
Epoch 294/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0065
Epoch 295/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0064
Epoch 296/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0064
Epoch 297/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0063
Epoch 298/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0063
Epoch 299/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0062
Epoch 300/1000
1/1 [==============================] - 0s 3ms/step - loss: 0.0062
Epoch 301/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0062
Epoch 302/1000
1/1 [==============================] - 0s 4ms/step - loss: 0.0061
Epoch 303/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0061
Epoch 304/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0060
Epoch 305/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0060
Epoch 306/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0059
Epoch 307/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0059
Epoch 308/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0058
Epoch 309/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0058
Epoch 310/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0058
Epoch 311/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0057
Epoch 312/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0057
Epoch 313/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0056
Epoch 314/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0056
Epoch 315/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0056
Epoch 316/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0055
Epoch 317/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0055
Epoch 318/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0054
Epoch 319/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0054
Epoch 320/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0054
Epoch 321/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0053
Epoch 322/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0053
Epoch 323/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0052
Epoch 324/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0052
Epoch 325/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0052
Epoch 326/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0051
Epoch 327/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0051
Epoch 328/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0050
Epoch 329/1000
1/1 [==============================] - 0s 932us/step - loss: 0.0050
Epoch 330/1000
1/1 [==============================] - 0s 3ms/step - loss: 0.0050
Epoch 331/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0049
Epoch 332/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0049
Epoch 333/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0049
Epoch 334/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0048
Epoch 335/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0048
Epoch 336/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0048
Epoch 337/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0047
Epoch 338/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0047
Epoch 339/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0047
Epoch 340/1000
1/1 [==============================] - 0s 3ms/step - loss: 0.0046
Epoch 341/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0046
Epoch 342/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0046
Epoch 343/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0045
Epoch 344/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0045
Epoch 345/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0045
Epoch 346/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0044
Epoch 347/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0044
Epoch 348/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0044
Epoch 349/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0043
Epoch 350/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0043
Epoch 351/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0043
Epoch 352/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0042
Epoch 353/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0042
Epoch 354/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0042
Epoch 355/1000
1/1 [==============================] - 0s 4ms/step - loss: 0.0041
Epoch 356/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0041
Epoch 357/1000
1/1 [==============================] - 0s 3ms/step - loss: 0.0041
Epoch 358/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0041
Epoch 359/1000
1/1 [==============================] - 0s 4ms/step - loss: 0.0040
Epoch 360/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0040
Epoch 361/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0040
Epoch 362/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0039
Epoch 363/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0039
Epoch 364/1000
1/1 [==============================] - 0s 3ms/step - loss: 0.0039
Epoch 365/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0039
Epoch 366/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0038
Epoch 367/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0038
Epoch 368/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0038
Epoch 369/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0037
Epoch 370/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0037
Epoch 371/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0037
Epoch 372/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0037
Epoch 373/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0036
Epoch 374/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0036
Epoch 375/1000
1/1 [==============================] - 0s 3ms/step - loss: 0.0036
Epoch 376/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0036
Epoch 377/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0035
Epoch 378/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0035
Epoch 379/1000
1/1 [==============================] - 0s 4ms/step - loss: 0.0035
Epoch 380/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0035
Epoch 381/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0034
Epoch 382/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0034
Epoch 383/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0034
Epoch 384/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0034
Epoch 385/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0033
Epoch 386/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0033
Epoch 387/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0033
Epoch 388/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0033
Epoch 389/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0032
Epoch 390/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0032
Epoch 391/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0032
Epoch 392/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0032
Epoch 393/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0031
Epoch 394/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0031
Epoch 395/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0031
Epoch 396/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0031
Epoch 397/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0030
Epoch 398/1000
1/1 [==============================] - 0s 2ms/step - loss: 0.0030
Epoch 399/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0030
Epoch 400/1000
1/1 [==============================] - 0s 1ms/step - loss: 0.0030
...
Epoch 998/1000
1/1 [==============================] - 0s 1ms/step - loss: 3.7628e-05
Epoch 999/1000
1/1 [==============================] - 0s 1ms/step - loss: 3.7354e-05
Epoch 1000/1000
1/1 [==============================] - 0s 1ms/step - loss: 3.7081e-05
[[4.008784]]
###Markdown
In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula. Imagine house pricing were as easy as: a house costs 50k + 50k per bedroom, so that a 1-bedroom house costs 100k, a 2-bedroom house costs 150k, and so on. How would you create a neural network that learns this relationship, so that it predicts a 7-bedroom house as costing close to 400k? Hint: your network might work better if you scale the house price down. You don't have to give the answer 400; it might be better to create something that predicts the number 4, with the answer then read in 'hundreds of thousands'.
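On that scaled-down target, the relationship to learn is price = 0.5 + 0.5 × bedrooms (in hundreds of thousands), so a 7-bedroom house should come out near 0.5 + 0.5 × 7 = 4.0, which is what the solution cells below aim to reproduce.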
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
# GRADED FUNCTION: house_model
def house_model(y_new):
    # Bedrooms and the corresponding prices in hundreds of thousands
    # (0.5 base + 0.5 per bedroom), which keeps the targets small and easy to fit.
    xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float)
    ys = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=float)
    # A single Dense unit is enough to learn this linear relationship.
    model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
    model.compile(optimizer='sgd', loss='mean_squared_error')
    model.fit(xs, ys, epochs=800)
    return model.predict(y_new)[0]
prediction = house_model([7.0])
print(prediction)
# Now click the 'Submit Assignment' button above.
# Once that is complete, please run the following two cells to save your work and close the notebook
%%javascript
<!-- Save the notebook -->
IPython.notebook.save_checkpoint();
%%javascript
IPython.notebook.session.delete();
window.onbeforeunload = null
setTimeout(function() { window.close(); }, 1000);
###Output
_____no_output_____
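###Markdown
As a quick sanity check (a minimal sketch, not part of the graded function), one way to confirm the network has learned the intended rule is to refit the same one-unit linear model and inspect its kernel and bias, which should both approach 0.5, the slope and intercept of the scaled pricing formula. The `check_model` name and the `verbose=0` setting below are just illustrative choices; the NumPy/Keras imports are the same ones used above.
###Code
import numpy as np
from tensorflow import keras
# Refit the same single-unit linear model on the scaled data and look at what it learned.
xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float) # bedrooms
ys = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=float) # price in hundreds of thousands
check_model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
check_model.compile(optimizer='sgd', loss='mean_squared_error')
check_model.fit(xs, ys, epochs=800, verbose=0) # verbose=0 keeps the epoch log out of the notebook
kernel, bias = check_model.layers[0].get_weights()
print("learned slope ~", kernel[0][0]) # should be close to 0.5
print("learned bias ~", bias[0]) # should be close to 0.5
print("7-bedroom prediction ~", check_model.predict(np.array([[7.0]]))[0][0]) # should be close to 4.0
###Output
_____no_output_____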
###Markdown
In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula. Imagine house pricing were as easy as: a house costs 50k + 50k per bedroom, so that a 1-bedroom house costs 100k, a 2-bedroom house costs 150k, and so on. How would you create a neural network that learns this relationship, so that it predicts a 7-bedroom house as costing close to 400k? Hint: your network might work better if you scale the house price down. You don't have to give the answer 400; it might be better to create something that predicts the number 4, with the answer then read in 'hundreds of thousands'.
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
# GRADED FUNCTION: house_model
def house_model(y_new):
xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float)
ys = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=float)
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(xs, ys, epochs=600)
return model.predict(y_new)[0]
prediction = house_model([7.0])
print(prediction)
# Now click the 'Submit Assignment' button above.
# Once that is complete, please run the following two cells to save your work and close the notebook
%%javascript
<!-- Save the notebook -->
IPython.notebook.save_checkpoint();
%%javascript
IPython.notebook.session.delete();
window.onbeforeunload = null
setTimeout(function() { window.close(); }, 1000);
###Output
_____no_output_____
###Markdown
In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula. Imagine house pricing were as easy as: a house costs 50k + 50k per bedroom, so that a 1-bedroom house costs 100k, a 2-bedroom house costs 150k, and so on. How would you create a neural network that learns this relationship, so that it predicts a 7-bedroom house as costing close to 400k? Hint: your network might work better if you scale the house price down. You don't have to give the answer 400; it might be better to create something that predicts the number 4, with the answer then read in 'hundreds of thousands'.
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer="sgd", loss="mean_squared_error")
xs = np.array([0, 1, 2, 3, 4, 5, 6], dtype=float)  # bedrooms (including the 0-bedroom base case)
ys = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=float)  # prices in hundreds of thousands
model.fit(xs, ys, epochs=330)
print(model.predict([7.0]))
###Output
Epoch 1/330
1/1 [==============================] - 0s 2ms/step - loss: 8.7738
Epoch 2/330
1/1 [==============================] - 0s 2ms/step - loss: 4.6886
Epoch 3/330
1/1 [==============================] - 0s 2ms/step - loss: 2.5355
Epoch 4/330
1/1 [==============================] - 0s 2ms/step - loss: 1.4005
...
Epoch 117/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0369
Epoch 118/330
1/1 [==============================] - 0s 4ms/step - loss: 0.0365
Epoch 119/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0360
Epoch 120/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0356
Epoch 121/330
1/1 [==============================] - 0s 828us/step - loss: 0.0352
Epoch 122/330
1/1 [==============================] - 0s 745us/step - loss: 0.0348
Epoch 123/330
1/1 [==============================] - 0s 897us/step - loss: 0.0344
Epoch 124/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0340
Epoch 125/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0336
Epoch 126/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0332
Epoch 127/330
1/1 [==============================] - 0s 3ms/step - loss: 0.0328
Epoch 128/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0324
Epoch 129/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0321
Epoch 130/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0317
Epoch 131/330
1/1 [==============================] - 0s 4ms/step - loss: 0.0313
Epoch 132/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0309
Epoch 133/330
1/1 [==============================] - 0s 3ms/step - loss: 0.0306
Epoch 134/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0302
Epoch 135/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0299
Epoch 136/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0295
Epoch 137/330
1/1 [==============================] - 0s 5ms/step - loss: 0.0292
Epoch 138/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0288
Epoch 139/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0285
Epoch 140/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0282
Epoch 141/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0279
Epoch 142/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0275
Epoch 143/330
1/1 [==============================] - 0s 3ms/step - loss: 0.0272
Epoch 144/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0269
Epoch 145/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0266
Epoch 146/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0263
Epoch 147/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0260
Epoch 148/330
1/1 [==============================] - 0s 4ms/step - loss: 0.0257
Epoch 149/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0254
Epoch 150/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0251
Epoch 151/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0248
Epoch 152/330
1/1 [==============================] - 0s 6ms/step - loss: 0.0245
Epoch 153/330
1/1 [==============================] - 0s 6ms/step - loss: 0.0242
Epoch 154/330
1/1 [==============================] - 0s 4ms/step - loss: 0.0239
Epoch 155/330
1/1 [==============================] - 0s 4ms/step - loss: 0.0236
Epoch 156/330
1/1 [==============================] - 0s 5ms/step - loss: 0.0234
Epoch 157/330
1/1 [==============================] - 0s 5ms/step - loss: 0.0231
Epoch 158/330
1/1 [==============================] - 0s 3ms/step - loss: 0.0228
Epoch 159/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0226
Epoch 160/330
1/1 [==============================] - 0s 3ms/step - loss: 0.0223
Epoch 161/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0220
Epoch 162/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0218
Epoch 163/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0215
Epoch 164/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0213
Epoch 165/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0210
Epoch 166/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0208
Epoch 167/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0205
Epoch 168/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0203
Epoch 169/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0201
Epoch 170/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0198
Epoch 171/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0196
Epoch 172/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0194
Epoch 173/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0191
Epoch 174/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0189
Epoch 175/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0187
Epoch 176/330
1/1 [==============================] - 0s 989us/step - loss: 0.0185
Epoch 177/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0183
Epoch 178/330
1/1 [==============================] - 0s 995us/step - loss: 0.0181
Epoch 179/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0179
Epoch 180/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0176
Epoch 181/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0174
Epoch 182/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0172
Epoch 183/330
1/1 [==============================] - 0s 3ms/step - loss: 0.0170
Epoch 184/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0168
Epoch 185/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0166
Epoch 186/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0164
Epoch 187/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0163
Epoch 188/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0161
Epoch 189/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0159
Epoch 190/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0157
Epoch 191/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0155
Epoch 192/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0153
Epoch 193/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0152
Epoch 194/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0150
Epoch 195/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0148
Epoch 196/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0146
Epoch 197/330
1/1 [==============================] - 0s 961us/step - loss: 0.0145
Epoch 198/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0143
Epoch 199/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0141
Epoch 200/330
1/1 [==============================] - 0s 3ms/step - loss: 0.0140
Epoch 201/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0138
Epoch 202/330
1/1 [==============================] - 0s 3ms/step - loss: 0.0136
Epoch 203/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0135
Epoch 204/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0133
Epoch 205/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0132
Epoch 206/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0130
Epoch 207/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0129
Epoch 208/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0127
Epoch 209/330
1/1 [==============================] - 0s 3ms/step - loss: 0.0126
Epoch 210/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0124
Epoch 211/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0123
Epoch 212/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0121
Epoch 213/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0120
Epoch 214/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0119
Epoch 215/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0117
Epoch 216/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0116
Epoch 217/330
1/1 [==============================] - 0s 3ms/step - loss: 0.0114
Epoch 218/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0113
Epoch 219/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0112
Epoch 220/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0110
Epoch 221/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0109
Epoch 222/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0108
Epoch 223/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0107
Epoch 224/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0105
Epoch 225/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0104
Epoch 226/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0103
Epoch 227/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0102
Epoch 228/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0101
Epoch 229/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0099
Epoch 230/330
1/1 [==============================] - 0s 944us/step - loss: 0.0098
Epoch 231/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0097
Epoch 232/330
1/1 [==============================] - 0s 3ms/step - loss: 0.0096
Epoch 233/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0095
Epoch 234/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0094
Epoch 235/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0093
Epoch 236/330
1/1 [==============================] - 0s 3ms/step - loss: 0.0092
Epoch 237/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0091
Epoch 238/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0089
Epoch 239/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0088
Epoch 240/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0087
Epoch 241/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0086
Epoch 242/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0085
Epoch 243/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0084
Epoch 244/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0083
Epoch 245/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0082
Epoch 246/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0081
Epoch 247/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0081
Epoch 248/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0080
Epoch 249/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0079
Epoch 250/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0078
Epoch 251/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0077
Epoch 252/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0076
Epoch 253/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0075
Epoch 254/330
1/1 [==============================] - 0s 3ms/step - loss: 0.0074
Epoch 255/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0073
Epoch 256/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0072
Epoch 257/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0072
Epoch 258/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0071
Epoch 259/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0070
Epoch 260/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0069
Epoch 261/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0068
Epoch 262/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0068
Epoch 263/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0067
Epoch 264/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0066
Epoch 265/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0065
Epoch 266/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0064
Epoch 267/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0064
Epoch 268/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0063
Epoch 269/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0062
Epoch 270/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0062
Epoch 271/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0061
Epoch 272/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0060
Epoch 273/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0059
Epoch 274/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0059
Epoch 275/330
1/1 [==============================] - 0s 997us/step - loss: 0.0058
Epoch 276/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0057
Epoch 277/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0057
Epoch 278/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0056
Epoch 279/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0055
Epoch 280/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0055
Epoch 281/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0054
Epoch 282/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0053
Epoch 283/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0053
Epoch 284/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0052
Epoch 285/330
1/1 [==============================] - 0s 3ms/step - loss: 0.0052
Epoch 286/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0051
Epoch 287/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0050
Epoch 288/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0050
Epoch 289/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0049
Epoch 290/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0049
Epoch 291/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0048
Epoch 292/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0048
Epoch 293/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0047
Epoch 294/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0046
Epoch 295/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0046
Epoch 296/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0045
Epoch 297/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0045
Epoch 298/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0044
Epoch 299/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0044
Epoch 300/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0043
Epoch 301/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0043
Epoch 302/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0042
Epoch 303/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0042
Epoch 304/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0041
Epoch 305/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0041
Epoch 306/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0040
Epoch 307/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0040
Epoch 308/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0039
Epoch 309/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0039
Epoch 310/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0039
Epoch 311/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0038
Epoch 312/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0038
Epoch 313/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0037
Epoch 314/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0037
Epoch 315/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0036
Epoch 316/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0036
Epoch 317/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0035
Epoch 318/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0035
Epoch 319/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0035
Epoch 320/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0034
Epoch 321/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0034
Epoch 322/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0033
Epoch 323/330
1/1 [==============================] - 0s 2ms/step - loss: 0.0033
Epoch 324/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0033
Epoch 325/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0032
Epoch 326/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0032
Epoch 327/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0032
Epoch 328/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0031
Epoch 329/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0031
Epoch 330/330
1/1 [==============================] - 0s 1ms/step - loss: 0.0030
[[4.0645213]]
###Markdown
In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula.So, imagine if house pricing was as easy as a house costs 50k + 50k per bedroom, so that a 1 bedroom house costs 100k, a 2 bedroom house costs 150k etc.How would you create a neural network that learns this relationship so that it would predict a 7 bedroom house as costing close to 400k etc.Hint: Your network might work better if you scale the house price down. You don't have to give the answer 400...it might be better to create something that predicts the number 4, and then your answer is in the 'hundreds of thousands' etc.
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
# GRADED FUNCTION: house_model
def house_model(y_new):
    xs = np.array([0, 1, 2, 3, 4, 5], dtype=float)             # number of bedrooms
    ys = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0], dtype=float) # price in hundreds of thousands: 50k base + 50k per bedroom
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(xs, ys, epochs=500, verbose=0)
return model.predict(y_new)[0]
prediction = house_model([7.0])
print(prediction)
# Now click the 'Submit Assignment' button above.
# Once that is complete, please run the following two cells to save your work and close the notebook
%%javascript
<!-- Save the notebook -->
IPython.notebook.save_checkpoint();
%%javascript
IPython.notebook.session.delete();
window.onbeforeunload = null
setTimeout(function() { window.close(); }, 1000);
###Output
_____no_output_____
###Markdown
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula.So, imagine if house pricing was as easy as a house costs 50k + 50k per bedroom, so that a 1 bedroom house costs 100k, a 2 bedroom house costs 150k etc.How would you create a neural network that learns this relationship so that it would predict a 7 bedroom house as costing close to 400k etc.Hint: Your network might work better if you scale the house price down. You don't have to give the answer 400...it might be better to create something that predicts the number 4, and then your answer is in the 'hundreds of thousands' etc.
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
xs = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float)
ys = np.array([ 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=float)
model.fit(xs, ys, epochs = 500)
print(model.predict([7.0]))
###Output
_____no_output_____
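###Markdown
Quick arithmetic check of the target (an added note, not part of the original exercise): under the stated rule a 7-bedroom house costs 50k + 7 × 50k = 400k, so with prices scaled to hundreds of thousands the prediction printed above should come out close to 4.0.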
###Markdown
In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula.So, imagine if house pricing was as easy as a house costs 50k + 50k per bedroom, so that a 1 bedroom house costs 100k, a 2 bedroom house costs 150k etc.How would you create a neural network that learns this relationship so that it would predict a 7 bedroom house as costing close to 400k etc.Hint: Your network might work better if you scale the house price down. You don't have to give the answer 400...it might be better to create something that predicts the number 4, and then your answer is in the 'hundreds of thousands' etc.
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
xs = np.array([1,2,6,7,8,9,2,3,4,5,6])
ys = np.array([1.0,1.5,3.5,4.0,4.5,5.0,1.5,2.0,2.5,3.0,3.5])
model.fit(xs,ys,epochs=500)
print(model.predict([7.0]))
###Output
_____no_output_____ |
tutorial-contents-notebooks/302_classification.ipynb | ###Markdown
302 ClassificationView more, visit my tutorial page: https://morvanzhou.github.io/tutorials/My Youtube Channel: https://www.youtube.com/user/MorvanZhou
###Code
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
%matplotlib inline
torch.manual_seed(1) # reproducible
# make fake data
n_data = torch.ones(100, 2)
x0 = torch.normal(2*n_data, 1) # class0 x data (tensor), shape=(100, 2)
y0 = torch.zeros(100)               # class0 y data (tensor), shape=(100,)
x1 = torch.normal(-2*n_data, 1)     # class1 x data (tensor), shape=(100, 2)
y1 = torch.ones(100)                # class1 y data (tensor), shape=(100,)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor) # shape (200, 2) FloatTensor = 32-bit floating
y = torch.cat((y0, y1), ).type(torch.LongTensor) # shape (200,) LongTensor = 64-bit integer
# torch can only train on Variable, so convert them to Variable
x, y = Variable(x), Variable(y)
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
plt.show()
class Net(torch.nn.Module):
def __init__(self, n_feature, n_hidden, n_output):
super(Net, self).__init__()
self.hidden = torch.nn.Linear(n_feature, n_hidden) # hidden layer
self.out = torch.nn.Linear(n_hidden, n_output) # output layer
def forward(self, x):
x = F.relu(self.hidden(x)) # activation function for hidden layer
x = self.out(x)
return x
net = Net(n_feature=2, n_hidden=10, n_output=2) # define the network
print(net) # net architecture
# Loss and Optimizer
# Softmax is internally computed.
# Set parameters to be updated.
optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss() # the target label is NOT an one-hotted
plt.ion() # something about plotting
for t in range(100):
out = net(x) # input x and predict based on x
loss = loss_func(out, y) # must be (1. nn output, 2. target), the target label is NOT one-hotted
optimizer.zero_grad() # clear gradients for next train
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
if t % 5 == 0 :
# plot and show learning process
plt.cla()
_, prediction = torch.max(F.softmax(out, dim=1), 1)
pred_y = prediction.data.numpy().squeeze()
target_y = y.data.numpy()
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
accuracy = sum(pred_y == target_y)/200.
plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
plt.show()
plt.pause(0.1)
plt.ioff()
###Output
_____no_output_____
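###Markdown
A small follow-up sketch (not part of the original tutorial, reusing the trained `net` from the cells above): classify two unseen points, one near each cluster centre.
###Code
# Hypothetical extra cell: predict classes for two new points
new_x = Variable(torch.FloatTensor([[2.0, 2.0], [-2.0, -2.0]]))
probs = F.softmax(net(new_x), dim=1)
_, pred = torch.max(probs, 1)
print(probs.data) # class probabilities per point
print(pred.data) # expected: class 0 near (2, 2), class 1 near (-2, -2)
###Output
_____no_output_____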
###Markdown
302 ClassificationView more, visit my tutorial page: https://morvanzhou.github.io/tutorials/My Youtube Channel: https://www.youtube.com/user/MorvanZhouDependencies:* torch: 0.1.11* matplotlib
###Code
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
%matplotlib inline
torch.manual_seed(1) # reproducible
# make fake data
n_data = torch.ones(100, 2)
x0 = torch.normal(2*n_data, 1) # class0 x data (tensor), shape=(100, 2)
y0 = torch.zeros(100)               # class0 y data (tensor), shape=(100,)
x1 = torch.normal(-2*n_data, 1)     # class1 x data (tensor), shape=(100, 2)
y1 = torch.ones(100)                # class1 y data (tensor), shape=(100,)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor) # shape (200, 2) FloatTensor = 32-bit floating
y = torch.cat((y0, y1), ).type(torch.LongTensor) # shape (200,) LongTensor = 64-bit integer
# torch can only train on Variable, so convert them to Variable
x, y = Variable(x), Variable(y)
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
plt.show()
class Net(torch.nn.Module):
def __init__(self, n_feature, n_hidden, n_output):
super(Net, self).__init__()
self.hidden = torch.nn.Linear(n_feature, n_hidden) # hidden layer
self.out = torch.nn.Linear(n_hidden, n_output) # output layer
def forward(self, x):
x = F.relu(self.hidden(x)) # activation function for hidden layer
x = self.out(x)
return x
net = Net(n_feature=2, n_hidden=10, n_output=2) # define the network
print(net) # net architecture
# Loss and Optimizer
# Softmax is internally computed.
# Set parameters to be updated.
optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss() # the target label is NOT an one-hotted
plt.ion() # something about plotting
for t in range(100):
out = net(x) # input x and predict based on x
loss = loss_func(out, y) # must be (1. nn output, 2. target), the target label is NOT one-hotted
optimizer.zero_grad() # clear gradients for next train
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
if t % 10 == 0 or t in [3, 6]:
# plot and show learning process
plt.cla()
        _, prediction = torch.max(F.softmax(out, dim=1), 1)
pred_y = prediction.data.numpy().squeeze()
target_y = y.data.numpy()
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
accuracy = sum(pred_y == target_y)/200.
plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
plt.show()
plt.pause(0.1)
plt.ioff()
###Output
_____no_output_____
###Markdown
302 ClassificationView more, visit my tutorial page: https://morvanzhou.github.io/tutorials/My Youtube Channel: https://www.youtube.com/user/MorvanZhouDependencies:* torch: 0.1.11* matplotlib
###Code
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
%matplotlib inline
torch.manual_seed(1) # reproducible
# make fake data
n_data = torch.ones(100, 2)
# torch.normal(mean, std): Returns a tensor of random numbers drawn from separate normal distributions
# whose mean and standard deviation are given.
# this generate random numbers centered at 2 and have std=1
x0 = torch.normal(2*n_data, 1) # class0 x data (tensor), shape=(100, 2), same as that of n_data
y0 = torch.zeros(100) # class0 y data (tensor), shape=(100, )
x1 = torch.normal(-2*n_data, 1) # class1 x data (tensor), shape=(100, 2), same as that of n_data
y1 = torch.ones(100) # class1 y data (tensor), shape=(100, )
# torch.cat(seq, dim=0, out=None)
# Concatenates the given sequence of :attr:`seq` tensors in the given dimension.
# All tensors must either have the same shape (except in the concatenating
# dimension) or be empty.
# Returns the type if `dtype` is not provided, else casts this object to
# the specified type.
x = torch.cat((x0, x1), 0).type(torch.FloatTensor) # shape (200, 2) FloatTensor = 32-bit floating
# the result of torch.cat((y0, y1), ) is same as that of torch.cat((y0, y1), 0)
y = torch.cat((y0, y1), ).type(torch.LongTensor) # shape (200,) LongTensor = 64-bit integer
# torch can only train on Variable, so convert them to Variable
x, y = Variable(x), Variable(y)
# for cmap, check https://matplotlib.org/users/colormaps.html
# 'RdYlGn' has red for small value, yellow for medium value, green for large value
# s: The marker size in points**2.
# lw: The linewidth of the marker edges.
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
plt.show()
class Net(torch.nn.Module):
def __init__(self, n_feature, n_hidden, n_output):
super(Net, self).__init__()
self.hidden = torch.nn.Linear(n_feature, n_hidden) # hidden layer
self.out = torch.nn.Linear(n_hidden, n_output) # output layer
def forward(self, x):
x = F.relu(self.hidden(x)) # activation function for hidden layer
x = self.out(x)
return x
net = Net(n_feature=2, n_hidden=10, n_output=2) # define the network
print(net) # net architecture
# Loss and Optimizer
# Softmax is internally computed.
# Set parameters to be updated.
optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss() # the target label is NOT an one-hotted
plt.ion() # something about plotting
for t in range(100):
out = net(x) # input x and predict based on x, there is negative element and some element greater than 1
#out.shape(200,2), y.shape: (200,)
loss = loss_func(out, y) # must be (1. nn output, 2. target), the target label is NOT one-hotted
optimizer.zero_grad() # clear gradients for next train
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
if t % 10 == 0 or t in [3, 6]:
# plot and show learning process
plt.cla()
#add dim=1 in F.softmax to suppress the warning:
#/opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py:12:
#UserWarning: Implicit dimension choice for softmax has been deprecated.
#Change the call to include dim=X as an argument.
#if sys.path[0] == '':
_, prediction = torch.max(F.softmax(out, dim=1), 1)
# squeeze is not neccessary here, the shape of prediction.data.numpy() is (200,) already
pred_y = prediction.data.numpy() #.squeeze()
target_y = y.data.numpy()
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
accuracy = sum(pred_y == target_y)/200.
plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
plt.show()
plt.pause(0.1)
plt.ioff()
###Output
_____no_output_____
###Markdown
302 ClassificationView more, visit my tutorial page: https://morvanzhou.github.io/tutorials/My Youtube Channel: https://www.youtube.com/user/MorvanZhouDependencies:* torch: 1.4.0* matplotlib
###Code
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
%matplotlib inline
#torch.manual_seed(1) # reproducible
# make fake data
n_data = torch.ones(100, 2)
x0 = torch.normal(1.5*n_data, 1)    # class0 x data (tensor), shape=(100, 2): 100 points drawn from a normal distribution with mean (1.5, 1.5) and std 1
#x0 = torch.normal(2*n_data, 1)     # class0 x data (tensor), shape=(100, 2)
y0 = torch.zeros(100)               # class0 y data (tensor), shape=(100,)
x1 = torch.normal(-1.5*n_data, 1)   # class1 x data (tensor), shape=(100, 2)
y1 = torch.ones(100)                # class1 y data (tensor), shape=(100,)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor) # shape (200, 2) FloatTensor = 32-bit floating
y = torch.cat((y0, y1), ).type(torch.LongTensor) # shape (200,) LongTensor = 64-bit integer
plt.scatter(x[:, 0], x[:, 1], c=y, s=100, lw=0, cmap='RdYlGn')
plt.show()
class Net(nn.Module):
def __init__(self, n_feature, n_hidden1, n_output):
super(Net, self).__init__()
self.fc1 = nn.Linear(n_feature, n_hidden1)
self.fc2 = nn.Linear(n_hidden1, n_output)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
net = Net(n_feature=2, n_hidden1=10, n_output=2)
print(net)
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.1)
plt.ion() # something about plotting
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
net.to(device)
inputs = x.to(device)
labels = y.to(device)
for t in range(100):
net.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if t % 10 == 9 or t in [1, 3, 6]:
plt.cla()
_, predictions = torch.max(outputs, 1)
plt.scatter(x[:, 0], x[:, 1], c=predictions.cpu().detach().numpy(), s=100, lw=0, cmap='RdYlGn')
        accuracy = (predictions == labels).sum().item() / 200.0  # .item() converts the 0-dim tensor to a plain float
plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
plt.show()
plt.pause(0.1)
plt.ioff()
'''
for t in range(100):
out = net(x) # input x and predict based on x
loss = loss_func(out, y) # must be (1. nn output, 2. target), the target label is NOT one-hotted
optimizer.zero_grad() # clear gradients for next train
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
if t % 10 == 0 or t in [3, 6]:
# plot and show learning process
plt.cla()
_, prediction = torch.max(F.softmax(out), 1)
pred_y = prediction.data.numpy().squeeze()
target_y = y.data.numpy()
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
accuracy = sum(pred_y == target_y)/200.
plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
plt.show()
plt.pause(0.1)
plt.ioff()
'''
output = torch.randn(1, 5, requires_grad = True) #假设是网络的最后一层,5分类
label = torch.empty(1, dtype=torch.long).random_(5) # 0 - 4, 任意选取一个分类
print ('Network Output is: ', output)
print ('Ground Truth Label is: ', label)
###Output
Network Output is: tensor([[-1.1254, 0.5455, -0.3320, 0.2497, 0.1249]], requires_grad=True)
Ground Truth Label is: tensor([3])
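###Markdown
As a sanity check (an added sketch, not part of the original notebook): `nn.CrossEntropyLoss` is a log-softmax followed by negative log-likelihood, so computing it by hand on the tensors above should give the same value.
###Code
# Hypothetical check: CrossEntropyLoss equals NLL loss applied to log_softmax
loss_builtin = nn.CrossEntropyLoss()(output, label)
loss_manual = F.nll_loss(F.log_softmax(output, dim=1), label)
print(loss_builtin.item(), loss_manual.item()) # the two numbers should match
###Output
_____no_output_____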
###Markdown
302 ClassificationView more, visit my tutorial page: https://mofanpy.com/tutorials/My Youtube Channel: https://www.youtube.com/user/MorvanZhouDependencies:* torch: 0.1.11* matplotlib
###Code
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
%matplotlib inline
torch.manual_seed(1) # reproducible
# make fake data
n_data = torch.ones(100, 2)
x0 = torch.normal(2*n_data, 1) # class0 x data (tensor), shape=(100, 2)
y0 = torch.zeros(100)               # class0 y data (tensor), shape=(100,)
x1 = torch.normal(-2*n_data, 1)     # class1 x data (tensor), shape=(100, 2)
y1 = torch.ones(100)                # class1 y data (tensor), shape=(100,)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor) # shape (200, 2) FloatTensor = 32-bit floating
y = torch.cat((y0, y1), ).type(torch.LongTensor) # shape (200,) LongTensor = 64-bit integer
# torch can only train on Variable, so convert them to Variable
x, y = Variable(x), Variable(y)
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
plt.show()
class Net(torch.nn.Module):
def __init__(self, n_feature, n_hidden, n_output):
super(Net, self).__init__()
self.hidden = torch.nn.Linear(n_feature, n_hidden) # hidden layer
self.out = torch.nn.Linear(n_hidden, n_output) # output layer
def forward(self, x):
x = F.relu(self.hidden(x)) # activation function for hidden layer
x = self.out(x)
return x
net = Net(n_feature=2, n_hidden=10, n_output=2) # define the network
print(net) # net architecture
# Loss and Optimizer
# Softmax is internally computed.
# Set parameters to be updated.
optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss() # the target label is NOT an one-hotted
plt.ion() # something about plotting
for t in range(100):
out = net(x) # input x and predict based on x
loss = loss_func(out, y) # must be (1. nn output, 2. target), the target label is NOT one-hotted
optimizer.zero_grad() # clear gradients for next train
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
if t % 10 == 0 or t in [3, 6]:
# plot and show learning process
plt.cla()
        _, prediction = torch.max(F.softmax(out, dim=1), 1)
pred_y = prediction.data.numpy().squeeze()
target_y = y.data.numpy()
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
accuracy = sum(pred_y == target_y)/200.
plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
plt.show()
plt.pause(0.1)
plt.ioff()
###Output
_____no_output_____ |
boards/ZCU104/notebooks/prio/uart.ipynb | ###Markdown
UART DemoThis demo highlights the usefulness of using a more complex MMIO driver wrapper by implementing a wrapper to interact with UART hardware. This wrapper is included in the notebook as an example of how to create a more complicated MMIO driver, including how to interact with interrupts.
###Code
import asyncio
import time
from pynq import Interrupt
class UART() :
RX_OFFSET = 0x00
TX_OFFSET = 0x04
STATUS_OFFSET = 0x08
CONTROL_OFFSET = 0x0C
RX_AVAIL_BIT = 0x01
RX_FULL_BIT = 0x02
TX_EMPTY_BIT = 0x04
TX_FULL_BIT = 0x08
RST_FIFO_BIT = 0x03
CTRL_BIT_EN_INT = 0x10
CTRL_BIT_DIS_INT = 0XEF
def __init__(self, pr_region, name=None):
self._mmio = pr_region.S_AXI.mmio
interruptPin = str(pr_region.description.get('fullpath')) + "/axi_uartlite_0/interrupt"
interrupt = overlay.interrupt_pins[interruptPin]['fullpath']
if name is None:
self.name = "UART_" + str(pr_region.description.get('fullpath'))
else:
self.name = name
self.interrupt = Interrupt(interrupt)
def txReady(self):
cur_val = self._mmio.read(self.STATUS_OFFSET)
return not (cur_val & self.TX_FULL_BIT)
def rxAvail(self):
cur_val = self._mmio.read(self.STATUS_OFFSET)
return (cur_val & self.RX_AVAIL_BIT) == self.RX_AVAIL_BIT
def enableInterrupts(self, enable):
ctrl = self._mmio.read(self.CONTROL_OFFSET)
if enable:
ctrl |= self.CTRL_BIT_EN_INT
else:
ctrl &= self.CTRL_BIT_DIS_INT
self._mmio.write(self.CONTROL_OFFSET, ctrl)
def write(self, msg):
for b in msg:
# Wait for ready to send
while not self.txReady():
pass
# Send data
self.writeTxByte(b)
def readRxByte(self):
byte = self._mmio.read(self.RX_OFFSET)
return (byte & 0xff)
def writeTxByte(self, byte):
# Wait for ready to send
while not self.txReady():
pass
self._mmio.write(self.TX_OFFSET, byte)
#timeout_secs can be initialized to None to disable timeout
def read(self, size=1, timeout_secs=1):
recvd = []
timeout = _Timeout(timeout_secs)
while len(recvd) < size:
#waits for data to be available
while not self.rxAvail() and not timeout.expired():
pass
#exits if time has expired.
if timeout.expired():
break
b=self.readRxByte()
recvd.append(b)
return recvd
def printStatus(self):
status = self._mmio.read(self.STATUS_OFFSET)
print(self.name + " status:")
print("\tRX Available: " + str((status & self.RX_AVAIL_BIT) == self.RX_AVAIL_BIT))
print("\tRX Full: " + str((status & self.RX_FULL_BIT) == self.RX_FULL_BIT))
print("\tTX Empty: " + str((status & self.TX_EMPTY_BIT) == self.TX_EMPTY_BIT))
print("\tTX Full: " + str((status & self.TX_FULL_BIT) == self.TX_FULL_BIT))
print("\tInterrupts Enabled: " + str((status & self.CTRL_BIT_EN_INT) == self.CTRL_BIT_EN_INT))
def resetFIFOs(self):
self._mmio.write(self.CONTROL_OFFSET, self.RST_FIFO_BIT)
# Run this interrupt handler until all messages have been received
# msg_size - Number of bytes to wait for (if 0, run forever)
async def isr_recv(self, msg_size = 0):
recvd_msg = []
while True:
await self.interrupt.wait()
if self.rxAvail():
recvd = self.readRxByte()
recvd_msg.append(recvd)
if msg_size > 0:
print(self.name + " isr received byte #" + str(len(recvd_msg)) + \
" of " + str(msg_size) + ": " + hex(recvd))
if (len(recvd_msg) == msg_size):
return recvd_msg
else:
print(self.name + " isr received byte #" + str(len(recvd_msg)) + ": " + hex(recvd))
# This class is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2001-2016 Chris Liechti <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
class _Timeout(object):
"""\
Abstraction for timeout operations. Using time.monotonic() if available
or time.time() in all other cases.
The class can also be initialized with 0 or None, in order to support
non-blocking and fully blocking I/O operations. The attributes
is_non_blocking and is_infinite are set accordingly.
"""
if hasattr(time, 'monotonic'):
# Timeout implementation with time.monotonic(). This function is only
# supported by Python 3.3 and above. It returns a time in seconds
# (float) just as time.time(), but is not affected by system clock
# adjustments.
TIME = time.monotonic
else:
# Timeout implementation with time.time(). This is compatible with all
# Python versions but has issues if the clock is adjusted while the
# timeout is running.
TIME = time.time
def __init__(self, duration):
"""Initialize a timeout with given duration"""
self.is_infinite = (duration is None)
self.is_non_blocking = (duration == 0)
self.duration = duration
if duration is not None:
self.target_time = self.TIME() + duration
else:
self.target_time = None
def expired(self):
"""Return a boolean, telling if the timeout has expired"""
return self.target_time is not None and self.time_left() <= 0
def time_left(self):
"""Return how many seconds are left until the timeout expires"""
if self.is_non_blocking:
return 0
elif self.is_infinite:
return None
else:
delta = self.target_time - self.TIME()
if delta > self.duration:
# clock jumped, recalculate
self.target_time = self.TIME() + self.duration
return self.duration
else:
return max(0, delta)
def restart(self, duration):
"""\
Restart a timeout, only supported if a timeout was already set up
before.
"""
self.duration = duration
self.target_time = self.TIME() + duration
###Output
_____no_output_____
###Markdown
Download the static bitstreamWe first need to download the static or full bitstream before any partial bitstreams can be downloaded. Note that if the bitstream is not in the same directory as the notebook then the full path needs to be provided.
###Code
from prio.prio import PrIoOverlay
FULL_BITSTREAM_PATH = "/usr/local/lib/python3.6/dist-packages/prio/"
PARTIAL_BITSTREAM_PATH = "/usr/local/lib/python3.6/dist-packages/prio/partial/"
overlay = PrIoOverlay(FULL_BITSTREAM_PATH + "prio.bit")
###Output
_____no_output_____
###Markdown
Set up the reconfigurable regionNotice that, as with the full bitstream, the full path to the partial bitstream must be provided when it is located outside of the current notebook's directory.We will download a partial bitstream to each region and initialize a UART driver for it.
###Code
overlay.pr_download("pr_1", PARTIAL_BITSTREAM_PATH + "pr_1_uart.bit")
uart1 = UART(overlay.pr_1)
overlay.pr_download("pr_3", PARTIAL_BITSTREAM_PATH + "pr_3_uart.bit")
uart3 = UART(overlay.pr_3)
###Output
_____no_output_____
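###Markdown
A quick sketch of the polled `read` timeout (an added example, not part of the original demo): with nothing wired up or transmitted yet, `read` simply returns whatever arrived before the timeout, so this call should return an empty list after about a second.
###Code
# Hypothetical check of the read() timeout path: no data has been sent yet
uart1.resetFIFOs()
print(uart1.read(4, timeout_secs=1)) # expected: [] once the timeout expires
###Output
_____no_output_____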
###Markdown
Demo: Print UART StatusPrints the status of both of the UART modules.
###Code
uart1.resetFIFOs()
uart3.resetFIFOs()
uart1.printStatus()
uart3.printStatus()
###Output
UART_pr_1 status:
RX Available: False
RX Full: False
TX Empty: True
TX Full: False
Interrupts Enabled: False
UART_pr_3 status:
RX Available: False
RX Full: False
TX Empty: True
TX Full: False
Interrupts Enabled: False
###Markdown
Demo: Bidirectional UART MessagesThis cell will transmit a message back and forth between partial region 1 and partial region 3. After running the cell you will see output showing the message that was sent and the message that was recieved, going both directions.**Hardware setup:** For this demo you should connect a wire between the top right pin of **`PMOD 1`** (uart1 RX) and the second most right bottom pin of **`PMOD 1`** (uart3 TX), and a second wire between the bottom right pin of **`PMOD 1`** (uart1 TX) and the second most right top **`PMOD 1`** (uart3 RX). (The two wires should criss-cross)
###Code
# Note: Because the UART FIFOs are only 16 bytes deep, these 32-byte
# messages may occasionally fail to transmit properly. To ensure safe
# transmission, send at most 16 bytes at a time.
uart1.resetFIFOs()
uart3.resetFIFOs()
message = "Sending Data from uart1 to uart3"
print(message)
size = len(message)
uart1.write(message.encode())
received = uart3.read(size)
received = bytes(received).decode()
if received == message:
    print("Success! Message Received: " + received)
else:
    print("Failure: Message Received: " + received)
message = "Sending Data from uart3 to uart1"
print(message)
size = len(message)
uart3.write(message.encode())
received = uart1.read(size)
received = bytes(received).decode()
if received == message:
    print("Success! Message Received: " + received)
else:
    print("Failure: Message Received: " + received)
###Output
Sending Data from uart1 to uart3
Success! Message Received: Sending Data from uart1 to uart3
Sending Data from uart3 to uart1
Success! Message Received: Sending Data from uart3 to uart1
###Markdown
Demo: Bidirectional UART Messages with InterruptsThis demo is similar to the demo above, but this time it will utilize the interrupt functionality present in the PR regions. For the ZCU104, an interupt class attribute is automatically created each time an instance of the UART class is created, so we can use the same UART objects for the cell below.**Hardware setup:** _(Same as previous demo)_ For this demo you should connect a wire between the top right pin of **`PMOD 1`** (uart1 RX) and the second most right bottom pin of **`PMOD 1`** (uart3 TX), and a second wire between the bottom right pin of **`PMOD 1`** (uart1 TX) and the second most right top **`PMOD 1`** (uart3 RX). (the two wires should criss-cross)
###Code
msg = [0xde, 0xad, 0xbe, 0xef]
uart1.resetFIFOs()
uart3.resetFIFOs()
# Send message from uart 1 to uart 3
print("***** Sending message: " + '[{}]'.format(', '.join(hex(x) for x in msg)) + "*****")
uart3.enableInterrupts(True)
uart1.write(msg)
recvd = asyncio.get_event_loop().run_until_complete(uart3.isr_recv(len(msg)))
if recvd == msg:
print("Success: correct message received")
else:
print("Failure: message received: (" + '[{}]'.format(', '.join(hex(x) for x in recvd)) + ")")
# Send message from uart 3 to uart 1
print("\n***** Sending message: " + '[{}]'.format(', '.join(hex(x) for x in msg)) + "*****")
uart1.enableInterrupts(True)
uart3.write(msg)
recvd = asyncio.get_event_loop().run_until_complete(uart1.isr_recv(len(msg)))
if recvd == msg:
print("Success: correct message received")
else:
print("Failure: message received: (" + '[{}]'.format(', '.join(hex(x) for x in recvd)) + ")")
###Output
***** Sending message: [0xde, 0xad, 0xbe, 0xef]*****
UART_pr_3 isr received byte #1 of 4: 0xde
UART_pr_3 isr received byte #2 of 4: 0xad
UART_pr_3 isr received byte #3 of 4: 0xbe
UART_pr_3 isr received byte #4 of 4: 0xef
Success: correct message received
***** Sending message: [0xde, 0xad, 0xbe, 0xef]*****
UART_pr_1 isr received byte #1 of 4: 0xde
UART_pr_1 isr received byte #2 of 4: 0xad
UART_pr_1 isr received byte #3 of 4: 0xbe
UART_pr_1 isr received byte #4 of 4: 0xef
Success: correct message received
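###Markdown
When the interrupt-driven demo is finished, the receive interrupts can be switched back off (a small added housekeeping step using the driver's own method):
###Code
# Disable RX interrupts on both UARTs now that the ISR coroutines have returned
uart1.enableInterrupts(False)
uart3.enableInterrupts(False)
###Output
_____no_output_____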
|
Daily_Coding_Problem#4.ipynb | ###Markdown
Given an array of integers, find the first missing positive integer in linear time and constant space. In other words, find the lowest positive integer that does not exist in the array. The array can contain duplicates and negative numbers as well.For example, the input [3, 4, -1, 1] should give 2. The input [1, 2, 0] should give 3.You can modify the input array in-place.
###Code
#O(N) space
def find(li):
    # smallest positive value seen so far (infinity if there is none)
    min_pos = float('inf')
    for ele in li:
        if ele > 0 and ele < min_pos:
            min_pos = ele
    s = set(li)
    if min_pos == 1:
        # 1 is present, so walk upward until a positive integer is missing
        candidate = 2
        while candidate in s:
            candidate += 1
        return candidate
    else:
        # either 1 is missing or the list has no positive numbers at all
        return 1
li=[int(x) for x in input().split()]
find(li)
#O(N) space
def first_missing_positive(nums):
s = set(nums)
i = 1
while i in s:
i += 1
return i
#O(1) space
def first_missing_positive(nums):
if not nums:
return 1
for i, num in enumerate(nums):
while i + 1 != nums[i] and 0 < nums[i] <= len(nums):
v = nums[i]
nums[i], nums[v - 1] = nums[v - 1], nums[i]
if nums[i] == nums[v - 1]:
break
for i, num in enumerate(nums, 1):
if num != i:
return i
return len(nums) + 1
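# Added sanity checks (hypothetical test lines, not in the original solution),
# exercising the constant-space first_missing_positive defined just above:
assert first_missing_positive([3, 4, -1, 1]) == 2
assert first_missing_positive([1, 2, 0]) == 3
assert first_missing_positive([]) == 1
assert first_missing_positive([-5, -1]) == 1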
###Output
_____no_output_____ |
docs/source/notebooks/tutorial/4-More-Value-Expressions.ipynb | ###Markdown
More Value Expressions Setup
###Code
import ibis
import os
hdfs_port = os.environ.get('IBIS_WEBHDFS_PORT', 50070)
hdfs = ibis.hdfs_connect(host='quickstart.cloudera', port=hdfs_port)
con = ibis.impala.connect(host='quickstart.cloudera', database='ibis_testing',
hdfs_client=hdfs)
ibis.options.interactive = True
###Output
_____no_output_____
###Markdown
Type castingThe Ibis type system is pretty basic and will get better (and more documented over time). It maps directly onto the current Impala type system- `int8`- `int16`- `int32`- `int64`- `boolean`- `float`- `double`- `string`- `timestamp`- `decimal($precision, $scale)`These type names can be used to cast from one type to another
###Code
table = con.table('functional_alltypes')
table.string_col.cast('double').sum()
table.string_col.cast('decimal(12,2)').sum()
###Output
_____no_output_____
###Markdown
Case / if-then-else expressionsWe support a number of variants of the SQL-equivalent `CASE` expression, and will add more API functions over time to meet different use cases and enhance the expressiveness of any branching-based value logic.
###Code
expr = (table.string_col
.case()
.when('4', 'fee')
.when('7', 'fi')
.when('1', 'fo')
.when('0', 'fum')
.else_(table.string_col)
.end()
.name('new_strings'))
expr.value_counts()
###Output
_____no_output_____
###Markdown
If the `else_` default condition is not provided, any values not matching one of the conditions will be `NULL`.
###Code
expr = (table.string_col
.case()
.when('4', 'fee')
.when('7', 'fi')
.end()
.name('with_nulls'))
expr.value_counts()
###Output
_____no_output_____
###Markdown
To test for an arbitrary series of boolean conditions, use the `case` API method and pass any boolean expressions potentially involving columns of the table:
###Code
expr = (ibis.case()
.when(table.int_col > 5, table.bigint_col * 2)
.when(table.int_col > 2, table.bigint_col)
.else_(table.int_col)
.end())
table['id', 'int_col', 'bigint_col', expr.name('case_result')].limit(20)
###Output
_____no_output_____
###Markdown
Simple ternary-cases (like the Python `X if COND else Y`) can be written using the `ifelse` function:
###Code
expr = ((table.int_col > 5)
.ifelse(table.bigint_col / 2, table.bigint_col * 2)
.name('ifelse_result'))
table['int_col', 'bigint_col', expr].limit(10)
###Output
_____no_output_____
###Markdown
Set membershipThe `isin` and `notin` functions are like their pandas counterparts. These can take:- A list of value expressions, either literal values or other column expressions- An array/column expression of some kind
###Code
bool_clause = table.string_col.notin(['1', '4', '7'])
table[bool_clause].string_col.value_counts()
###Output
_____no_output_____
###Markdown
You can also check for membership in an array. Here is an example of filtering based on the top 3 (ignoring ties) most frequently-occurring values in the `string_col` column of alltypes:
###Code
top_strings = table.string_col.value_counts().limit(3).string_col
top_filter = table.string_col.isin(top_strings)
expr = table[top_filter]
expr.count()
###Output
_____no_output_____
###Markdown
This is a common enough operation that we provide a special analytical filter function `topk`:
###Code
table[table.string_col.topk(3)].count()
###Output
_____no_output_____
###Markdown
Cool, huh? More on `topk` later. Null-nessLike their pandas equivalents, the `isnull` and `notnull` functions return True values if the values are null, or non-null, respectively. For example:
###Code
expr = (table.string_col
.case()
.when('4', 'fee')
.when('7', 'fi')
.when('1', 'fo')
.end()
.name('new_strings'))
expr.isnull().value_counts()
###Output
_____no_output_____
###Markdown
Functions like `isnull` can be combined with `case` expressions or functions like `ifelse` to replace null values with some other value. `ifelse` here will use the first value supplied for any `True` value and the second value for any `False` value. Either value can be a scalar or array.
###Code
expr2 = expr.isnull().ifelse('was null', expr).name('strings')
expr2.value_counts()
###Output
_____no_output_____
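###Markdown
The same null replacement can also be written as a coalesce (an added sketch; `ibis.coalesce` maps to SQL's COALESCE, but verify the helper name against your ibis version):
###Code
# Hypothetical equivalent of the ifelse-based replacement above
ibis.coalesce(expr, 'was null').name('strings').value_counts()
###Output
_____no_output_____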
###Markdown
Distinct-based operationsIbis supports using `distinct` to remove duplicate rows or values on tables or arrays. For example:
###Code
table['int_col', 'bigint_col'].distinct()
table.string_col.distinct()
###Output
_____no_output_____
###Markdown
This can be combined with `count` to form a reduction metric:
###Code
metric = (table.bigint_col
.distinct().count()
.name('unique_bigints'))
###Output
_____no_output_____
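###Markdown
Because interactive mode is on, displaying the metric executes it (an added step); it should agree with the `nunique` shortcut shown next.
###Code
metric
###Output
_____no_output_____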
###Markdown
This is common enough to have a shortcut `nunique`:
###Code
table.string_col.nunique()
###Output
_____no_output_____
###Markdown
String operationsWhat's supported is pretty basic right now. We intend to support the full gamut of regular expression munging with a nice API, though in some cases some work will be required on Impala's backend to support everything.
###Code
nation = con.table('tpch_nation')
nation.limit(5)
###Output
_____no_output_____
###Markdown
At the moment, basic substring operations (`substr`, with conveniences `left` and `right`) and Python-like APIs such as `lower` and `upper` (for case normalization) are supported. So you could count first letter occurrences in a string column like so:
###Code
expr = nation.n_name.lower().left(1).name('first_letter')
expr.value_counts().sort_by(('count', False))
###Output
_____no_output_____
###Markdown
For fuzzy and regex filtering/searching, you can use one of the following- `like`, works as the SQL `LIKE` keyword- `rlike`, like `re.search` or SQL `RLIKE`- `contains`, like `x in str_value` in Python
###Code
nation[nation.n_name.like('%GE%')]
nation[nation.n_name.lower().rlike('.*ge.*')]
nation[nation.n_name.lower().contains('ge')]
###Output
_____no_output_____
###Markdown
Timestamp operationsDate and time functionality is relatively limited at present compared with pandas, but we'll get there. The main things we have right now are- Field access (year, month, day, ...)- Timedeltas- Comparisons with fixed timestamps
###Code
table = con.table('functional_alltypes')
table[table.timestamp_col, table.timestamp_col.minute().name('minute')].limit(10)
###Output
_____no_output_____
###Markdown
Somewhat more comprehensively
###Code
def get_field(f):
return getattr(table.timestamp_col, f)().name(f)
fields = ['year', 'month', 'day', 'hour', 'minute', 'second', 'millisecond']
projection = [table.timestamp_col] + [get_field(x) for x in fields]
table[projection].limit(10)
###Output
_____no_output_____
###Markdown
For timestamp arithmetic and comparisons, check out functions in the top-level `ibis` namespace. These include things like `day` and `second`, but also the `ibis.timestamp` function:
###Code
table[table.timestamp_col.min(), table.timestamp_col.max(), table.count().name('nrows')]
table[table.timestamp_col < '2010-01-01'].count()
table[table.timestamp_col <
(ibis.timestamp('2010-01-01') + ibis.month(3))].count()
expr = (table.timestamp_col + ibis.day(1) + ibis.hour(4)).name('offset')
table[table.timestamp_col, expr, ibis.now().name('current_time')].limit(10)
###Output
_____no_output_____
###Markdown
More Value Expressions Setup
###Code
import ibis
import os
hdfs_port = os.environ.get('IBIS_WEBHDFS_PORT', 50070)
hdfs = ibis.hdfs_connect(host='quickstart.cloudera', port=hdfs_port)
con = ibis.impala.connect(host='quickstart.cloudera', database='ibis_testing',
hdfs_client=hdfs)
ibis.options.interactive = True
###Output
_____no_output_____
###Markdown
Type castingThe Ibis type system is pretty basic and will get better (and more documented over time). It maps directly onto the current Impala type system- `int8`- `int16`- `int32`- `int64`- `boolean`- `float`- `double`- `string`- `timestamp`- `decimal($precision, $scale)`These type names can be used to cast from one type to another
###Code
table = con.table('functional_alltypes')
table.string_col.cast('double').sum()
table.string_col.cast('decimal(12,2)').sum()
###Output
_____no_output_____
###Markdown
Case / if-then-else expressionsWe support a number of variants of the SQL-equivalent `CASE` expression, and will add more API functions over time to meet different use cases and enhance the expressiveness of any branching-based value logic.
###Code
expr = (table.string_col
.case()
.when('4', 'fee')
.when('7', 'fi')
.when('1', 'fo')
.when('0', 'fum')
.else_(table.string_col)
.end()
.name('new_strings'))
expr.value_counts()
###Output
_____no_output_____
###Markdown
If the `else_` default condition is not provided, any values not matching one of the conditions will be `NULL`.
###Code
expr = (table.string_col
.case()
.when('4', 'fee')
.when('7', 'fi')
.end()
.name('with_nulls'))
expr.value_counts()
###Output
_____no_output_____
###Markdown
To test for an arbitrary series of boolean conditions, use the `case` API method and pass any boolean expressions potentially involving columns of the table:
###Code
expr = (ibis.case()
.when(table.int_col > 5, table.bigint_col * 2)
.when(table.int_col > 2, table.bigint_col)
.else_(table.int_col)
.end())
table['id', 'int_col', 'bigint_col', expr.name('case_result')].limit(20)
###Output
_____no_output_____
###Markdown
Simple ternary-cases (like the Python `X if COND else Y`) can be written using the `ifelse` function:
###Code
expr = ((table.int_col > 5)
.ifelse(table.bigint_col / 2, table.bigint_col * 2)
.name('ifelse_result'))
table['int_col', 'bigint_col', expr].limit(10)
###Output
_____no_output_____
###Markdown
Set membershipThe `isin` and `notin` functions are like their pandas counterparts. These can take:- A list of value expressions, either literal values or other column expressions- An array/column expression of some kind
###Code
bool_clause = table.string_col.notin(['1', '4', '7'])
table[bool_clause].string_col.value_counts()
###Output
_____no_output_____
###Markdown
You can also check for membership in an array. Here is an example of filtering based on the top 3 (ignoring ties) most frequently-occurring values in the `string_col` column of alltypes:
###Code
top_strings = table.string_col.value_counts().limit(3).string_col
top_filter = table.string_col.isin(top_strings)
expr = table[top_filter]
expr.count()
###Output
_____no_output_____
###Markdown
This is a common enough operation that we provide a special analytical filter function `topk`:
###Code
table[table.string_col.topk(3)].count()
###Output
_____no_output_____
###Markdown
Cool, huh? More on `topk` later. Null-nessLike their pandas equivalents, the `isnull` and `notnull` functions return True values if the values are null, or non-null, respectively. For example:
###Code
expr = (table.string_col
.case()
.when('4', 'fee')
.when('7', 'fi')
.when('1', 'fo')
.end()
.name('new_strings'))
expr.isnull().value_counts()
###Output
_____no_output_____
###Markdown
Functions like `isnull` can be combined with `case` expressions or functions like `ifelse` to replace null values with some other value. `ifelse` here will use the first value supplied for any `True` value and the second value for any `False` value. Either value can be a scalar or array.
###Code
expr2 = expr.isnull().ifelse('was null', expr).name('strings')
expr2.value_counts()
###Output
_____no_output_____
###Markdown
Distinct-based operationsIbis supports using `distinct` to remove duplicate rows or values on tables or arrays. For example:
###Code
table['int_col', 'bigint_col'].distinct()
table.string_col.distinct()
###Output
_____no_output_____
###Markdown
This can be combined with `count` to form a reduction metric:
###Code
metric = (table.bigint_col
.distinct().count()
.name('unique_bigints'))
###Output
_____no_output_____
###Markdown
This is common enough to have a shortcut `nunique`:
###Code
table.string_col.nunique()
###Output
_____no_output_____
###Markdown
String operationsWhat's supported is pretty basic right now. We intend to support the full gamut of regular expression munging with a nice API, though in some cases some work will be required on Impala's backend to support everything.
###Code
nation = con.table('tpch_nation')
nation.limit(5)
###Output
_____no_output_____
###Markdown
At the moment, basic substring operations (`substr`, with conveniences `left` and `right`) and Python-like APIs such as `lower` and `upper` (for case normalization) are supported. So you could count first letter occurrences in a string column like so:
###Code
expr = nation.n_name.lower().left(1).name('first_letter')
expr.value_counts().sort_by(('count', False))
###Output
_____no_output_____
###Markdown
For fuzzy and regex filtering/searching, you can use one of the following- `like`, works as the SQL `LIKE` keyword- `rlike`, like `re.search` or SQL `RLIKE`- `contains`, like `x in str_value` in Python
###Code
nation[nation.n_name.like('%GE%')]
nation[nation.n_name.lower().rlike('.*ge.*')]
nation[nation.n_name.lower().contains('ge')]
###Output
_____no_output_____
###Markdown
Timestamp operationsDate and time functionality is relatively limited at present compared with pandas, but we'll get there. The main things we have right now are- Field access (year, month, day, ...)- Timedeltas- Comparisons with fixed timestamps
###Code
table = con.table('functional_alltypes')
table[table.timestamp_col, table.timestamp_col.minute().name('minute')].limit(10)
###Output
_____no_output_____
###Markdown
Somewhat more comprehensively
###Code
def get_field(f):
return getattr(table.timestamp_col, f)().name(f)
fields = ['year', 'month', 'day', 'hour', 'minute', 'second', 'millisecond']
projection = [table.timestamp_col] + [get_field(x) for x in fields]
table[projection].limit(10)
###Output
_____no_output_____
###Markdown
For timestamp arithmetic and comparisons, check out functions in the top-level `ibis` namespace. These include things like `day` and `second`, but also the `ibis.timestamp` function:
###Code
table[table.timestamp_col.min(), table.timestamp_col.max(), table.count().name('nrows')]
table[table.timestamp_col < '2010-01-01'].count()
table[table.timestamp_col <
(ibis.timestamp('2010-01-01') + ibis.interval(months=3))].count()
expr = (table.timestamp_col + ibis.interval(days=1) + ibis.interval(hours=4)).name('offset')
table[table.timestamp_col, expr, ibis.now().name('current_time')].limit(10)
###Output
_____no_output_____ |
nbs/02_data.load.ipynb | ###Markdown
DataLoader
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler,generator,prefetch_factor = (
None,False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count,None,2)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception): pass
#export
@log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch')
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods:
exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
###Output
_____no_output_____
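###Markdown
Because `DataLoader` is decorated with `funcs_kwargs`, any method named in `_methods` can be swapped out by passing a function of the same name as a keyword argument. A minimal sketch (the deterministic reverse-as-shuffle below is purely for illustration):
###Code
# Hypothetical override: replace the random shuffle with a deterministic reversal of the indices
dl = DataLoader(list(range(8)), bs=4, shuffle=True, shuffle_fn=lambda idxs: list(reversed(idxs)))
test_eq(L(dl), L(tensor([7,6,5,4]), tensor([3,2,1,0])))
###Output
_____no_output_____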
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (call `stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
fastai DataLoader> API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility
###Code
#default_exp data.load
#export
from fastai.torch_basics import *
from torch.utils.data.dataloader import _MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter,_DatasetKind
#export
_loaders = (_MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter)
#hide
from nbdev.showdoc import *
bs = 4
letters = list(string.ascii_lowercase)
###Output
_____no_output_____
###Markdown
DataLoader helpers fastai includes a replacement for PyTorch's *DataLoader* which is largely API-compatible and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler,generator,prefetch_factor = (
None,False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count,None,2)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch')
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`. Use python's `random` functionality to implement it.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Return a subset of the dataset containing the index values of the sample if there are samples, else return the next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details (https://pytorch.org/docs/stable/data.html#multi-process-data-loading).",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
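###Markdown
As a small illustration of `new` (a sketch using the `letters` list from the setup above): it clones the `DataLoader`, keeping the dataset and any overridden methods, while letting you change individual arguments.
###Code
# Sketch: derive a new DataLoader from an existing one, overriding only some arguments
dl = DataLoader(letters, bs=4, shuffle=True)
dl2 = dl.new(bs=13, shuffle=False)
test_eq(dl2.bs, 13)
test_eq(dl2.shuffle, False)
test_eq(dl2.dataset, dl.dataset)
###Output
_____no_output_____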
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either a map-style or iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time the dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): Set to `False` if you are using an iterable-style dataset. Otherwise it is set to `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using an iterable-style dataset, you can specify the total number of items with `n`.* `device` (torch.device): Defaults to `default_device()`, which is CUDA by default. You can specify the device as `torch.device('cpu')`. Override `create_item` and use the default infinite sampler to get a stream of unknown length (call `stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
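###Markdown
A minimal sketch of `SkipItemException` in action (using the `letters` list from the setup): raising it from `after_item` makes `do_item` return `None`, so the item is silently dropped from the stream.
###Code
# Hypothetical filter: drop vowels by raising SkipItemException from after_item
def _drop_vowels(o):
    if o in 'aeiou': raise SkipItemException()
    return o
dl = DataLoader(letters, bs=5, after_item=_drop_vowels)
test_eq(first(dl), ['b','c','d','f','g'])
###Output
_____no_output_____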
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
DataLoader helpers fastai includes a replacement for PyTorch's *DataLoader* which is largely API-compatible and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
def _fn_noops(self, x=None, *args, **kwargs): return x
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,_fn_noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout,persistent_workers')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
if sys.platform == "win32" and IN_NOTEBOOK and num_workers > 0:
print("Due to IPython and Windows limitation, python multiprocessing isn't available now.")
print("So `number_workers` is changed to 0 to avoid getting stuck")
num_workers = 0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods:
o = getattr(self, n)
if not isinstance(o, MethodType): cur_kwargs[n] = o
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Subset of the dataset containing the index values of sample if exists, else next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details.",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
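###Markdown
The per-item and per-batch hooks described above can be supplied directly as keyword arguments. A minimal sketch using the `letters` list defined earlier (the upper-casing and reversal are arbitrary examples):
###Code
# Sketch: after_item transforms each item, before_batch sees the whole list of items prior to collation
dl = DataLoader(letters, bs=4,
                after_item=lambda o: o.upper(),
                before_batch=lambda b: list(reversed(b)))
test_eq(first(dl), ['D','C','B','A'])
###Output
_____no_output_____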
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either a map-style or iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time the dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): Set to `False` if you are using an iterable-style dataset. Otherwise it is set to `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using an iterable-style dataset, you can specify the total number of items with `n`.* `device` (torch.device): Defaults to `default_device()`, which is CUDA by default. You can specify the device as `torch.device('cpu')`. Override `create_item` and use the default infinite sampler to get a stream of unknown length (call `stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_num_workers = 0 if sys.platform == "win32" else 4
test_eq(dl.fake_l.num_workers, test_num_workers)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, test_num_workers)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2], dtype=np.int64),array([3,4,5], dtype=np.int64))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
L(dl)
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
if not (sys.platform == "win32" and IN_NOTEBOOK):
%time test_shuffled(L(DataLoader(it, num_workers=4)), L(range(30)))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
from subprocess import Popen, PIPE
# test num_workers > 0 in scripts works when python process start method is spawn
process = Popen(["python", "dltest.py"], stdout=PIPE)
_, err = process.communicate(timeout=15)
exit_code = process.wait()
test_eq(exit_code, 0)
###Output
_____no_output_____
###Markdown
DataLoader helpers fastai includes a replacement for PyTorch's *DataLoader* which is largely API-compatible and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch')
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
         randomize = "Sets `DataLoader` random number generator state.",
         retain = "Cast each item of `res` to the type of the matching item in `b` if it's a superclass.",
         create_item = "Return the item from the dataset at index `s` if indexed, else the next item from the dataset iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details (https://pytorch.org/docs/stable/data.html#multi-process-data-loading).",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): Set to `False` if you are using an iterable-style dataset. Otherwise it is set to `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using an iterable-style dataset, you can specify the number of items with `n`.* `device` (torch.device): Defaults to `default_device()`, which is CUDA by default. You can specify the device as `torch.device('cpu')`. Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1. Note it does not need to use `self.rng` anymore to maintain consistent behavior across workers.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
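# Illustrative sketch (not part of the original notebook): `one_batch` grabs a single batch
# without spawning workers, which is handy for a quick shape/type check.
dl = DataLoader(range(10), bs=4)
test_eq(dl.one_batch(), tensor([0,1,2,3]))
test_eq(len(dl), 3)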
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
DataLoader helpers fastai includes a replacement for Pytorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
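    # Lightweight stand-in that exposes the attributes torch's loader-iterator machinery expects,
    # so `DataLoader.__iter__` can hand it to the iterators stored in `_loaders`.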
def _fn_noops(self, x=None, *args, **kwargs): return x
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,_fn_noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout,persistent_workers')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
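# Illustrative sketch (not part of the original notebook): `fa_collate` rebuilds `Sequence`
# containers with their original type, so a custom tuple subclass survives collation.
class _Pair(tuple): pass
t = [_Pair((tensor(1),tensor(2))), _Pair((tensor(3),tensor(4)))]
test_eq(type(fa_collate(t)), _Pair)
test_eq(fa_collate(t)[0], tensor([1,3]))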
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
#export
def collate_error(e:Exception, batch):
"Raises error when the batch could not collate, stating what items in the batch are different sizes and their types"
err = f'Error when trying to collate the data into batches with fa_collate, at least two tensors in the batch are not the same size.\n\n'
# we need to iterate through the entire batch and find a mismatch
length = len(batch[0])
for idx in range(length): # for each type in the batch
for i, item in enumerate(batch):
if i == 0: shape_a, type_a = item[idx].shape, item[idx].__class__.__name__
elif item[idx].shape != shape_a:
shape_b = item[idx].shape
if shape_a != shape_b:
err += f'Mismatch found on axis {idx} of the batch and is of type `{type_a}`:\n\tItem at index 0 has shape: {shape_a}\n\tItem at index {i} has shape: {shape_b}\n\nPlease include a transform in `after_item` that ensures all data of type {type_a} is the same size'
e.args = [err]
raise
#hide
batch = [torch.rand(3, 375, 500), torch.rand(3, 375, 500), torch.rand(3, 500, 333)]
with ExceptionExpected(RuntimeError, "Mismatch found on axis 0 of the batch and is of type `Tensor`"):
try:
fa_collate(batch)
except Exception as e:
collate_error(e, batch)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = (hasattr(dataset,'__getitem__')
and not isinstance(dataset, IterableDataset))
if not indexed and shuffle: raise ValueError("Can only shuffle an indexed dataset (not an iterable one).")
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
if sys.platform == "win32" and IN_NOTEBOOK and num_workers > 0:
print("Due to IPython and Windows limitation, python multiprocessing isn't available now.")
print("So `number_workers` is changed to 0 to avoid getting stuck")
num_workers = 0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
# pin_memory causes tuples to be converted to lists, so convert them back to tuples
if self.pin_memory and type(b) == list: b = tuple(b)
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
if self.dataset is not None: self.it = iter(self.dataset)
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods:
o = getattr(self, n)
if not isinstance(o, MethodType): cur_kwargs[n] = o
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s):
if self.indexed: return self.dataset[s or 0]
elif s is None: return next(self.it)
else: raise IndexError("Cannot index an iterable dataset numerically - must use `None`.")
def create_batch(self, b):
try: return (fa_collate,fa_convert)[self.prebatched](b)
except Exception as e:
if not self.prebatched: collate_error(e,b)
raise
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
         randomize = "Sets `DataLoader` random number generator state.",
         retain = "Cast each item of `res` to the type of the matching item in `b` if it's a superclass.",
         create_item = "Return the item from the dataset at index `s` if indexed, else the next item from the dataset iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details.",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): The `DataLoader` will make a guess as to whether the dataset can be indexed (or is iterable), but you can override it with this parameter. `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using iterable-style dataset, you can specify the size with `n`.* `device` (torch.device): Defaults to `default_device()` which is CUDA by default. You can specify device as `torch.device('cpu')`. Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_num_workers = 0 if sys.platform == "win32" else 4
test_eq(dl.fake_l.num_workers, test_num_workers)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, test_num_workers)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2], dtype=np.int64),array([3,4,5], dtype=np.int64))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
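# Illustrative sketch (not part of the original notebook): `new` copies this DataLoader's
# settings and overrides only the arguments you pass.
dl = DataLoader(range(12), bs=4)
dl2 = dl.new(bs=6)
test_eq(dl2.bs, 6)
test_eq(len(dl2), 2)
test_eq(dl.bs, 4) # the original is untouched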
###Output
_____no_output_____
###Markdown
Iterable dataloaders require specific tests.
###Code
class DummyIterableDataset(IterableDataset):
def __iter__(self):
yield from range(11)
ds1 = DataLoader(DummyIterableDataset(), bs=4)
# Check it yields fine, and check we can do multiple passes
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10])))
# Check `drop_last` works fine (with multiple passes, since this will prematurely terminate the iterator)
ds1 = DataLoader(DummyIterableDataset(), bs=4, drop_last=True)
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7])))
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
L(dl)
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
if not (sys.platform == "win32" and IN_NOTEBOOK):
%time test_shuffled(L(DataLoader(it, num_workers=4)), L(range(30)))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
# from subprocess import Popen, PIPE
# # test num_workers > 0 in scripts works when python process start method is spawn
# process = Popen(["python", "dltest.py"], stdout=PIPE)
# _, err = process.communicate(timeout=15)
# exit_code = process.wait()
# test_eq(exit_code, 0)
###Output
_____no_output_____
###Markdown
DataLoader helpers fastai includes a replacement for Pytorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout,persistent_workers')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods:
o = getattr(self, n)
if not isinstance(o, MethodType): cur_kwargs[n] = o
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
         randomize = "Sets `DataLoader` random number generator state.",
         retain = "Cast each item of `res` to the type of the matching item in `b` if it's a superclass.",
         create_item = "Return the item from the dataset at index `s` if indexed, else the next item from the dataset iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details.",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): Set to `False` if you are using an iterable-style dataset. Otherwise it is set to `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using an iterable-style dataset, you can specify the number of items with `n`.* `device` (torch.device): Defaults to `default_device()`, which is CUDA by default. You can specify the device as `torch.device('cpu')`. Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
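# Illustrative sketch (not part of the original notebook): raising `SkipItemException` from
# `after_item` drops that item before batching, so batches are built from the survivors only.
def _skip_odd(x):
    if x%2: raise SkipItemException()
    return x
dl = DataLoader(range(10), bs=2, after_item=_skip_odd)
test_eq_type(L(dl), L(tensor([0,2]),tensor([4,6]),tensor([8])))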
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 01a_losses.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
DataLoader
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler = (
None,False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception): pass
#export
@log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch')
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods:
exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
###Output
_____no_output_____
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
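# Illustrative sketch (not part of the original notebook): when `device` is set, each batch
# is moved there in `__iter__` via `to_device`.
dl = DataLoader(range(8), bs=4, device=torch.device('cpu'))
test_eq(first(dl).device, torch.device('cpu'))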
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
_____no_output_____
###Markdown
DataLoader helpers fastai includes a replacement for Pytorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch')
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
         randomize = "Sets `DataLoader` random number generator state.",
         retain = "Cast each item of `res` to type of matching item in `b` if it's a superclass.",
         create_item = "Return the dataset item at the given index if one is provided, else return the next item from the dataset iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details (https://pytorch.org/docs/stable/data.html#multi-process-data-loading).",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
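###Markdown
A minimal sketch of the callback hooks documented above (the toy dataset and helper functions below are illustrative, not part of the library tests): `after_item` runs on every item, `before_batch` on the list of items about to be collated, and `after_batch` would run on the collated mini-batch.
###Code
def _add_one(x): return x+1 # applied to each item via `after_item`
def _keep_first_three(b): return b[:3] # trims each chunk of items before collation
_dl = DataLoader(list(range(8)), bs=4, after_item=_add_one, before_batch=_keep_first_three)
test_eq(L(_dl), L(tensor([1,2,3]), tensor([5,6,7])))
###Output
_____no_output_____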
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either a map-style or an iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time the dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): Set to `False` if you are using an iterable-style dataset. Otherwise it is set to `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using an iterable-style dataset, you can use `n` to specify the number of items in the dataset.* `device` (torch.device): Defaults to `default_device()`, which is CUDA by default. You can specify the device as `torch.device('cpu')`. Override `get_idxs` to return the same index until the DL is fully consumed. This is intended to test consistent sampling behavior when `num_workers`>1. Note that it does not need to use `self.rng` anymore to maintain consistent behavior across workers.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (call `stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
DataLoader helpers fastai includes a replacement for PyTorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout,persistent_workers')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
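# A short sketch of `SkipItemException` in action (not part of the library tests), assuming the
# `DataLoader` defined in this notebook: raising it from `after_item` (or `create_item`) makes
# `do_item` return None, so the item is dropped before batching. `_skip_odd` is a hypothetical helper.
def _skip_odd(x):
    if x % 2: raise SkipItemException()
    return x
test_eq(L(DataLoader(list(range(10)), bs=4, after_item=_skip_odd)),
        L(tensor([0,2,4,6]), tensor([8])))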
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch')
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
         randomize = "Sets `DataLoader` random number generator state.",
         retain = "Cast each item of `res` to type of matching item in `b` if it's a superclass.",
         create_item = "Return the dataset item at the given index if one is provided, else return the next item from the dataset iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details.",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
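###Markdown
A brief sketch of `new` and `one_batch` as documented above (the dataset is illustrative): `new` builds a clone of the `DataLoader`, overriding only the arguments you pass, while `one_batch` returns a single batch with multiprocessing temporarily disabled.
###Code
_dl = DataLoader(list(range(12)), bs=4)
_dl2 = _dl.new(bs=6) # same dataset and settings, different batch size
test_eq((len(_dl), len(_dl2)), (3, 2))
test_eq(_dl2.one_batch(), tensor([0,1,2,3,4,5]))
###Output
_____no_output_____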
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either a map-style or an iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time the dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): Set to `False` if you are using an iterable-style dataset. Otherwise it is set to `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using an iterable-style dataset, you can use `n` to specify the number of items in the dataset.* `device` (torch.device): Defaults to `default_device()`, which is CUDA by default. You can specify the device as `torch.device('cpu')`. Override `create_item` and use the default infinite sampler to get a stream of unknown length (call `stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Override `get_idxs` to return the same index until the DL is fully consumed. This is intended to test consistent sampling behavior when `num_workers`>1.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 01a_losses.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
DataLoader
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler,generator,prefetch_factor = (
None,False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count,None,2)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception): pass
#export
@log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch')
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods:
exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
###Output
_____no_output_____
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (call `stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
DataLoader
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler,generator,prefetch_factor = (
None,False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count,None,2)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception): pass
#export
@log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch')
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods:
exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
###Output
_____no_output_____
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (call `stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
DataLoader
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler = (
None,False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception): pass
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods:
exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
###Output
_____no_output_____
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (call `stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(o) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 45_collab.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
###Markdown
DataLoader helpers fastai includes a replacement for Pytorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
def _fn_noops(self, x=None, *args, **kwargs): return x
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,_fn_noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout,persistent_workers')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = (hasattr(dataset,'__getitem__')
and not isinstance(dataset, IterableDataset))
if not indexed and shuffle: raise ValueError("Can only shuffle an indexed dataset (not an iterable one).")
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
if sys.platform == "win32" and IN_NOTEBOOK and num_workers > 0:
print("Due to IPython and Windows limitation, python multiprocessing isn't available now.")
print("So `number_workers` is changed to 0 to avoid getting stuck")
num_workers = 0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
# fix issue 2899. If the process start method isn't fork, the data will be copied to cuda in learner one_batch.
if self.device is not None and multiprocessing.get_start_method().lower() == "fork":
b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
if self.dataset is not None: self.it = iter(self.dataset)
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods:
o = getattr(self, n)
if not isinstance(o, MethodType): cur_kwargs[n] = o
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s):
if self.indexed: return self.dataset[s or 0]
elif s is None: return next(self.it)
else: raise IndexError("Cannot index an iterable dataset numerically - must use `None`.")
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Subset of the dataset containing the index values of sample if exists, else next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details.",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): The `DataLoader` will make a guess as to whether the dataset can be indexed (or is iterable), but you can override it with this parameter. `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using iterable-style dataset, you can specify the size with `n`.* `device` (torch.device): Defaults to `default_device()` which is CUDA by default. You can specify device as `torch.device('cpu')`. Override `item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_num_workers = 0 if sys.platform == "win32" else 4
test_eq(dl.fake_l.num_workers, test_num_workers)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, test_num_workers)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
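# Because `DataLoader` is decorated with `@funcs_kwargs`, any method listed in `_methods`
# can be swapped in the same way by passing a plain function, not just `create_item`.
# A small sketch (the helper `_add_one` is made up for illustration):
def _add_one(x): return x + 1
L(DataLoader(range(8), bs=4, after_item=_add_one))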
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2], dtype=np.int64),array([3,4,5], dtype=np.int64))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
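# Sketch: when the dataset is an iterator with no `__len__`, passing `n` gives the
# loader a length so `len()` works (the name `dl_n` and the numbers are arbitrary):
dl_n = DataLoader(map(noop, range(10)), bs=4, n=10)
test_eq(len(dl_n), 3)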
###Output
_____no_output_____
###Markdown
Iterable dataloaders require specific tests.
###Code
class DummyIterableDataset(IterableDataset):
def __iter__(self):
yield from range(11)
ds1 = DataLoader(DummyIterableDataset(), bs=4)
# Check it yields fine, and check we can do multiple passes
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10])))
# Check `drop_last` works fine (with multiple passes, since this will prematurely terminate the iterator)
ds1 = DataLoader(DummyIterableDataset(), bs=4, drop_last=True)
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7])))
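# Sketch: an `IterableDataset` is never treated as indexed, so asking it to shuffle hits
# the guard in `__init__` above (assumes fastcore's `test_fail` with its `contains` argument):
test_fail(lambda: DataLoader(DummyIterableDataset(), bs=4, shuffle=True), contains="Can only shuffle an indexed dataset")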
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
L(dl)
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
if not (sys.platform == "win32" and IN_NOTEBOOK):
%time test_shuffled(L(DataLoader(it, num_workers=4)), L(range(30)))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
from subprocess import Popen, PIPE
# test num_workers > 0 in scripts works when python process start method is spawn
process = Popen(["python", "dltest.py"], stdout=PIPE)
_, err = process.communicate(timeout=15)
exit_code = process.wait()
test_eq(exit_code, 0)
###Output
_____no_output_____
###Markdown
DataLoader helpers fastai includes a replacement for Pytorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
def _fn_noops(self, x=None, *args, **kwargs): return x
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,_fn_noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout,persistent_workers')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = (hasattr(dataset,'__getitem__')
and not isinstance(dataset, IterableDataset))
if not indexed and shuffle: raise ValueError("Can only shuffle an indexed dataset (not an iterable one).")
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
if sys.platform == "win32" and IN_NOTEBOOK and num_workers > 0:
print("Due to IPython and Windows limitation, python multiprocessing isn't available now.")
print("So `number_workers` is changed to 0 to avoid getting stuck")
num_workers = 0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
# fix issue 2899. If the process start method isn't fork, the data will be copied to cuda in learner one_batch.
if self.device is not None and multiprocessing.get_start_method().lower() == "fork":
b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
if self.dataset is not None: self.it = iter(self.dataset)
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods:
o = getattr(self, n)
if not isinstance(o, MethodType): cur_kwargs[n] = o
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s):
if self.indexed: return self.dataset[s or 0]
elif s is None: return next(self.it)
else: raise IndexError("Cannot index an iterable dataset numerically - must use `None`.")
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Subset of the dataset containing the index values of sample if exists, else next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details.",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): The `DataLoader` will make a guess as to whether the dataset can be indexed (or is iterable), but you can override it with this parameter. `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using iterable-style dataset, you can specify the size with `n`.* `device` (torch.device): Defaults to `default_device()` which is CUDA by default. You can specify device as `torch.device('cpu')`. Override `item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_num_workers = 0 if sys.platform == "win32" else 4
test_eq(dl.fake_l.num_workers, test_num_workers)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, test_num_workers)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
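# `SkipItemException` can be raised from any item hook to drop that item from the batch.
# A sketch (the `_skip_odd` helper is made up for illustration):
def _skip_odd(x):
    if x % 2: raise SkipItemException()
    return x
test_eq(L(DataLoader(range(6), bs=2, after_item=_skip_odd)), [tensor([0,2]),tensor([4])])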
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2], dtype=np.int64),array([3,4,5], dtype=np.int64))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
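# `new` clones a `DataLoader` while overriding selected arguments (sketch, reusing the
# `letters` list from earlier in this notebook; `dl_a`/`dl_b` are illustrative names):
dl_a = DataLoader(letters, bs=4)
dl_b = dl_a.new(bs=2, shuffle=True)
test_eq((len(dl_a), len(dl_b)), (7, 13))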
###Output
_____no_output_____
###Markdown
Iterable dataloaders require specific tests.
###Code
class DummyIterableDataset(IterableDataset):
def __iter__(self):
yield from range(11)
ds1 = DataLoader(DummyIterableDataset(), bs=4)
# Check it yields fine, and check we can do multiple passes
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10])))
# Check `drop_last` works fine (with multiple passes, since this will prematurely terminate the iterator)
ds1 = DataLoader(DummyIterableDataset(), bs=4, drop_last=True)
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7])))
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
L(dl)
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
if not (sys.platform == "win32" and IN_NOTEBOOK):
%time test_shuffled(L(DataLoader(it, num_workers=4)), L(range(30)))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
# from subprocess import Popen, PIPE
# # test num_workers > 0 in scripts works when python process start method is spawn
# process = Popen(["python", "dltest.py"], stdout=PIPE)
# _, err = process.communicate(timeout=15)
# exit_code = process.wait()
# test_eq(exit_code, 0)
###Output
_____no_output_____
###Markdown
DataLoader
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader(GetAttr):
_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler = (
False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception): pass
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods:
exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
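# `chunkify` defers to fastcore's `chunked` (used above): items stream into `bs`-sized
# groups, and the trailing partial group is kept unless the third argument (`drop_last`)
# is True. A quick illustration:
list(chunked(range(10), 4)), list(chunked(range(10), 4, True))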
###Output
_____no_output_____
###Markdown
Override `item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(o) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.learner.ipynb.
Converted 43_tabular.model.ipynb.
Converted 45_collab.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
Converted migrating.ipynb.
###Markdown
DataLoader
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler = (
None,False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception): pass
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods:
exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
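# `get_idxs` draws from fastcore's infinite iterators: `Inf.count` (0,1,2,...) for an
# indexed dataset, `Inf.nones` (None,None,...) otherwise, sliced down to `n` items.
# Illustration mirroring the calls in the class above:
list(itertools.islice(Inf.count, 5)), list(itertools.islice(Inf.nones, 3))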
###Output
_____no_output_____
###Markdown
Override `item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
Data Load> Using the fastai2 `Datasets` to make a time series dataset. For now everything is univariate, but in the future I would also like to add multivariate support. TODO reduce mem: https://forums.fast.ai/t/how-to-handle-dataframes-too-large-to-fit-in-memory/39208/19
###Code
# export
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
Dataloader
###Code
# export
@delegates()
class TSDataLoader(TfmdDL):
def __init__(self, dataset, horizon, lookback=72, step=1, min_seq_len=None, max_std= 2, **kwargs):
self.horizon, self.lookback, self.step, self.max_std = horizon, lookback, step, max_std
self.min_seq_len = ifnone(min_seq_len, lookback)
self.dataset = [o.float() for o in L(dataset).map(tensor)]
n = self.make_ids()
super().__init__(dataset=self.dataset, **kwargs)
self.n = n
self.skipped= []
@delegates(TfmdDL.new)
def new(self, dataset=None, cls=None, **kwargs):
res = super().new(dataset, cls, horizon=self.horizon, lookback=self.lookback, step=self.step , **kwargs)
res.make_ids()
return res
def make_ids(self):
"""Make ids if the sequence is shorter than `min_seq_len`, it will drop that sequence."""
# Slice each time series into examples, assigning IDs to each
last_id = 0
n_dropped = 0
n_needs_padding = 0
self._ids = {}
for i, ts in enumerate(self.dataset):
if isinstance(ts,tuple):
ts = ts[0] # no idea why they become tuples
num_examples = (ts.shape[-1] - self.lookback - self.horizon + self.step) // self.step
# Time series shorter than `min_seq_len` need to be dropped.
if ts.shape[-1] < self.min_seq_len:
n_dropped += 1
continue
# For short time series zero pad the input
if ts.shape[-1] < self.lookback + self.horizon:
n_needs_padding += 1
num_examples = 1
for j in range(num_examples):
self._ids[last_id + j] = (i, j * self.step)
last_id += num_examples
# Inform user about time series that were too short
if n_dropped > 0:
print("Dropped {}/{} time series due to length.".format(
n_dropped, len(self.dataset)))
# Inform user about time series that were short
if n_needs_padding > 0:
print("Need to pad {}/{} time series due to length.".format(
n_needs_padding, len(self.dataset)))
# Store the number of training examples
return int(self._ids.__len__() )
def shuffle_fn(self, idxs):
# self.dataset.shuffle()
return idxs
def get_id(self, idx):
# Get time series
ts_id, lookback_id = self._ids[idx]
ts = self.dataset[ts_id]
if isinstance(ts,tuple):
ts = ts[0] # no idea why they become tuples
# Prepare input and target. Zero pad if necessary.
if ts.shape[-1] < self.lookback + self.horizon:
# If the time series is too short, we zero pad
x = ts[:, :-self.horizon]
mean = x.mean()
x = np.pad(
x,
pad_width=((0, 0), (self.lookback - x.shape[-1], 0)),
mode='constant',
constant_values=mean
)
y = ts[:,-self.lookback + self.horizon:]
y = np.pad(
y,
pad_width=((0, 0), (self.lookback + self.horizon - y.shape[-1], 0)),
mode='constant',
constant_values=mean
)
assert y.shape == (1,self.lookback+self.horizon), f"{y.shape}\t,{idx}, , 'tsshape':{ts.shape},'ts_id':{ts_id}"
else:
x = ts[:,lookback_id:lookback_id + self.lookback]
y = ts[:,lookback_id:lookback_id + self.lookback + self.horizon]
return x, y
def create_item(self, idx):
if idx>=self.n:
raise IndexError
x, y = self.get_id(idx)
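        # Skip items whose target values, once scaled by the input's spread, still vary too much (crude outlier filter)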
if (y/(x.std()+1e-7)).std() > self.max_std:
if idx not in self.skipped:
# print(f"idx: {idx};y.std to high: {(y/x.std()).std()} > {self.max_std}")
self.skipped.append(idx)
raise SkipItemException()
return TSTensorSeq(x),TSTensorSeqy(y)
# hide
# basic test
horizon,lookback = 8,2
ints = L(np.arange(10)[None,:])
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback)
for o in dl:
pass
# hide
# check padding
horizon,lookback = 2,10
ints = L(np.arange(10)[None,:])
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback)
for o in dl:
test_eq(o[0][0,0,0],o[0].mean(-1))
# hide
# skip due to large y.std/x.std ratio
horizon,lookback = 2,10
ints = L(np.concatenate([np.ones(11),np.array([1e10])])[None,:])
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback)
test_eq(list(dl),[])
# hide
horizon,lookback = 2,5
ints = L(np.arange(7)[None,:],np.arange(7,15)[None,:],np.arange(15,25)[None,:]).map(tensor)
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback, bs=2)
test_eq(dl.n, len(dl._ids))
test_eq(dl.n, 7)
test_eq(len(dl),4)
test_eq(dl.one_batch()[0].dtype,torch.float)
for o in dl:pass
# hide
ints = L(np.arange(10)[None,:],np.arange(20,30)[None,:],np.arange(40,50)[None,:])
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback, bs=6, shuffle=False)
test_eq(dl.n, len(dl._ids))
test_eq(dl.n, 12)
test_eq(len(dl), 2)
dl_new = dl.new()
test_eq(dl_new.lookback,dl.lookback)
test_eq(dl_new.n, dl.n)
test_eq(len(dl_new), len(dl))
test_eq(dl.one_batch(),dl_new.one_batch())
# hide
ints = L(np.arange(10)[None,:])
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback, bs=6, shuffle=True)
dl.dataset=L(np.arange(1,11)[None,:])
for o in dl.one_batch():
test_eq(0 in o, False)
# hide
horizon,lookback = 2,5
ints = L(np.arange(7.)[None,:],np.arange(7,15.)[None,:],np.arange(15,20.)[None,:],np.arange(20,29.)[None,:]).map(tensor)
dl = TSDataLoader(ints, horizon, lookback, norm=False)
test_eq(L(dl)[0][0][0,0].dtype,torch.float)
horizon,lookback = 2,5
ints = L(np.arange(9.)[None,:],np.arange(9.,14)[None,:]).map(tensor)
ints
dl = TSDataLoader(ints, horizon, lookback, step=2, norm=False)
list(dl)
###Output
Need to pad 1/2 time series due to length.
###Markdown
The first sequence (0 to 8) is transformed into two items: one with x from 0 to 4 and y from 0 to 6, and the next shifted by two because `step == 2`. The second sequence (and third resulting item) is not long enough and is therefore padded with the mean of x (`10`). Note that both x and y are padded with the mean of x. Showing
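###Markdown
As a quick sanity check of that arithmetic (a sketch reusing the `dl` built just above): `make_ids` produces `(len - lookback - horizon + step) // step` items per series, and a series too short for a full window collapses to a single padded item.
###Code
# a sketch: reproduce the id arithmetic of `make_ids` for the two series above
horizon, lookback, step = 2, 5, 2
for n in (9, 5):  # lengths of the two series
    num_examples = (n - lookback - horizon + step) // step
    print(n, max(num_examples, 1))  # short series are padded into a single item
test_eq(len(dl._ids), 3)
###Output
_____no_output_____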
###Code
# export
from fastai2.vision.data import get_grid
@typedispatch
def show_batch(x: TensorSeq, y, samples, ctxs=None, max_n=10,nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
ctxs = show_batch[object](x, y, samples=samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_results(x: TensorSeq, y, samples, outs, ctxs=None, max_n=9,nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
for i in range(len(outs[0])):
ctxs = [TSTensorSeqy(b ,m='*r', label='pred').show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(i),ctxs,range(max_n))]
for i in range(len(samples[0])):
ctxs = [b.show(ctx=c, **kwargs) for b, c, _ in zip(samples.itemgot(i),ctxs,range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
Integration Example
###Code
from fastseq.core import *
from fastai2.basics import *
path = untar_data(URLs.m4_daily)
df_train = pd.read_csv(path/'train.csv',nrows=300)
df_test = pd.read_csv(path/'val.csv')
df_test.head()
horizon = 14
lookback = 14*3
items = ts_lists(df_train.iloc[:,1:].values)
splits = RandomSplitter()(items)
dl = TSDataLoader(items, horizon = horizon, lookback = lookback, step=5)
dl.show_batch()
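# A sketch: the `splits` computed above could drive separate train/validation loaders over the same items
train_dl = TSDataLoader(items[splits[0]], horizon=horizon, lookback=lookback, step=5)
valid_dl = TSDataLoader(items[splits[1]], horizon=horizon, lookback=lookback, step=5)
len(train_dl), len(valid_dl)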
# hide
for o in dl:
pass
for o in dl:
pass
# hide
from nbdev.export import *
notebook2script()
###Output
Converted 00_core.ipynb.
Converted 01_data.external.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 05_nbeats.models.ipynb.
Converted 06_nbeats.callbacks.ipynb.
Converted 07_nbeats.learner.ipynb.
Converted 08_nbeats.interpret.ipynb.
Converted 11_metrics.ipynb.
Converted 12_compare.ipynb.
Converted index.ipynb.
###Markdown
DataLoader
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
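    # Mimics the attributes PyTorch's DataLoader iterators expect, so fastai can reuse torch's worker machinery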
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler = (
None,False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception): pass
#export
@log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch')
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods:
exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
###Output
_____no_output_____
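###Markdown
The noop hooks (`after_item`, `before_batch`, `after_batch`, ...) can also be passed straight in as keyword arguments thanks to `@funcs_kwargs`; a minimal sketch with two made-up helpers:
###Code
def _add_one(o): return o + 1        # applied to every item, after `create_item`
def _to_float(b): return b.float()   # applied to every collated batch
dl = DataLoader(list(range(8)), bs=4, after_item=_add_one, after_batch=_to_float)
list(dl)
###Output
_____no_output_____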
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
DataLoader helpers fastai includes a replacement for Pytorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
def _fn_noops(self, x=None, *args, **kwargs): return x
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,_fn_noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout,persistent_workers')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = (hasattr(dataset,'__getitem__')
and not isinstance(dataset, IterableDataset))
if not indexed and shuffle: raise ValueError("Can only shuffle an indexed dataset (not an iterable one).")
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
if sys.platform == "win32" and IN_NOTEBOOK and num_workers > 0:
print("Due to IPython and Windows limitation, python multiprocessing isn't available now.")
print("So `number_workers` is changed to 0 to avoid getting stuck")
num_workers = 0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
# pin_memory causes tuples to be converted to lists, so convert them back to tuples
if self.pin_memory and type(b) == list: b = tuple(b)
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
if self.dataset is not None: self.it = iter(self.dataset)
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods:
o = getattr(self, n)
if not isinstance(o, MethodType): cur_kwargs[n] = o
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s):
if self.indexed: return self.dataset[s or 0]
elif s is None: return next(self.it)
else: raise IndexError("Cannot index an iterable dataset numerically - must use `None`.")
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Subset of the dataset containing the index values of sample if exists, else next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details.",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
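###Markdown
`new` clones a `DataLoader`, overriding only the arguments you pass; a small sketch:
###Code
dl = DataLoader(range(12), bs=4)
dl2 = dl.new(bs=3, shuffle=True)
test_eq((len(dl), len(dl2)), (3, 4))
test_eq(sorted(torch.cat(list(dl2)).tolist()), list(range(12)))
###Output
_____no_output_____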
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): The `DataLoader` will make a guess as to whether the dataset can be indexed (or is iterable), but you can override it with this parameter. `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using iterable-style dataset, you can specify the size with `n`.* `device` (torch.device): Defaults to `default_device()` which is CUDA by default. You can specify device as `torch.device('cpu')`. Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_num_workers = 0 if sys.platform == "win32" else 4
test_eq(dl.fake_l.num_workers, test_num_workers)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, test_num_workers)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
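###Markdown
`RandDL` uses `stop()` to end the whole stream, whereas `SkipItemException` (shown earlier) drops individual items; a sketch of a toy subclass that keeps only even numbers:
###Code
class EvenDL(DataLoader):
    def create_item(self, s):
        if s % 2: raise SkipItemException()  # odd indices are silently skipped
        return s
test_eq(list(EvenDL(list(range(10)), bs=2)), [tensor([0,2]), tensor([4,6]), tensor([8])])
###Output
_____no_output_____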
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2], dtype=np.int64),array([3,4,5], dtype=np.int64))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
###Output
_____no_output_____
###Markdown
Iterable dataloaders require specific tests.
###Code
class DummyIterableDataset(IterableDataset):
def __iter__(self):
yield from range(11)
ds1 = DataLoader(DummyIterableDataset(), bs=4)
# Check it yields fine, and check we can do multiple passes
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10])))
# Check `drop_last` works fine (with multiple passes, since this will prematurely terminate the iterator)
ds1 = DataLoader(DummyIterableDataset(), bs=4, drop_last=True)
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7])))
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
L(dl)
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
if not (sys.platform == "win32" and IN_NOTEBOOK):
%time test_shuffled(L(DataLoader(it, num_workers=4)), L(range(30)))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
# from subprocess import Popen, PIPE
# # test num_workers > 0 in scripts works when python process start method is spawn
# process = Popen(["python", "dltest.py"], stdout=PIPE)
# _, err = process.communicate(timeout=15)
# exit_code = process.wait()
# test_eq(exit_code, 0)
###Output
_____no_output_____
###Markdown
DataLoaders> The `DataLoader` class
###Code
#|export
from __future__ import annotations
from fastai.torch_basics import *
from torch.utils.data.dataloader import _MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter,_DatasetKind
_loaders = (_MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter)
#|hide
from nbdev.showdoc import *
bs = 4
letters = list(string.ascii_lowercase)
###Output
_____no_output_____
###Markdown
DataLoader helpers fastai includes a replacement for Pytorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#|export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
def _fn_noops(self, x=None, *args, **kwargs): return x
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,_fn_noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers,pin_memory_device):
self.dataset,self.default,self.worker_init_fn,self.pin_memory_device = self,d,_wif,pin_memory_device
store_attr('d,pin_memory,num_workers,timeout,persistent_workers,pin_memory_device')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#|export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#|export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#|export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
#|export
def collate_error(e:Exception, batch):
"Raises error when the batch could not collate, stating what items in the batch are different sizes and their types"
err = f'Error when trying to collate the data into batches with fa_collate, at least two tensors in the batch are not the same size.\n\n'
# we need to iterate through the entire batch and find a mismatch
length = len(batch[0])
for idx in range(length): # for each type in the batch
for i, item in enumerate(batch):
if i == 0: shape_a, type_a = item[idx].shape, item[idx].__class__.__name__
elif item[idx].shape != shape_a:
shape_b = item[idx].shape
if shape_a != shape_b:
err += f'Mismatch found on axis {idx} of the batch and is of type `{type_a}`:\n\tItem at index 0 has shape: {shape_a}\n\tItem at index {i} has shape: {shape_b}\n\nPlease include a transform in `after_item` that ensures all data of type {type_a} is the same size'
e.args = [err]
raise
#|hide
batch = [torch.rand(3, 375, 500), torch.rand(3, 375, 500), torch.rand(3, 500, 333)]
with ExceptionExpected(RuntimeError, "Mismatch found on axis 0 of the batch and is of type `Tensor`"):
try:
fa_collate(batch)
except Exception as e:
collate_error(e, batch)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#|export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False,
pin_memory_device='', **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = (hasattr(dataset,'__getitem__')
and not isinstance(dataset, IterableDataset))
if not indexed and shuffle: raise ValueError("Can only shuffle an indexed dataset (not an iterable one).")
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
if sys.platform == "win32" and IN_NOTEBOOK and num_workers > 0:
print("Due to IPython and Windows limitation, python multiprocessing isn't available now.")
print("So `number_workers` is changed to 0 to avoid getting stuck")
num_workers = 0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers,
pin_memory_device=pin_memory_device)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
# pin_memory causes tuples to be converted to lists, so convert them back to tuples
if self.pin_memory and type(b) == list: b = tuple(b)
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
if self.dataset is not None: self.it = iter(self.dataset)
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods:
o = getattr(self, n)
if not isinstance(o, MethodType): cur_kwargs[n] = o
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s):
if self.indexed: return self.dataset[s or 0]
elif s is None: return next(self.it)
else: raise IndexError("Cannot index an iterable dataset numerically - must use `None`.")
def create_batch(self, b):
try: return (fa_collate,fa_convert)[self.prebatched](b)
except Exception as e:
if not self.prebatched: collate_error(e,b)
raise
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#|export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Subset of the dataset containing the index values of sample if exists, else next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details.",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
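###Markdown
For an iterable-style source with no `__len__`, passing `n` lets `len(dl)` (and anything built on it) work; a sketch with a throwaway generator:
###Code
def gen(): yield from range(10)  # toy iterable source with no length
dl = DataLoader(gen(), bs=4, n=10)
test_eq(len(dl), 3)
test_eq(L(dl), L(tensor([0,1,2,3]), tensor([4,5,6,7]), tensor([8,9])))
###Output
_____no_output_____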
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): The `DataLoader` will make a guess as to whether the dataset can be indexed (or is iterable), but you can override it with this parameter. `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using iterable-style dataset, you can specify the size with `n`.* `device` (torch.device): Defaults to `default_device()` which is CUDA by default. You can specify device as `torch.device('cpu')`. Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_num_workers = 0 if sys.platform == "win32" else 4
test_eq(dl.fake_l.num_workers, test_num_workers)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, test_num_workers)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2], dtype=np.int64),array([3,4,5], dtype=np.int64))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
###Output
_____no_output_____
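###Markdown
Mismatched item shapes are exactly what `collate_error` above reports; one way around it is a transform in `after_item` that brings every item to a common size before `fa_collate` stacks the batch. A sketch with arbitrary shapes:
###Code
items = [torch.rand(3, 8), torch.rand(3, 10), torch.rand(3, 12)]  # unequal widths would not collate
dl = DataLoader(items, bs=3, after_item=lambda t: t[..., :8])     # crop every item to the same width
test_eq(first(dl).shape, (3, 3, 8))
###Output
_____no_output_____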
###Markdown
Iterable dataloaders require specific tests.
###Code
class DummyIterableDataset(IterableDataset):
def __iter__(self):
yield from range(11)
ds1 = DataLoader(DummyIterableDataset(), bs=4)
# Check it yields fine, and check we can do multiple passes
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10])))
# Check `drop_last` works fine (with multiple passes, since this will prematurely terminate the iterator)
ds1 = DataLoader(DummyIterableDataset(), bs=4, drop_last=True)
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7])))
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
L(dl)
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
if not (sys.platform == "win32" and IN_NOTEBOOK):
%time test_shuffled(L(DataLoader(it, num_workers=4)), L(range(30)))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
###Markdown
Export -
###Code
#|hide
from nbdev.export import notebook2script
notebook2script()
# from subprocess import Popen, PIPE
# # test num_workers > 0 in scripts works when python process start method is spawn
# process = Popen(["python", "dltest.py"], stdout=PIPE)
# _, err = process.communicate(timeout=15)
# exit_code = process.wait()
# test_eq(exit_code, 0)
###Output
_____no_output_____
###Markdown
DataLoader helpers fastai includes a replacement for Pytorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
def _fn_noops(self, x=None, *args, **kwargs): return x
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,_fn_noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout,persistent_workers')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = (hasattr(dataset,'__getitem__')
and not isinstance(dataset, IterableDataset))
if not indexed and shuffle: raise ValueError("Can only shuffle an indexed dataset (not an iterable one).")
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
if sys.platform == "win32" and IN_NOTEBOOK and num_workers > 0:
print("Due to IPython and Windows limitation, python multiprocessing isn't available now.")
print("So `number_workers` is changed to 0 to avoid getting stuck")
num_workers = 0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
if self.dataset is not None: self.it = iter(self.dataset)
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods:
o = getattr(self, n)
if not isinstance(o, MethodType): cur_kwargs[n] = o
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s):
if self.indexed: return self.dataset[s or 0]
elif s is None: return next(self.it)
else: raise IndexError("Cannot index an iterable dataset numerically - must use `None`.")
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Subset of the dataset containing the index values of sample if exists, else next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details.",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): The `DataLoader` will make a guess as to whether the dataset can be indexed (or is iterable), but you can override it with this parameter. `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using iterable-style dataset, you can specify the size with `n`.* `device` (torch.device): Defaults to `default_device()` which is CUDA by default. You can specify device as `torch.device('cpu')`. Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
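As a quick, hypothetical illustration of these arguments (not one of the library's own tests), a map-style list can be batched directly; with `bs=3` and `drop_last=False` the final partial batch is kept:
###Code
# hedged sketch: a plain list is collated into tensor batches, last partial batch kept
dl = DataLoader(list(range(10)), bs=3, shuffle=False, drop_last=False)
list(dl)   # expected: [tensor([0,1,2]), tensor([3,4,5]), tensor([6,7,8]), tensor([9])]
###Output
_____no_output_____
###Markdown
The examples below exercise the same API by overriding `create_item`.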
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_num_workers = 0 if sys.platform == "win32" else 4
test_eq(dl.fake_l.num_workers, test_num_workers)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, test_num_workers)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2], dtype=np.int64),array([3,4,5], dtype=np.int64))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
###Output
_____no_output_____
###Markdown
Iterable dataloaders require specific tests.
###Code
class DummyIterableDataset(IterableDataset):
def __iter__(self):
yield from range(11)
ds1 = DataLoader(DummyIterableDataset(), bs=4)
# Check it yields fine, and check we can do multiple passes
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10])))
# Check `drop_last` works fine (with multiple passes, since this will prematurely terminate the iterator)
ds1 = DataLoader(DummyIterableDataset(), bs=4, drop_last=True)
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7])))
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
L(dl)
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
if not (sys.platform == "win32" and IN_NOTEBOOK):
%time test_shuffled(L(DataLoader(it, num_workers=4)), L(range(30)))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
# from subprocess import Popen, PIPE
# # test num_workers > 0 in scripts works when python process start method is spawn
# process = Popen(["python", "dltest.py"], stdout=PIPE)
# _, err = process.communicate(timeout=15)
# exit_code = process.wait()
# test_eq(exit_code, 0)
###Output
_____no_output_____
###Markdown
Data Load> Using the fastai2 `Datasets` to make a time series dataset. For now everything is univariate, but in the future I would also like to add multivariate support. TODO reduce mem: https://forums.fast.ai/t/how-to-handle-dataframes-too-large-to-fit-in-memory/39208/19
###Code
# export
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
Dataloader
###Code
# export
@delegates()
class TSDataLoader(TfmdDL):
def __init__(self, dataset, horizon, lookback=72, step=1, min_seq_len=None, max_std= 2, **kwargs):
self.horizon, self.lookback, self.step, self.max_std = horizon, lookback, step, max_std
self.min_seq_len = ifnone(min_seq_len, lookback)
self.dataset = [o.float() for o in L(dataset).map(tensor)]
n = self.make_ids()
super().__init__(dataset=self.dataset, **kwargs)
self.n = n
self.skipped= []
@delegates(TfmdDL.new)
def new(self, dataset=None, cls=None, **kwargs):
res = super().new(dataset, cls, horizon=self.horizon, lookback=self.lookback, step=self.step , **kwargs)
res.make_ids()
return res
def make_ids(self):
"""Make ids if the sequence is shorter than `min_seq_len`, it will drop that sequence."""
# Slice each time series into examples, assigning IDs to each
last_id = 0
n_dropped = 0
n_needs_padding = 0
self._ids = {}
for i, ts in enumerate(self.dataset):
if isinstance(ts,tuple):
ts = ts[0] # no idea why they become tuples
num_examples = (ts.shape[-1] - self.lookback - self.horizon + self.step) // self.step
# Time series shorter than the forecast horizon need to be dropped.
if ts.shape[-1] < self.min_seq_len:
n_dropped += 1
continue
# For short time series zero pad the input
if ts.shape[-1] < self.lookback + self.horizon:
n_needs_padding += 1
num_examples = 1
for j in range(num_examples):
self._ids[last_id + j] = (i, j * self.step)
last_id += num_examples
# Inform user about time series that were too short
if n_dropped > 0:
print("Dropped {}/{} time series due to length.".format(
n_dropped, len(self.dataset)))
# Inform user about time series that were short
if n_needs_padding > 0:
print("Need to pad {}/{} time series due to length.".format(
n_needs_padding, len(self.dataset)))
# Store the number of training examples
return int(self._ids.__len__() )
def shuffle_fn(self, idxs):
# self.dataset.shuffle()
return idxs
def get_id(self, idx):
# Get time series
ts_id, lookback_id = self._ids[idx]
ts = self.dataset[ts_id]
if isinstance(ts,tuple):
ts = ts[0] # no idea why they become tuples
# Prepare input and target. Zero pad if necessary.
if ts.shape[-1] < self.lookback + self.horizon:
# If the time series is too short, we zero pad
x = ts[:, :-self.horizon]
mean = x.mean()
x = np.pad(
x,
pad_width=((0, 0), (self.lookback - x.shape[-1], 0)),
mode='constant',
constant_values=mean
)
y = ts[:,-self.lookback + self.horizon:]
y = np.pad(
y,
pad_width=((0, 0), (self.lookback + self.horizon - y.shape[-1], 0)),
mode='constant',
constant_values=mean
)
assert y.shape == (1,self.lookback+self.horizon), f"{y.shape}\t,{idx}, , 'tsshape':{ts.shape},'ts_id':{ts_id}"
else:
x = ts[:,lookback_id:lookback_id + self.lookback]
y = ts[:,lookback_id:lookback_id + self.lookback + self.horizon]
return x, y
def create_item(self, idx):
if idx>=self.n:
raise IndexError
x, y = self.get_id(idx)
if (y/(x.std()+1e-7)).std() > self.max_std:
if idx not in self.skipped:
# print(f"idx: {idx};y.std to high: {(y/x.std()).std()} > {self.max_std}")
self.skipped.append(idx)
raise SkipItemException()
return TSTensorSeq(x),TSTensorSeqy(y)
# hide
# basic test
horizon,lookback = 8,2
ints = L(np.arange(10)[None,:])
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback)
for o in dl:
pass
# hide
# check padding
horizon,lookback = 2,10
ints = L(np.arange(10)[None,:])
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback)
for o in dl:
test_eq(o[0][0,0,0],o[0].mean(-1))
# hide
# skip due to large y.std/x.std ratio
horizon,lookback = 2,10
ints = L(np.concatenate([np.ones(11),np.array([1e10])])[None,:])
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback)
test_eq(list(dl),[])
# hide
horizon,lookback = 2,5
ints = L(np.arange(7)[None,:],np.arange(7,15)[None,:],np.arange(15,25)[None,:]).map(tensor)
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback, bs=2)
test_eq(dl.n, len(dl._ids))
test_eq(dl.n, 7)
test_eq(len(dl),4)
test_eq(dl.one_batch()[0].dtype,torch.float)
for o in dl:pass
# hide
ints = L(np.arange(10)[None,:],np.arange(20,30)[None,:],np.arange(40,50)[None,:])
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback, bs=6, shuffle=False)
test_eq(dl.n, len(dl._ids))
test_eq(dl.n, 12)
test_eq(len(dl), 2)
dl_new = dl.new()
test_eq(dl_new.lookback,dl.lookback)
test_eq(dl_new.n, dl.n)
test_eq(len(dl_new), len(dl))
test_eq(dl.one_batch(),dl_new.one_batch())
# hide
ints = L(np.arange(10)[None,:])
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback, bs=6, shuffle=True)
dl.dataset=L(np.arange(1,11)[None,:])
for o in dl.one_batch():
test_eq(0 in o, False)
# hide
horizon,lookback = 2,5
ints = L(np.arange(7.)[None,:],np.arange(7,15.)[None,:],np.arange(15,20.)[None,:],np.arange(20,29.)[None,:]).map(tensor)
dl = TSDataLoader(ints, horizon, lookback, norm=False)
test_eq(L(dl)[0][0][0,0].dtype,torch.float)
horizon,lookback = 2,5
ints = L(np.arange(9.)[None,:],np.arange(9.,14)[None,:]).map(tensor)
ints
dl = TSDataLoader(ints, horizon, lookback, step=2, norm=False)
list(dl)
###Output
Need to pad 1/2 time series due to length.
###Markdown
The first sequence (0 to 8) is transformed into two items: one with x from 0 to 4 and y from 0 to 6, and the next one shifted by just two because `step` == 2. The second sequence (and third resulting item) is not long enough and is therefore padded with the mean of x (`10`). Note that both x and y are padded with the mean of x.
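As a rough sketch (not part of fastseq), the item counts above can be reproduced with the same window arithmetic `make_ids` uses, assuming the `horizon=2, lookback=5, step=2` of the cell above:
###Code
# hedged illustration mirroring make_ids: count sliding windows per series
horizon, lookback, step = 2, 5, 2
for ts_len in (9, 5):                                # lengths of the two series above
    if ts_len < lookback + horizon: n_windows = 1    # too short -> one mean-padded item
    else: n_windows = (ts_len - lookback - horizon + step) // step
    print(ts_len, n_windows)                         # expected: 9 -> 2 windows, 5 -> 1 padded window
###Output
_____no_output_____
###Markdown
Showing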
###Code
# export
from fastai2.vision.data import get_grid
@typedispatch
def show_batch(x: TensorSeq, y, samples, ctxs=None, max_n=10,nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
ctxs = show_batch[object](x, y, samples=samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_results(x: TensorSeq, y, samples, outs, ctxs=None, max_n=9,nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
for i in range(len(outs[0])):
ctxs = [TSTensorSeqy(b ,m='*r', label='pred').show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(i),ctxs,range(max_n))]
for i in range(len(samples[0])):
ctxs = [b.show(ctx=c, **kwargs) for b, c, _ in zip(samples.itemgot(i),ctxs,range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
Integration Example
###Code
from fastseq.core import *
from fastai2.basics import *
path = untar_data(URLs.m4_daily)
df_train = pd.read_csv(path/'train.csv',nrows=300)
df_test = pd.read_csv(path/'val.csv')
df_test.head()
horizon = 14
lookback = 14*3
items = ts_lists(df_train.iloc[:,1:].values)
splits = RandomSplitter()(items)
dl = TSDataLoader(items, horizon = horizon, lookback = lookback, step=5)
dl.show_batch()
# hide
for o in dl:
pass
for o in dl:
pass
# hide
from nbdev.export import *
notebook2script()
###Output
Converted 00_core.ipynb.
Converted 01_data.external.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 05_nbeats.models.ipynb.
Converted 06_nbeats.callbacks.ipynb.
Converted 07_nbeats.learner.ipynb.
Converted 08_nbeats.interpret.ipynb.
Converted 11_metrics.ipynb.
Converted 12_compare.ipynb.
Converted index.ipynb.
###Markdown
DataLoader helpers fastai includes a replacement for Pytorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
def _fn_noops(self, x=None, *args, **kwargs): return x
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,_fn_noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout,persistent_workers')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
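###Markdown
A minimal, hypothetical sketch (not one of the notebook's own tests) of how `SkipItemException` is used with the `DataLoader` defined below: a subclass raises it from `create_item`, and `do_item` silently drops that item before batching.
###Code
# hedged sketch: filter out odd numbers by raising SkipItemException from create_item
class EvenOnlyDL(DataLoader):
    def create_item(self, s):
        item = super().create_item(s)
        if item % 2: raise SkipItemException()  # skipped items never reach a batch
        return item
L(EvenOnlyDL(list(range(10)), bs=2))  # expected: tensor([0,2]), tensor([4,6]), tensor([8])
###Output
_____no_output_____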
###Markdown
DataLoader -
###Code
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = (hasattr(dataset,'__getitem__')
and not isinstance(dataset, IterableDataset))
if not indexed and shuffle: raise ValueError("Can only shuffle an indexed dataset (not an iterable one).")
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
if sys.platform == "win32" and IN_NOTEBOOK and num_workers > 0:
print("Due to IPython and Windows limitation, python multiprocessing isn't available now.")
print("So `number_workers` is changed to 0 to avoid getting stuck")
num_workers = 0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
if self.dataset is not None: self.it = iter(self.dataset)
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods:
o = getattr(self, n)
if not isinstance(o, MethodType): cur_kwargs[n] = o
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s):
if self.indexed: return self.dataset[s or 0]
elif s is None: return next(self.it)
else: raise IndexError("Cannot index an iterable dataset numerically - must use `None`.")
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Subset of the dataset containing the index values of sample if exists, else next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details.",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): The `DataLoader` will make a guess as to whether the dataset can be indexed (or is iterable), but you can override it with this parameter. `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using iterable-style dataset, you can specify the size with `n`.* `device` (torch.device): Defaults to `default_device()` which is CUDA by default. You can specify device as `torch.device('cpu')`. Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_num_workers = 0 if sys.platform == "win32" else 4
test_eq(dl.fake_l.num_workers, test_num_workers)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, test_num_workers)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2], dtype=np.int64),array([3,4,5], dtype=np.int64))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
###Output
_____no_output_____
###Markdown
Iterable dataloaders require specific tests.
###Code
class DummyIterableDataset(IterableDataset):
def __iter__(self):
yield from range(11)
ds1 = DataLoader(DummyIterableDataset(), bs=4)
# Check it yields fine, and check we can do multiple passes
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10])))
# Check `drop_last` works fine (with multiple passes, since this will prematurely terminate the iterator)
ds1 = DataLoader(DummyIterableDataset(), bs=4, drop_last=True)
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7])))
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
L(dl)
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
if not (sys.platform == "win32" and IN_NOTEBOOK):
%time test_shuffled(L(DataLoader(it, num_workers=4)), L(range(30)))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
# from subprocess import Popen, PIPE
# # test num_workers > 0 in scripts works when python process start method is spawn
# process = Popen(["python", "dltest.py"], stdout=PIPE)
# _, err = process.communicate(timeout=15)
# exit_code = process.wait()
# test_eq(exit_code, 0)
###Output
_____no_output_____
###Markdown
DataLoader helpers fastai includes a replacement for Pytorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
def _fn_noops(self, x=None, *args, **kwargs): return x
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,_fn_noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout,persistent_workers')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = (hasattr(dataset,'__getitem__')
and not isinstance(dataset, IterableDataset))
if not indexed and shuffle: raise ValueError("Can only shuffle an indexed dataset (not an iterable one).")
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
if sys.platform == "win32" and IN_NOTEBOOK and num_workers > 0:
print("Due to IPython and Windows limitation, python multiprocessing isn't available now.")
print("So `number_workers` is changed to 0 to avoid getting stuck")
num_workers = 0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
# fix issue 2899. If the process start method isn't fork, the data will be copied to cuda in learner one_batch.
if self.device is not None and multiprocessing.get_start_method().lower() == "fork":
b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
if self.dataset is not None: self.it = iter(self.dataset)
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods:
o = getattr(self, n)
if not isinstance(o, MethodType): cur_kwargs[n] = o
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s):
if self.indexed: return self.dataset[s or 0]
elif s is None: return next(self.it)
else: raise IndexError("Cannot index an iterable dataset numerically - must use `None`.")
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Subset of the dataset containing the index values of sample if exists, else next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details.",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): The `DataLoader` will make a guess as to whether the dataset can be indexed (or is iterable), but you can override it with this parameter. `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using iterable-style dataset, you can specify the size with `n`.* `device` (torch.device): Defaults to `default_device()` which is CUDA by default. You can specify device as `torch.device('cpu')`. Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_num_workers = 0 if sys.platform == "win32" else 4
test_eq(dl.fake_l.num_workers, test_num_workers)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, test_num_workers)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2], dtype=np.int64),array([3,4,5], dtype=np.int64))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
###Output
_____no_output_____
###Markdown
Iterable dataloaders require specific tests.
###Code
class DummyIterableDataset(IterableDataset):
def __iter__(self):
yield from range(11)
ds1 = DataLoader(DummyIterableDataset(), bs=4)
# Check it yields fine, and check we can do multiple passes
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10])))
# Check `drop_last` works fine (with multiple passes, since this will prematurely terminate the iterator)
ds1 = DataLoader(DummyIterableDataset(), bs=4, drop_last=True)
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7])))
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
L(dl)
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
if not (sys.platform == "win32" and IN_NOTEBOOK):
%time test_shuffled(L(DataLoader(it, num_workers=4)), L(range(30)))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
from subprocess import Popen, PIPE
# test num_workers > 0 in scripts works when python process start method is spawn
process = Popen(["python", "dltest.py"], stdout=PIPE)
_, err = process.communicate(timeout=15)
exit_code = process.wait()
test_eq(exit_code, 0)
###Output
_____no_output_____
###Markdown
DataLoader helpers fastai includes a replacement for Pytorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch')
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Return a subset of the dataset containing the index values of the sample if there are samples, else return the next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details (https://pytorch.org/docs/stable/data.html#multi-process-data-loading).",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): Set to `False` if you are using an iterable-style dataset. Otherwise it is set to `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using an iterable-style dataset, you can specify the size with `n`.* `device` (torch.device): Defaults to `default_device()` which is CUDA by default. You can specify device as `torch.device('cpu')`. Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1. Note it does not need to use `self.rng` anymore to maintain consistent behavior across workers.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
DataLoader
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader(GetAttr):
_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler = (
False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception): pass
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods:
exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
###Output
_____no_output_____
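###Markdown
`shuffle_fn` can also be overridden to control the ordering used when `shuffle=True`. A minimal sketch (for illustration only, not part of the original notebook):
###Code
# Illustration only: replace random shuffling with a deterministic reversal
class ReverseDL(DataLoader):
    def shuffle_fn(self, idxs): return list(reversed(idxs))
test_eq(''.join(ReverseDL(letters, shuffle=True)), 'zyxwvutsrqponmlkjihgfedcba')
###Output
_____no_output_____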
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(o) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.learner.ipynb.
Converted 43_tabular.model.ipynb.
Converted 45_collab.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
Converted migrating.ipynb.
###Markdown
Data Load> Using the fastai `Datasets` to make a time series dataset. For now everything is univariate, but in the future I would also like to add multivariate support. TODO reduce mem: https://forums.fast.ai/t/how-to-handle-dataframes-too-large-to-fit-in-memory/39208/19
###Code
# export
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
Dataloader
###Code
# export
@delegates()
class TSDataLoader(TfmdDL):
def __init__(self, dataset, horizon, lookback=72, step=1, min_seq_len=None, max_std= 2, **kwargs):
self.horizon, self.lookback, self.step, self.max_std = horizon, lookback, step, max_std
self.min_seq_len = ifnone(min_seq_len, lookback)
self.dataset = [o.float() for o in L(dataset).map(tensor)]
n = self.make_ids()
super().__init__(dataset=self.dataset, **kwargs)
self.n = n
self.skipped= []
@delegates(TfmdDL.new)
def new(self, dataset=None, cls=None, **kwargs):
res = super().new(dataset, cls, horizon=self.horizon, lookback=self.lookback, step=self.step , **kwargs)
res.make_ids()
return res
def make_ids(self):
"""Make ids if the sequence is shorter than `min_seq_len`, it will drop that sequence."""
# Slice each time series into examples, assigning IDs to each
last_id = 0
n_dropped = 0
n_needs_padding = 0
self._ids = {}
for i, ts in enumerate(self.dataset):
if isinstance(ts,tuple):
ts = ts[0] # no idea why they become tuples
num_examples = (ts.shape[-1] - self.lookback - self.horizon + self.step) // self.step
# Time series shorter than the forecast horizon need to be dropped.
if ts.shape[-1] < self.min_seq_len:
n_dropped += 1
continue
# For short time series zero pad the input
if ts.shape[-1] < self.lookback + self.horizon:
n_needs_padding += 1
num_examples = 1
for j in range(num_examples):
self._ids[last_id + j] = (i, j * self.step)
last_id += num_examples
# Inform user about time series that were too short
if n_dropped > 0:
print("Dropped {}/{} time series due to length.".format(
n_dropped, len(self.dataset)))
# Inform user about time series that were short
if n_needs_padding > 0:
print("Need to pad {}/{} time series due to length.".format(
n_needs_padding, len(self.dataset)))
# Store the number of training examples
return int(self._ids.__len__() )
def shuffle_fn(self, idxs):
# self.dataset.shuffle()
return idxs
def get_id(self, idx):
# Get time series
ts_id, lookback_id = self._ids[idx]
ts = self.dataset[ts_id]
if isinstance(ts,tuple):
ts = ts[0] # no idea why they become tuples
# Prepare input and target. Zero pad if necessary.
if ts.shape[-1] < self.lookback + self.horizon:
# If the time series is too short, we zero pad
x = ts[:, :-self.horizon]
mean = x.mean()
x = np.pad(
x,
pad_width=((0, 0), (self.lookback - x.shape[-1], 0)),
mode='constant',
constant_values=mean
)
y = ts[:,-self.lookback + self.horizon:]
y = np.pad(
y,
pad_width=((0, 0), (self.lookback + self.horizon - y.shape[-1], 0)),
mode='constant',
constant_values=mean
)
assert y.shape == (1,self.lookback+self.horizon), f"{y.shape}\t,{idx}, , 'tsshape':{ts.shape},'ts_id':{ts_id}"
else:
x = ts[:,lookback_id:lookback_id + self.lookback]
y = ts[:,lookback_id:lookback_id + self.lookback + self.horizon]
return x, y
def create_item(self, idx):
if idx>=self.n:
raise IndexError
x, y = self.get_id(idx)
if (y/(x.std()+1e-7)).std() > self.max_std:
if idx not in self.skipped:
# print(f"idx: {idx};y.std to high: {(y/x.std()).std()} > {self.max_std}")
self.skipped.append(idx)
raise SkipItemException()
return TSTensorSeq(x),TSTensorSeqy(y)
# hide
# basic test
horizon,lookback = 8,2
ints = L(np.arange(10)[None,:])
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback)
for o in dl:
pass
# hide
# check padding
horizon,lookback = 2,10
ints = L(np.arange(10)[None,:])
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback)
for o in dl:
test_eq(o[0][0,0,0],o[0].mean(-1))
# hide
# skip due to large y.std/x.std ratio
horizon,lookback = 2,10
ints = L(np.concatenate([np.ones(11),np.array([1e10])])[None,:])
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback)
test_eq(list(dl),[])
# hide
horizon,lookback = 2,5
ints = L(np.arange(7)[None,:],np.arange(7,15)[None,:],np.arange(15,25)[None,:]).map(tensor)
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback, bs=2)
test_eq(dl.n, len(dl._ids))
test_eq(dl.n, 7)
test_eq(len(dl),4)
test_eq(dl.one_batch()[0].dtype,torch.float)
for o in dl:pass
# hide
ints = L(np.arange(10)[None,:],np.arange(20,30)[None,:],np.arange(40,50)[None,:])
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback, bs=6, shuffle=False)
test_eq(dl.n, len(dl._ids))
test_eq(dl.n, 12)
test_eq(len(dl), 2)
dl_new = dl.new()
test_eq(dl_new.lookback,dl.lookback)
test_eq(dl_new.n, dl.n)
test_eq(len(dl_new), len(dl))
test_eq(dl.one_batch(),dl_new.one_batch())
# hide
ints = L(np.arange(10)[None,:])
dl = TSDataLoader(ints, horizon = horizon, lookback = lookback, bs=6, shuffle=True)
dl.dataset=L(np.arange(1,11)[None,:])
for o in dl.one_batch():
test_eq(0 in o, False)
# hide
horizon,lookback = 2,5
ints = L(np.arange(7.)[None,:],np.arange(7,15.)[None,:],np.arange(15,20.)[None,:],np.arange(20,29.)[None,:]).map(tensor)
dl = TSDataLoader(ints, horizon, lookback, norm=False)
test_eq(L(dl)[0][0][0,0].dtype,torch.float)
horizon,lookback = 2,5
ints = L(np.arange(9.)[None,:],np.arange(9.,14)[None,:]).map(tensor)
ints
dl = TSDataLoader(ints, horizon, lookback, step=2, norm=False)
list(dl)
###Output
Need to pad 1/2 time series due to length.
###Markdown
The first sequence (0 to 8) is transformed into two items: one with x from 0 to 4 and y from 0 to 6. The next one is shifted by just two, because `step` == 2. The second sequence (and third resulting item) is not long enough and is therefore padded with the mean of x (`10`). Note that both x and y are padded with the mean of x. A quick sketch of this window arithmetic follows.
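###Code
# Illustration only (not part of TSDataLoader): the window arithmetic described above,
# recomputed by hand for the first sequence with lookback=5, horizon=2, step=2
seq = np.arange(9.)
lb, hz, st = 5, 2, 2
n_items = (len(seq) - lb - hz + st) // st
items = [(seq[j*st:j*st+lb], seq[j*st:j*st+lb+hz]) for j in range(n_items)]
test_eq(n_items, 2)
test_eq(list(items[0][0]), [0.,1.,2.,3.,4.])        # x of the first item
test_eq(list(items[1][1]), [2.,3.,4.,5.,6.,7.,8.])  # y of the second item
###Output
_____no_output_____
###Markdown
Showing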
###Code
# export
from fastai.vision.data import get_grid
@typedispatch
def show_batch(x: TensorSeq, y, samples, ctxs=None, max_n=10,nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
ctxs = show_batch[object](x, y, samples=samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_results(x: TensorSeq, y, samples, outs, ctxs=None, max_n=9,nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
for i in range(len(outs[0])):
ctxs = [TSTensorSeqy(b ,m='*r', label='pred').show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(i),ctxs,range(max_n))]
for i in range(len(samples[0])):
ctxs = [b.show(ctx=c, **kwargs) for b, c, _ in zip(samples.itemgot(i),ctxs,range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
Integration Example
###Code
from fastseq.core import *
from fastai.basics import *
path = untar_data(URLs.m4_daily)
df_train = pd.read_csv(path/'train.csv',nrows=300)
df_test = pd.read_csv(path/'val.csv')
df_test.head()
horizon = 14
lookback = 14*3
items = ts_lists(df_train.iloc[:,1:].values)
splits = RandomSplitter()(items)
dl = TSDataLoader(items, horizon = horizon, lookback = lookback, step=5)
dl.show_batch()
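# Illustration only (not from the original notebook): peek at one batch to sanity-check shapes --
# x covers `lookback` steps and y covers `lookback + horizon` steps
xb, yb = dl.one_batch()
test_eq(xb.shape[-1], lookback)
test_eq(yb.shape[-1], lookback + horizon)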
# hide
for o in dl:
pass
for o in dl:
pass
# hide
from nbdev.export import *
notebook2script()
###Output
Converted 00_core.ipynb.
Converted 01_data.external.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 05_nbeats.models.ipynb.
Converted 06_nbeats.callbacks.ipynb.
Converted 07_nbeats.learner.ipynb.
Converted 08_nbeats.interpret.ipynb.
Converted 11_metrics.ipynb.
Converted 12_compare.ipynb.
Converted index.ipynb.
###Markdown
DataLoader helpers fastai includes a replacement for PyTorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch')
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Return a subset of the dataset containing the index values of the sample if there are samples, else return the next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details (https://pytorch.org/docs/stable/data.html#multi-process-data-loading).",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
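###Markdown
Because of `@funcs_kwargs`, any of the callbacks listed above can also be passed directly as keyword arguments. A minimal sketch (for illustration only):
###Code
# Illustration only: `after_item` runs on each item, `after_batch` on each collated batch
dl = DataLoader(range(8), bs=4, after_item=lambda o: o*10, after_batch=lambda b: b+1)
test_eq(L(dl), L(tensor([1,11,21,31]), tensor([41,51,61,71])))
###Output
_____no_output_____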
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): Set to `False`, if you are using iterable-style dataset. Otherwise it is set to `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using iterable-style dataset, you can specify the size of the dataset using `n`.* `device` (torch.device): Defaults to `default_device()` which is CUDA by default. You can specify device as `torch.device('cpu')`. Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1. Note it does not need to use `self.rng` anymore to maintain consistent behavior across workers.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
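###Markdown
`create_item` (or any of the item hooks) can raise `SkipItemException` to drop an item from the stream. A minimal sketch (for illustration only, not part of the original notebook):
###Code
# Illustration only: skip every vowel; `do_item` catches `SkipItemException` and filters the item out
class SkipVowelsDL(DataLoader):
    def create_item(self, s):
        o = self.dataset[s]
        if o in 'aeiou': raise SkipItemException()
        return o
test_eq(''.join(SkipVowelsDL(letters)), 'bcdfghjklmnpqrstvwxyz')
###Output
_____no_output_____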
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
DataLoader
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader(GetAttr):
_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler = (
False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception): pass
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods:
exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
###Output
_____no_output_____
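###Markdown
`before_batch` receives the list of items right before collation, which makes it a convenient place for batch-level tweaks. A minimal sketch (for illustration only, not part of the original notebook):
###Code
# Illustration only: double every item in the batch before it is collated
dl = DataLoader(range(6), bs=3, before_batch=lambda its: [o*2 for o in its])
test_eq(L(dl), L(tensor([0,2,4]), tensor([6,8,10])))
###Output
_____no_output_____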
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(o) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.learner.ipynb.
Converted 43_tabular.model.ipynb.
Converted 45_collab.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
###Markdown
DataLoader helpers fastai includes a replacement for PyTorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
def _fn_noops(self, x=None, *args, **kwargs): return x
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,_fn_noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout,persistent_workers')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = (hasattr(dataset,'__getitem__')
and not isinstance(dataset, IterableDataset))
if not indexed and shuffle: raise ValueError("Can only shuffle an indexed dataset (not an iterable one).")
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
if sys.platform == "win32" and IN_NOTEBOOK and num_workers > 0:
print("Due to IPython and Windows limitation, python multiprocessing isn't available now.")
print("So `number_workers` is changed to 0 to avoid getting stuck")
num_workers = 0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
if self.dataset is not None: self.it = iter(self.dataset)
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods:
o = getattr(self, n)
if not isinstance(o, MethodType): cur_kwargs[n] = o
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s):
if self.indexed: return self.dataset[s or 0]
elif s is None: return next(self.it)
else: raise IndexError("Cannot index an iterable dataset numerically - must use `None`.")
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Subset of the dataset containing the index values of sample if exists, else next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details.",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
###Markdown
Arguments to `DataLoader`:

* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.
* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.
* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.
* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.
* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.
* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.
* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.
* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.
* `indexed` (bool): The `DataLoader` will make a guess as to whether the dataset can be indexed (or is iterable), but you can override it with this parameter. `True` by default.
* `n` (int): Defaults to `len(dataset)`. If you are using an iterable-style dataset, you can specify the size with `n`.
* `device` (torch.device): Defaults to `default_device()` which is CUDA by default. You can specify device as `torch.device('cpu')`.

Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_num_workers = 0 if sys.platform == "win32" else 4
test_eq(dl.fake_l.num_workers, test_num_workers)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, test_num_workers)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2], dtype=np.int64),array([3,4,5], dtype=np.int64))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
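# Illustrative sketch (not one of the original tests): the callback hooks in action.
# `after_item` runs on each raw item, `before_batch` on the list of items about to be
# collated, and `after_batch` on the collated mini-batch, in that order. `_cb_dl` is just
# a throwaway example name.
_cb_dl = DataLoader(range(6), bs=3,
                    after_item   = lambda o: o+1,                 # 0..5 -> 1..6
                    before_batch = lambda b: [o*10 for o in b],   # scale each item
                    after_batch  = lambda b: b-1)                 # shift the collated tensor
test_eq(L(_cb_dl), [tensor([9,19,29]), tensor([39,49,59])])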
###Output
_____no_output_____
###Markdown
Iterable dataloaders require specific tests.
###Code
class DummyIterableDataset(IterableDataset):
def __iter__(self):
yield from range(11)
ds1 = DataLoader(DummyIterableDataset(), bs=4)
# Check it yields fine, and check we can do multiple passes
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10])))
# Check `drop_last` works fine (with multiple passes, since this will prematurely terminate the iterator)
ds1 = DataLoader(DummyIterableDataset(), bs=4, drop_last=True)
for i in range(3):
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7])))
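# Illustrative sketch (an assumption, not an original test): for an iterable-style dataset
# `len(dataset)` is unavailable, so pass `n` explicitly to give the DataLoader a length.
def _count_to_ten(): yield from range(10)
_it_dl = DataLoader(_count_to_ten(), bs=4, n=10)
test_eq(len(_it_dl), 3)
test_eq(L(_it_dl), [tensor([0,1,2,3]), tensor([4,5,6,7]), tensor([8,9])])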
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
L(dl)
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
if not (sys.platform == "win32" and IN_NOTEBOOK):
%time test_shuffled(L(DataLoader(it, num_workers=4)), L(range(30)))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
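# A further sketch (hypothetical, for illustration): `get_idxs` can also implement a fully
# custom ordering - here indices are sorted by item value, so the batches come out sorted.
class _SortedDL(DataLoader):
    def get_idxs(self): return sorted(range(self.n), key=lambda i: self.dataset[i])
test_eq(L(_SortedDL([3,1,2,0], bs=2)), [tensor([0,1]), tensor([2,3])])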
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
from subprocess import Popen, PIPE
# test num_workers > 0 in scripts works when python process start method is spawn
process = Popen(["python", "dltest.py"], stdout=PIPE)
_, err = process.communicate(timeout=15)
exit_code = process.wait()
test_eq(exit_code, 0)
###Output
_____no_output_____
###Markdown
DataLoader helpers fastai includes a replacement for PyTorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout,persistent_workers')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
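# Minimal sketch (an assumption, not from the original notebook) of how `SkipItemException`
# is used: raising it from `create_item` (or `after_item`) makes `do_item` return None, so
# the item is dropped and the remaining items are re-chunked into batches.
class _SkipOddDL(DataLoader):
    def create_item(self, s):
        if s % 2: raise SkipItemException()
        return self.dataset[s]
test_eq(L(_SkipOddDL(list(range(10)), bs=2)), [tensor([0,2]), tensor([4,6]), tensor([8])])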
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch')
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Return a subset of the dataset containing the index values of the sample if there are samples, else return the next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details (https://pytorch.org/docs/stable/data.html#multi-process-data-loading).",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
###Markdown
Arguments to `DataLoader`:

* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.
* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.
* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.
* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.
* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.
* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.
* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.
* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.
* `indexed` (bool): Set to `False` if you are using an iterable-style dataset. Otherwise it is set to `True` by default.
* `n` (int): Defaults to `len(dataset)`. If you are using an iterable-style dataset, you can specify its size with `n`.
* `device` (torch.device): Defaults to `default_device()` which is CUDA by default. You can specify device as `torch.device('cpu')`.

Override `get_idxs` to return the same index until consumption of the DL. This is intended to test consistent sampling behavior when `num_workers`>1. Note that it no longer needs to use `self.rng` to maintain consistent behavior across workers.
###Code
class AdamantDL(DataLoader):
def get_idxs(self):
r=random.randint(0,self.n-1)
return [r] * self.n
test_eq(torch.cat(tuple(AdamantDL((list(range(50))),bs=16,num_workers=4))).unique().numel(),1)
###Output
_____no_output_____
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
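# Illustrative sketch (not an original test): `new` copies the current settings and only
# overrides what you pass in, leaving the original DataLoader untouched.
_base_dl = DataLoader(letters, bs=4)
_small_dl = _base_dl.new(bs=2)
test_eq(_small_dl.bs, 2)
test_eq(_base_dl.bs, 4)              # original unchanged
test_eq(_small_dl.dataset, letters)  # dataset carried over
test_eq(len(_small_dl), 13)          # 26 letters with bs=2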
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
list(DataLoader(list(range(50)),bs=32,shuffle=True,num_workers=3))
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 01a_losses.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
DataLoader
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader(GetAttr):
_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler = (
False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception): pass
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods:
exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
###Output
_____no_output_____
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(o) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.learner.ipynb.
Converted 43_tabular.model.ipynb.
Converted 45_collab.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
Converted migrating.ipynb.
###Markdown
fastai DataLoader> API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility
###Code
#default_exp data.load
#export
from fastai.torch_basics import *
from torch.utils.data.dataloader import _MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter,_DatasetKind
#export
_loaders = (_MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter)
#hide
from nbdev.showdoc import *
bs = 4
letters = list(string.ascii_lowercase)
###Output
_____no_output_____
###Markdown
DataLoader helpers fastai includes a replacement for PyTorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler,generator,prefetch_factor = (
None,False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count,None,2)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
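# Illustrative sketch (an assumption, not an original test): `fa_collate` keeps the container
# type of each sample - here a custom tuple subclass survives collation, while the leaves are
# still collated into tensors.
class _Pair(tuple): pass
_samples = [_Pair((1,2)), _Pair((3,4))]
test_eq(type(fa_collate(_samples)), _Pair)
test_eq(fa_collate(_samples), _Pair((tensor([1,3]), tensor([2,4]))))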
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
###Markdown
DataLoader -
###Code
#export
@log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch')
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`. Use python's `random` functionality to implement it.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Return a subset of the dataset containing the index values of the sample if there are samples, else return the next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details (https://pytorch.org/docs/stable/data.html#multi-process-data-loading).",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
show_doc(DataLoader)
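# Quick sketch (illustrative, not from the original notebook): `one_batch` grabs the first
# batch without spawning workers, and `to` records the device that batches are moved to on
# iteration.
test_eq(DataLoader(letters, bs=4).one_batch(), ['a','b','c','d'])
_dev_dl = DataLoader(range(6), bs=3)
_dev_dl.to(torch.device('cpu'))
test_eq(first(_dev_dl).device, torch.device('cpu'))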
###Output
_____no_output_____
###Markdown
Arguments to `DataLoader`:

* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.
* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.
* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.
* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.
* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.
* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.
* `shuffle` (bool): If `True`, then data is shuffled every time dataloader is fully read/iterated.
* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.
* `indexed` (bool): Set to `False` if you are using an iterable-style dataset. Otherwise it is set to `True` by default.
* `n` (int): Defaults to `len(dataset)`. If you are using an iterable-style dataset, you can specify its size with `n`.
* `device` (torch.device): Defaults to `default_device()` which is CUDA by default. You can specify device as `torch.device('cpu')`.

Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
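# Sketch (hypothetical example): `shuffle_fn` decides the order when `shuffle=True`, so
# overriding it gives full control - here a "shuffle" that simply reverses the indices.
class _ReverseDL(DataLoader):
    def shuffle_fn(self, idxs): return list(reversed(idxs))
test_eq(L(_ReverseDL(letters, shuffle=True)), list(reversed(letters)))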
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
DataLoader
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler,generator,prefetch_factor = (
None,False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count,None,2)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception): pass
#export
@log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch')
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods:
exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
###Output
_____no_output_____
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
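# Sketch (an assumption, not an original test): how `sample` shards index blocks across
# workers - each worker keeps the `bs`-sized blocks whose position matches its offset, so
# every worker produces whole, consistent batches.
_shard_dl = DataLoader(list(range(12)), bs=4)
_shard_dl.nw, _shard_dl.offs = 2, 0      # pretend to be worker 0 of 2
test_eq(list(_shard_dl.sample()), [0,1,2,3, 8,9,10,11])
_shard_dl.offs = 1                       # worker 1 gets the remaining block
test_eq(list(_shard_dl.sample()), [4,5,6,7])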
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
DataLoader
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader(GetAttr):
_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler = (
False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception): pass
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods:
exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0,
shuffle=False, drop_last=False, indexed=None, n=None, **kwargs):
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout')
self.rng,self.nw,self.offs = random.Random(),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l): yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def one_batch(self):
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
###Output
_____no_output_____
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(o) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 05_data.core.ipynb.
Converted 06_data.transforms.ipynb.
Converted 07_data.block.ipynb.
Converted 08_vision.core.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09a_vision.data.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.model.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 96_data.external.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
###Markdown
DataLoader
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler = (
None,False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception): pass
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods:
exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
###Output
_____no_output_____
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(o) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 45_collab.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
###Markdown
fastai DataLoader> API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility
###Code
#default_exp data.load
#export
from fastai.torch_basics import *
from torch.utils.data.dataloader import _MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter,_DatasetKind
#export
_loaders = (_MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter)
#hide
from nbdev.showdoc import *
bs = 4
letters = list(string.ascii_lowercase)
###Output
_____no_output_____
###Markdown
DataLoader helpers fastai includes a replacement for PyTorch's *DataLoader* which is largely API-compatible, and adds a lot of useful functionality and flexibility. Before we look at the class, there are a couple of helpers we'll need to define.
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler,generator,prefetch_factor = (
None,False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count,None,2)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
show_doc(SkipItemException, title_level=3)
###Output
_____no_output_____
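###Markdown
`SkipItemException` is not demonstrated elsewhere in this notebook, so here is a minimal sketch (it relies on the `DataLoader` class defined in the next cell, whose `do_item` catches the exception and silently drops the item):
###Code
# hypothetical example: odd indices raise SkipItemException, so they never reach a batch
class SkipOddDL(DataLoader):
    def create_item(self, s):
        if s % 2: raise SkipItemException()
        return s
list(SkipOddDL(range(10), bs=2))  # expected: [tensor([0, 2]), tensor([4, 6]), tensor([8])]
###Output
_____no_output_____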
###Markdown
DataLoader -
###Code
#export
@log_args(but='dataset,wif,create_batch,create_batches,create_item,retain,get_idxs,sample,shuffle_fn,do_batch')
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
#export
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`. Use python's `random` functionality to implement it.",
         randomize = "Sets the `DataLoader`'s random number generator state.",
         retain = "Cast each item of `res` to the type of the matching item in `b` if it's a superclass.",
         create_item = "Return the dataset item at index `s`, or the next item from the dataset iterator if `s` is None.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details (https://pytorch.org/docs/stable/data.html#multi-process-data-loading).",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
###Output
_____no_output_____
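###Markdown
The `get_idxs`/`shuffle_fn` hooks documented above can be overridden to control the sampling order; a minimal sketch (not from the original notebook):
###Code
# iterate the dataset back-to-front by overriding get_idxs
class ReversedDL(DataLoader):
    def get_idxs(self): return list(range(len(self.dataset)-1, -1, -1))
''.join(ReversedDL(letters))   # 'zyxwvutsrqponmlkjihgfedcba'
###Output
_____no_output_____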
###Markdown
Arguments to `DataLoader`:* `dataset`: dataset from which to load the data. Can be either map-style or iterable-style dataset.* `bs` (int): how many samples per batch to load (if `batch_size` is provided then `batch_size` will override `bs`). If `bs=None`, then it is assumed that `dataset.__getitem__` returns a batch.* `num_workers` (int): how many subprocesses to use for data loading. `0` means that the data will be loaded in the main process.* `pin_memory` (bool): If `True`, the data loader will copy Tensors into CUDA pinned memory before returning them.* `timeout` (float>0): the timeout value in seconds for collecting a batch from workers.* `batch_size` (int): It is only provided for PyTorch compatibility. Use `bs`.* `shuffle` (bool): If `True`, then data is shuffled every time the dataloader is fully read/iterated.* `drop_last` (bool): If `True`, then the last incomplete batch is dropped.* `indexed` (bool): Set to `False` if you are using an iterable-style dataset. Otherwise it is set to `True` by default.* `n` (int): Defaults to `len(dataset)`. If you are using an iterable-style dataset, you can specify the size of the dataset using `n`.* `device` (torch.device): Defaults to `default_device()` which is CUDA by default. You can specify the device as `torch.device('cpu')`. Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
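###Markdown
A minimal sketch (not part of the original notebook) tying together a few of the constructor arguments described above, plus `new` and `to`:
###Code
# uses only the DataLoader defined above; the device is set to CPU so it runs anywhere
dl = DataLoader(list(range(10)), bs=3, shuffle=True, drop_last=True, num_workers=0)
dl.to(torch.device('cpu'))     # equivalent to passing device=torch.device('cpu')
len(dl)                        # 3 full batches of 3; the leftover item is dropped
dl.one_batch()                 # one shuffled batch, e.g. tensor([7, 2, 5])
dl2 = dl.new(shuffle=False)    # same settings as dl, but without shuffling
list(dl2)                      # [tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7, 8])]
###Output
_____no_output_____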
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
DataLoader
###Code
#export
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.nw,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last,dataset_kind,_dataset_kind,_index_sampler = (
None,False,noops,False,_DatasetKind.Iterable,_DatasetKind.Iterable,Inf.count)
def __init__(self, d, pin_memory, num_workers, timeout):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr(self, 'd,pin_memory,num_workers,timeout')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_nw = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_nw
_collate_types = (ndarray, Tensor, typing.Mapping, str)
#export
def fa_collate(t):
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
#e.g. x is int, y is tuple
t = [(1,(2,3)),(1,(2,3))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
t = [(1,(2,(3,4))),(1,(2,(3,4)))]
test_eq(fa_collate(t), default_collate(t))
test_eq(L(fa_collate(t)).map(type), [Tensor,tuple])
test_eq(L(fa_collate(t)[1]).map(type), [Tensor,tuple])
#export
def fa_convert(t):
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
t0 = array([1,2])
t = [t0,(t0,t0)]
test_eq(fa_convert(t), default_convert(t))
test_eq(L(fa_convert(t)).map(type), [Tensor,tuple])
#export
class SkipItemException(Exception): pass
#export
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods:
exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr(self, 'dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.nw,self.offs = random.Random(),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
idxs = self.get_idxs()
return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): delattr(self, 'it')
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods: cur_kwargs[n] = getattr(self, n)
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
###Output
_____no_output_____
###Markdown
Override `create_item` and use the default infinite sampler to get a stream of unknown length (`stop()` when you want to stop the stream).
###Code
class RandDL(DataLoader):
def create_item(self, s):
r = random.random()
return r if r<0.95 else stop()
L(RandDL())
L(RandDL(bs=4, drop_last=True)).map(len)
dl = RandDL(bs=4, num_workers=4, drop_last=True)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
with dl.fake_l.no_multiproc():
test_eq(dl.fake_l.num_workers, 0)
L(dl).map(len)
test_eq(dl.fake_l.num_workers, 4)
def _rand_item(s):
r = random.random()
return r if r<0.95 else stop()
L(DataLoader(create_item=_rand_item))
###Output
_____no_output_____
###Markdown
If you don't set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a batch.
###Code
ds1 = DataLoader(letters)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
test_shuffled(L(DataLoader(letters, shuffle=True)), letters)
ds1 = DataLoader(letters, indexed=False)
test_eq(L(ds1), letters)
test_eq(len(ds1), 26)
t2 = L(tensor([0,1,2]),tensor([3,4,5]))
ds2 = DataLoader(t2)
test_eq_type(L(ds2), t2)
t3 = L(array([0,1,2]),array([3,4,5]))
ds3 = DataLoader(t3)
test_eq_type(L(ds3), t3.map(tensor))
ds4 = DataLoader(t3, create_batch=noop, after_iter=lambda: setattr(t3, 'f', 1))
test_eq_type(L(ds4), t3)
test_eq(t3.f, 1)
###Output
_____no_output_____
###Markdown
If you do set `bs`, then `dataset` is assumed to provide an iterator or a `__getitem__` that returns a single item of a batch.
###Code
def twoepochs(d): return ' '.join(''.join(list(o)) for _ in range(2) for o in d)
ds1 = DataLoader(letters, bs=4, drop_last=True, num_workers=0)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx abcd efgh ijkl mnop qrst uvwx')
ds1 = DataLoader(letters,4,num_workers=2)
test_eq(twoepochs(ds1), 'abcd efgh ijkl mnop qrst uvwx yz abcd efgh ijkl mnop qrst uvwx yz')
ds1 = DataLoader(range(12), bs=4, num_workers=3)
test_eq_type(L(ds1), L(tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])))
ds1 = DataLoader([str(i) for i in range(11)], bs=4, after_iter=lambda: setattr(t3, 'f', 2))
test_eq_type(L(ds1), L(['0','1','2','3'],['4','5','6','7'],['8','9','10']))
test_eq(t3.f, 2)
it = iter(DataLoader(map(noop,range(20)), bs=4, num_workers=1))
test_eq_type([next(it) for _ in range(3)], [tensor([0,1,2,3]),tensor([4,5,6,7]),tensor([8,9,10,11])])
class SleepyDL(list):
def __getitem__(self,i):
time.sleep(random.random()/50)
return super().__getitem__(i)
t = SleepyDL(letters)
%time test_eq(DataLoader(t, num_workers=0), letters)
%time test_eq(DataLoader(t, num_workers=2), letters)
%time test_eq(DataLoader(t, num_workers=4), letters)
dl = DataLoader(t, shuffle=True, num_workers=1)
test_shuffled(L(dl), letters)
test_shuffled(L(dl), L(dl))
class SleepyQueue():
"Simulate a queue with varying latency"
def __init__(self, q): self.q=q
def __iter__(self):
while True:
time.sleep(random.random()/100)
try: yield self.q.get_nowait()
except queues.Empty: return
q = Queue()
for o in range(30): q.put(o)
it = SleepyQueue(q)
%time test_shuffled(L(DataLoader(it, num_workers=4)), range(30))
class A(TensorBase): pass
for nw in (0,2):
t = A(tensor([1,2]))
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b), A)
t = (A(tensor([1,2])),)
dl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=nw)
b = first(dl)
test_eq(type(b[0]), A)
class A(TensorBase): pass
t = A(tensor(1,2))
tdl = DataLoader([t,t,t,t,t,t,t,t], bs=4, num_workers=2, after_batch=to_device)
b = first(tdl)
test_eq(type(b), A)
# Unknown attributes are delegated to `dataset`
test_eq(tdl.pop(), tensor(1,2))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 45_collab.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
|
apps/recommendation-wide-n-deep/wide_n_deep.ipynb | ###Markdown
Wide & Deep Recommender Demo The Wide & Deep learning model, proposed by Google in 2016, is a mixed DNN-linear model; Google has used wide and deep learning for app recommendation in its app store. In this tutorial, we use the Recommender API of Analytics Zoo to build a wide linear model and a deep neural network, together called the Wide&Deep model, and use the BigDL optimizer to train it. The Wide&Deep model combines the strengths of memorization and generalization, which makes it useful for generic large-scale regression and classification problems with sparse input features (e.g., categorical features with a large number of possible feature values). Initialization Import the necessary libraries
###Code
from zoo.models.recommendation import *
from zoo.models.recommendation.utils import *
from zoo.common.nncontext import init_nncontext
import os
import sys
import datetime as dt
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
%pylab inline
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
Initialize the NN context; this gets a SparkContext with a configuration optimized for BigDL performance.
###Code
sc = init_nncontext("WideAndDeep Example")
###Output
_____no_output_____
###Markdown
Data Preparation Download and read the MovieLens 1M rating data, and check its dimensions.
###Code
from bigdl.dataset import movielens
movielens_data = movielens.get_id_ratings("/tmp/movielens/")
min_user_id = np.min(movielens_data[:,0])
max_user_id = np.max(movielens_data[:,0])
min_movie_id = np.min(movielens_data[:,1])
max_movie_id = np.max(movielens_data[:,1])
rating_labels= np.unique(movielens_data[:,2])
print(movielens_data.shape)
print(min_user_id, max_user_id, min_movie_id, max_movie_id, rating_labels)
###Output
(1000209, 3)
(1, 6040, 1, 3952, array([1, 2, 3, 4, 5]))
###Markdown
Transform the ratings into a DataFrame, and read the user and item data into DataFrames. Transform the labels to be zero-based, since the original labels start from 1.
###Code
sqlContext = SQLContext(sc)
from pyspark.sql.types import *
from pyspark.sql import Row
Rating = Row("userId", "itemId", "label")
User = Row("userId", "gender", "age" ,"occupation")
Item = Row("itemId", "title" ,"genres")
ratings = sc.parallelize(movielens_data)\
.map(lambda l: (int(l[0]), int(l[1]), int(l[2])-1))\
.map(lambda r: Rating(*r))
ratingDF = sqlContext.createDataFrame(ratings)
users= sc.textFile("/tmp/movielens/ml-1m/users.dat")\
.map(lambda l: l.split("::")[0:4])\
.map(lambda l: (int(l[0]), l[1], int(l[2]), int(l[3])))\
.map(lambda r: User(*r))
userDF = sqlContext.createDataFrame(users)
items = sc.textFile("/tmp/movielens/ml-1m/movies.dat")\
.map(lambda l: l.split("::")[0:3])\
.map(lambda l: (int(l[0]), l[1], l[2].split('|')[0]))\
.map(lambda r: Item(*r))
itemDF = sqlContext.createDataFrame(items)
###Output
_____no_output_____
###Markdown
Join the data together and transform it. For example, gender is used as a categorical feature, and age and gender are crossed into a hashed cross feature ("age-gender").
###Code
from pyspark.sql.functions import col, udf
gender_udf = udf(lambda gender: categorical_from_vocab_list(gender, ["F", "M"], start=1))
bucket_cross_udf = udf(lambda feature1, feature2: hash_bucket(str(feature1) + "_" + str(feature2), bucket_size=100))
genres_list = ["Crime", "Romance", "Thriller", "Adventure", "Drama", "Children's",
"War", "Documentary", "Fantasy", "Mystery", "Musical", "Animation", "Film-Noir", "Horror",
"Western", "Comedy", "Action", "Sci-Fi"]
genres_udf = udf(lambda genres: categorical_from_vocab_list(genres, genres_list, start=1))
allDF = ratingDF.join(userDF, ["userId"]).join(itemDF, ["itemId"]) \
.withColumn("gender", gender_udf(col("gender")).cast("int")) \
.withColumn("age-gender", bucket_cross_udf(col("age"), col("gender")).cast("int")) \
.withColumn("genres", genres_udf(col("genres")).cast("int"))
allDF.show(5)
###Output
+------+------+-----+------+---+----------+--------------+------+----------+
|itemId|userId|label|gender|age|occupation| title|genres|age-gender|
+------+------+-----+------+---+----------+--------------+------+----------+
| 26| 3391| 3| 2| 18| 4|Othello (1995)| 5| 24|
| 26| 1447| 4| 2| 18| 4|Othello (1995)| 5| 24|
| 26| 5107| 3| 1| 45| 0|Othello (1995)| 5| 5|
| 26| 2878| 3| 1| 50| 20|Othello (1995)| 5| 47|
| 26| 1527| 1| 2| 18| 10|Othello (1995)| 5| 24|
+------+------+-----+------+---+----------+--------------+------+----------+
only showing top 5 rows
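###Markdown
To see what the two helper functions used in the UDFs above compute for a single value, here is a small sketch (the helpers come from `zoo.models.recommendation.utils`, imported at the top; the exact bucket id is hash-dependent):
###Code
# gender "F"/"M" is mapped to an integer index offset by start=1
categorical_from_vocab_list("F", ["F", "M"], start=1)  # expected: 1 ('F' is the first vocabulary entry)
# the crossed feature hashes the concatenated string "age_gender" into one of 100 buckets
hash_bucket(str(18) + "_" + str(2), bucket_size=100)   # some int bucket id, roughly in [0, 100)
###Output
_____no_output_____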
###Markdown
Specify the feature column information shared by the WideAndDeep model and its feature generation. Here, we use occupation and gender for the wide base part, the age-gender cross as the wide cross part, genres and gender as indicator columns, and userId and itemId for embeddings.
###Code
bucket_size = 100
column_info = ColumnFeatureInfo(
wide_base_cols=["occupation", "gender"],
wide_base_dims=[21, 3],
wide_cross_cols=["age-gender"],
wide_cross_dims=[bucket_size],
indicator_cols=["genres", "gender"],
indicator_dims=[19, 3],
embed_cols=["userId", "itemId"],
embed_in_dims=[max_user_id, max_movie_id],
embed_out_dims=[64, 64],
continuous_cols=["age"])
###Output
_____no_output_____
###Markdown
Transform the data into an RDD of Sample. We use the optimizer of BigDL directly to train the model, and it requires the data to be provided as an RDD of Sample. A Sample is a BigDL data structure that can be constructed from 2 numpy arrays, the feature and the label; the API is Sample.from_ndarray(feature, label). The Wide&Deep model needs two input tensors: a SparseTensor for the wide model and a DenseTensor for the deep model.
###Code
rdds = allDF.rdd\
.map(lambda row: to_user_item_feature(row, column_info))\
.repartition(4)
trainPairFeatureRdds, valPairFeatureRdds = rdds.randomSplit([0.8, 0.2], seed= 1)
valPairFeatureRdds.persist()
train_data= trainPairFeatureRdds.map(lambda pair_feature: pair_feature.sample)
test_data= valPairFeatureRdds.map(lambda pair_feature: pair_feature.sample)
###Output
_____no_output_____
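###Markdown
The Sample.from_ndarray API mentioned above is wrapped inside to_user_item_feature, so it never appears explicitly in this notebook. A minimal sketch of building a Sample directly from two numpy arrays (the feature values and the label below are made up purely for illustration):
###Code
from bigdl.util.common import Sample
import numpy as np
toy_feature = np.array([25.0, 1.0, 3.0]) # hypothetical dense feature vector
toy_label = np.array([4.0]) # hypothetical rating label
toy_sample = Sample.from_ndarray(toy_feature, toy_label)
###Output
_____no_output_____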
###Markdown
Create the Wide&Deep model. In Analytics Zoo, it is simple to build the Wide&Deep model by calling the WideAndDeep API. You need to specify the model type and the class number, as well as the column information of the features according to your data. You can also change other default parameters of the network, such as the hidden layers. The model can be fed into an Optimizer of BigDL or an NNClassifier of Analytics Zoo. Please refer to the documentation for more details. In this example, we demonstrate how to use the optimizer of BigDL.
###Code
wide_n_deep = WideAndDeep(5, column_info, "wide_n_deep")
###Output
creating: createZooKerasInput
creating: createZooKerasInput
creating: createZooKerasInput
creating: createZooKerasInput
creating: createZooKerasSparseDense
creating: createZooKerasFlatten
creating: createZooKerasSelect
creating: createZooKerasEmbedding
creating: createZooKerasFlatten
creating: createZooKerasFlatten
creating: createZooKerasSelect
creating: createZooKerasEmbedding
creating: createZooKerasFlatten
creating: createZooKerasMerge
creating: createZooKerasDense
creating: createZooKerasDense
creating: createZooKerasDense
creating: createZooKerasDense
creating: createZooKerasMerge
creating: createZooKerasActivation
creating: createZooKerasModel
creating: createZooWideAndDeep
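###Markdown
The cell above relies on the default network settings. A hedged sketch of the same constructor with explicit keyword arguments; the model_type and hidden_layers keyword names are assumptions about the Analytics Zoo Recommender API rather than something shown in this notebook, so the lines are left commented out:
###Code
# Hypothetical variants, not executed here; the keyword names are assumed.
# wide_only = WideAndDeep(5, column_info, model_type="wide")
# deeper = WideAndDeep(5, column_info, model_type="wide_n_deep", hidden_layers=(80, 40, 20))
###Output
_____no_output_____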
###Markdown
Create optimizer and train the model
###Code
wide_n_deep.compile(optimizer = "adam",
loss= "sparse_categorical_crossentropy",
metrics=['accuracy'])
tmp_log_dir = create_tmp_path()
wide_n_deep.set_tensorboard(tmp_log_dir, "training_wideanddeep")
###Output
_____no_output_____
###Markdown
Train the network. Wait some time until it finishes. Voila! You've got a trained model.
###Code
%%time
# Boot training process
wide_n_deep.fit(train_data,
batch_size = 8000,
nb_epoch = 10,
validation_data = test_data)
print("Optimization Done.")
###Output
Optimization Done.
CPU times: user 67.5 ms, sys: 24.2 ms, total: 91.6 ms
Wall time: 3min 16s
###Markdown
Prediction and recommendation Zoo models make inferences on the given data using the model.predict(val_rdd) API. An RDD of results is returned. predict_class returns the predicted labels.
###Code
results = wide_n_deep.predict(test_data)
results.take(5)
results_class = wide_n_deep.predict_class(test_data)
results_class.take(5)
###Output
_____no_output_____
###Markdown
In Analytics Zoo, Recommender provides 3 unique APIs to predict user-item pairs and make recommendations for users or items given candidates. Predict for user-item pairs
###Code
userItemPairPrediction = wide_n_deep.predict_user_item_pair(valPairFeatureRdds)
for result in userItemPairPrediction.take(5): print(result)
###Output
UserItemPrediction [user_id: 5305, item_id: 26, prediction: 4, probability: 0.610632181168]
UserItemPrediction [user_id: 1150, item_id: 26, prediction: 2, probability: 0.393449008465]
UserItemPrediction [user_id: 4294, item_id: 26, prediction: 2, probability: 0.276259899139]
UserItemPrediction [user_id: 5948, item_id: 26, prediction: 4, probability: 0.508404672146]
UserItemPrediction [user_id: 3825, item_id: 26, prediction: 1, probability: 0.456133008003]
###Markdown
Recommend 3 items for each user given candidates in the feature RDDs
###Code
userRecs = wide_n_deep.recommend_for_user(valPairFeatureRdds, 3)
for result in userRecs.take(5): print(result)
###Output
UserItemPrediction [user_id: 4904, item_id: 1221, prediction: 5, probability: 0.873028695583]
UserItemPrediction [user_id: 4904, item_id: 919, prediction: 5, probability: 0.860194385052]
UserItemPrediction [user_id: 4904, item_id: 2762, prediction: 5, probability: 0.855066776276]
UserItemPrediction [user_id: 1084, item_id: 1213, prediction: 5, probability: 0.525161027908]
UserItemPrediction [user_id: 1084, item_id: 50, prediction: 5, probability: 0.517793118954]
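###Markdown
A minimal sketch, assuming each UserItemPrediction exposes the user_id, item_id, prediction and probability fields printed above, that collects the recommendations into a Spark DataFrame for easier inspection:
###Code
recTuples = userRecs.map(lambda r: (r.user_id, r.item_id, int(r.prediction), float(r.probability)))
recDF = sqlContext.createDataFrame(recTuples, ["userId", "itemId", "prediction", "probability"])
recDF.show(5)
###Output
_____no_output_____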
###Markdown
Recommend 3 users for each item given candidates in the feature RDDs
###Code
itemRecs = wide_n_deep.recommend_for_item(valPairFeatureRdds, 3)
for result in itemRecs.take(5): print(result)
###Output
UserItemPrediction [user_id: 2244, item_id: 1084, prediction: 5, probability: 0.879560172558]
UserItemPrediction [user_id: 5582, item_id: 1084, prediction: 5, probability: 0.789601504803]
UserItemPrediction [user_id: 1835, item_id: 1084, prediction: 5, probability: 0.781116306782]
UserItemPrediction [user_id: 4511, item_id: 3764, prediction: 4, probability: 0.537709891796]
UserItemPrediction [user_id: 5080, item_id: 3764, prediction: 4, probability: 0.464907348156]
###Markdown
Draw the convergence curve
###Code
#retrieve train and validation summary object and read the loss data into ndarray's.
train_loss = np.array(wide_n_deep.get_train_summary("Loss"))
val_loss = np.array(wide_n_deep.get_validation_summary("Loss"))
#plot the train and validation curves
# each event data is a tuple in form of (iteration_count, value, timestamp)
plt.figure(figsize = (12,6))
plt.plot(train_loss[:,0],train_loss[:,1],label='train loss')
plt.plot(val_loss[:,0],val_loss[:,1],label='val loss',color='green')
plt.scatter(val_loss[:,0],val_loss[:,1],color='green')
plt.legend();
plt.xlim(0,train_loss.shape[0]+10)
plt.grid(True)
plt.title("loss")
###Output
_____no_output_____
###Markdown
plot accuracy
###Code
plt.figure(figsize = (12,6))
top1 = np.array(wide_n_deep.get_validation_summary("Top1Accuracy"))
plt.plot(top1[:,0],top1[:,1],label='top1')
plt.xlim(0,top1.shape[0]+10)
plt.title("top1 accuracy")
plt.grid(True)
plt.legend();
valPairFeatureRdds.unpersist()
sc.stop()
###Output
_____no_output_____
###Markdown
Wide & Deep Recommender Demo The Wide and Deep Learning Model, proposed by Google in 2016, is a DNN-Linear mixed model. Wide and deep learning has been used in the Google App Store for app recommendation. In this tutorial, we use the Recommender API of Analytics Zoo to build a wide linear model and a deep neural network, which together are called the Wide&Deep model, and use the optimizer of BigDL to train the network. The Wide&Deep model combines the strengths of memorization and generalization. It is useful for generic large-scale regression and classification problems with sparse input features (e.g., categorical features with a large number of possible feature values). Initialization Import the necessary libraries
###Code
from zoo.models.recommendation import *
from zoo.models.recommendation.utils import *
from zoo.common.nncontext import init_nncontext
import os
import sys
import datetime as dt
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
%pylab inline
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
Initialize the NN context; it will get a SparkContext with an optimized configuration for BigDL performance.
###Code
sc = init_nncontext("WideAndDeep Example")
###Output
_____no_output_____
###Markdown
Data Preparation Download and read the MovieLens 1M rating data and check its dimensions.
###Code
from bigdl.dataset import movielens
movielens_data = movielens.get_id_ratings("/tmp/movielens/")
min_user_id = np.min(movielens_data[:,0])
max_user_id = np.max(movielens_data[:,0])
min_movie_id = np.min(movielens_data[:,1])
max_movie_id = np.max(movielens_data[:,1])
rating_labels= np.unique(movielens_data[:,2])
print(movielens_data.shape)
print(min_user_id, max_user_id, min_movie_id, max_movie_id, rating_labels)
###Output
(1000209, 3)
(1, 6040, 1, 3952, array([1, 2, 3, 4, 5]))
###Markdown
Transform the ratings into a dataframe, and read the user and item data into dataframes. Transform the labels to be zero-based since the original labels start from 1.
###Code
sqlContext = SQLContext(sc)
from pyspark.sql.types import *
from pyspark.sql import Row
Rating = Row("userId", "itemId", "label")
User = Row("userId", "gender", "age" ,"occupation")
Item = Row("itemId", "title" ,"genres")
ratings = sc.parallelize(movielens_data)\
.map(lambda l: (int(l[0]), int(l[1]), int(l[2])-1))\
.map(lambda r: Rating(*r))
ratingDF = sqlContext.createDataFrame(ratings)
users= sc.textFile("/tmp/movielens/ml-1m/users.dat")\
.map(lambda l: l.split("::")[0:4])\
.map(lambda l: (int(l[0]), l[1], int(l[2]), int(l[3])))\
.map(lambda r: User(*r))
userDF = sqlContext.createDataFrame(users)
items = sc.textFile("/tmp/movielens/ml-1m/movies.dat")\
.map(lambda l: l.split("::")[0:3])\
.map(lambda l: (int(l[0]), l[1], l[2].split('|')[0]))\
.map(lambda r: Item(*r))
itemDF = sqlContext.createDataFrame(items)
###Output
_____no_output_____
###Markdown
Join the data together and transform it. For example, gender is going to be used as a categorical feature, and age and gender will be used as crossed features.
###Code
from pyspark.sql.functions import col, udf
gender_udf = udf(lambda gender: categorical_from_vocab_list(gender, ["F", "M"], start=1))
bucket_cross_udf = udf(lambda feature1, feature2: hash_bucket(str(feature1) + "_" + str(feature2), bucket_size=100))
genres_list = ["Crime", "Romance", "Thriller", "Adventure", "Drama", "Children's",
"War", "Documentary", "Fantasy", "Mystery", "Musical", "Animation", "Film-Noir", "Horror",
"Western", "Comedy", "Action", "Sci-Fi"]
genres_udf = udf(lambda genres: categorical_from_vocab_list(genres, genres_list, start=1))
allDF = ratingDF.join(userDF, ["userId"]).join(itemDF, ["itemId"]) \
.withColumn("gender", gender_udf(col("gender")).cast("int")) \
.withColumn("age-gender", bucket_cross_udf(col("age"), col("gender")).cast("int")) \
.withColumn("genres", genres_udf(col("genres")).cast("int"))
allDF.show(5)
###Output
+------+------+-----+------+---+----------+--------------+------+----------+
|itemId|userId|label|gender|age|occupation| title|genres|age-gender|
+------+------+-----+------+---+----------+--------------+------+----------+
| 26| 3391| 3| 2| 18| 4|Othello (1995)| 5| 24|
| 26| 1447| 4| 2| 18| 4|Othello (1995)| 5| 24|
| 26| 5107| 3| 1| 45| 0|Othello (1995)| 5| 5|
| 26| 2878| 3| 1| 50| 20|Othello (1995)| 5| 47|
| 26| 1527| 1| 2| 18| 10|Othello (1995)| 5| 24|
+------+------+-----+------+---+----------+--------------+------+----------+
only showing top 5 rows
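###Markdown
The age-gender column above is a hashed cross of the two raw columns. A toy illustration of the idea (this is not the hash_bucket implementation from zoo.models.recommendation.utils, which may use a different hash): concatenate the two values and fold the hash into a fixed number of buckets.
###Code
def toy_hash_bucket(value, bucket_size=100):
    # toy stand-in for the library's hash_bucket: a deterministic hash folded into bucket_size bins
    return abs(hash(value)) % bucket_size
print(toy_hash_bucket("18_2")) # some bucket id in [0, 100)
###Output
_____no_output_____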
###Markdown
Specify the data feature information shared by the WideAndDeep model and its feature generation. Here, we use occupation and gender for the wide base part, age crossed with gender as the wide cross part, genres and gender as indicators, and userId and itemId for embeddings.
###Code
bucket_size = 100
column_info = ColumnFeatureInfo(
wide_base_cols=["occupation", "gender"],
wide_base_dims=[21, 3],
wide_cross_cols=["age-gender"],
wide_cross_dims=[bucket_size],
indicator_cols=["genres", "gender"],
indicator_dims=[19, 3],
embed_cols=["userId", "itemId"],
embed_in_dims=[max_user_id, max_movie_id],
embed_out_dims=[64, 64],
continuous_cols=["age"])
###Output
_____no_output_____
###Markdown
Transform the data into an RDD of Sample. We use the optimizer of BigDL directly to train the model, and it requires the data to be provided as an RDD of Sample. A Sample is a BigDL data structure that can be constructed from 2 numpy arrays, the feature and the label; the API is Sample.from_ndarray(feature, label). The Wide&Deep model needs two input tensors: a SparseTensor for the wide model and a DenseTensor for the deep model.
###Code
rdds = allDF.rdd.map(lambda row: to_user_item_feature(row, column_info))
trainPairFeatureRdds, valPairFeatureRdds = rdds.randomSplit([0.8, 0.2], seed= 1)
valPairFeatureRdds.persist()
train_data= trainPairFeatureRdds.map(lambda pair_feature: pair_feature.sample)
test_data= valPairFeatureRdds.map(lambda pair_feature: pair_feature.sample)
###Output
_____no_output_____
###Markdown
Create the Wide&Deep model. In Analytics Zoo, it is simple to build the Wide&Deep model by calling the WideAndDeep API. You need to specify the model type and the class number, as well as the column information of the features according to your data. You can also change other default parameters of the network, such as the hidden layers. The model can be fed into an Optimizer of BigDL or an NNClassifier of Analytics Zoo. Please refer to the documentation for more details. In this example, we demonstrate how to use the optimizer of BigDL.
###Code
wide_n_deep = WideAndDeep(5, column_info, "wide_n_deep")
###Output
creating: createZooKerasInput
creating: createZooKerasInput
creating: createZooKerasInput
creating: createZooKerasInput
creating: createZooKerasSparseDense
creating: createZooKerasFlatten
creating: createZooKerasSelect
creating: createZooKerasEmbedding
creating: createZooKerasFlatten
creating: createZooKerasFlatten
creating: createZooKerasSelect
creating: createZooKerasEmbedding
creating: createZooKerasFlatten
creating: createZooKerasMerge
creating: createZooKerasDense
creating: createZooKerasDense
creating: createZooKerasDense
creating: createZooKerasDense
creating: createZooKerasMerge
creating: createZooKerasActivation
creating: createZooKerasModel
creating: createZooWideAndDeep
###Markdown
Create optimizer and train the model
###Code
wide_n_deep.compile(optimizer = "adam",
loss= "sparse_categorical_crossentropy",
metrics=['accuracy'])
tmp_log_dir = create_tmp_path()
wide_n_deep.set_tensorboard(tmp_log_dir, "training_wideanddeep")
###Output
_____no_output_____
###Markdown
Train the network. Wait some time until it finishes. Voila! You've got a trained model.
###Code
%%time
# Boot training process
wide_n_deep.fit(train_data,
batch_size = 8000,
nb_epoch = 10,
validation_data = test_data)
print("Optimization Done.")
###Output
Optimization Done.
CPU times: user 54.3 ms, sys: 19.7 ms, total: 74 ms
Wall time: 2min 30s
###Markdown
Prediction and recommendation Zoo models make inferences on the given data using the model.predict(val_rdd) API. An RDD of results is returned. predict_class returns the predicted labels.
###Code
results = wide_n_deep.predict(test_data)
results.take(5)
results_class = wide_n_deep.predict_class(test_data)
results_class.take(5)
###Output
_____no_output_____
###Markdown
In Analytics Zoo, Recommender provides 3 unique APIs to predict user-item pairs and make recommendations for users or items given candidates. Predict for user-item pairs
###Code
userItemPairPrediction = wide_n_deep.predict_user_item_pair(valPairFeatureRdds)
for result in userItemPairPrediction.take(5): print(result)
###Output
UserItemPrediction [user_id: 5305, item_id: 26, prediction: 4, probability: 0.447520256042]
UserItemPrediction [user_id: 1150, item_id: 26, prediction: 2, probability: 0.42147180438]
UserItemPrediction [user_id: 4294, item_id: 26, prediction: 4, probability: 0.338612318039]
UserItemPrediction [user_id: 5948, item_id: 26, prediction: 5, probability: 0.385789096355]
UserItemPrediction [user_id: 3825, item_id: 26, prediction: 2, probability: 0.292931675911]
###Markdown
Recommend 3 items for each user given candidates in the feature RDDs
###Code
userRecs = wide_n_deep.recommend_for_user(valPairFeatureRdds, 3)
for result in userRecs.take(5): print(result)
###Output
UserItemPrediction [user_id: 4904, item_id: 1221, prediction: 5, probability: 0.901316523552]
UserItemPrediction [user_id: 4904, item_id: 593, prediction: 5, probability: 0.890776693821]
UserItemPrediction [user_id: 4904, item_id: 913, prediction: 5, probability: 0.888917982578]
UserItemPrediction [user_id: 1084, item_id: 50, prediction: 5, probability: 0.632001161575]
UserItemPrediction [user_id: 1084, item_id: 912, prediction: 5, probability: 0.584099054337]
###Markdown
Recommend 3 users for each item given candidates in the feature RDDs
###Code
itemRecs = wide_n_deep.recommend_for_item(valPairFeatureRdds, 3)
for result in itemRecs.take(5): print(result)
###Output
UserItemPrediction [user_id: 1835, item_id: 1084, prediction: 5, probability: 0.745298802853]
UserItemPrediction [user_id: 3864, item_id: 1084, prediction: 5, probability: 0.744241654873]
UserItemPrediction [user_id: 5582, item_id: 1084, prediction: 5, probability: 0.739497065544]
UserItemPrediction [user_id: 4511, item_id: 3764, prediction: 4, probability: 0.44239372015]
UserItemPrediction [user_id: 116, item_id: 3764, prediction: 4, probability: 0.365347951651]
###Markdown
Draw the convergence curve
###Code
#retrieve train and validation summary object and read the loss data into ndarray's.
train_loss = np.array(wide_n_deep.get_train_summary("Loss"))
val_loss = np.array(wide_n_deep.get_validation_summary("Loss"))
#plot the train and validation curves
# each event data is a tuple in form of (iteration_count, value, timestamp)
plt.figure(figsize = (12,6))
plt.plot(train_loss[:,0],train_loss[:,1],label='train loss')
plt.plot(val_loss[:,0],val_loss[:,1],label='val loss',color='green')
plt.scatter(val_loss[:,0],val_loss[:,1],color='green')
plt.legend();
plt.xlim(0,train_loss.shape[0]+10)
plt.grid(True)
plt.title("loss")
###Output
_____no_output_____
###Markdown
plot accuracy
###Code
plt.figure(figsize = (12,6))
top1 = np.array(wide_n_deep.get_validation_summary("Top1Accuracy"))
plt.plot(top1[:,0],top1[:,1],label='top1')
plt.title("top1 accuracy")
plt.grid(True)
plt.legend();
plt.xlim(0,train_loss.shape[0]+10)
valPairFeatureRdds.unpersist()
sc.stop()
###Output
_____no_output_____
###Markdown
Wide & Deep Recommender Demo The Wide and Deep Learning Model, proposed by Google in 2016, is a DNN-Linear mixed model. Wide and deep learning has been used in the Google App Store for app recommendation. In this tutorial, we use the Recommender API of Analytics Zoo to build a wide linear model and a deep neural network, which together are called the Wide&Deep model, and use the optimizer of BigDL to train the network. The Wide&Deep model combines the strengths of memorization and generalization. It is useful for generic large-scale regression and classification problems with sparse input features (e.g., categorical features with a large number of possible feature values). Initialization Import the necessary libraries
###Code
from zoo.models.recommendation import *
from zoo.models.recommendation.utils import *
from zoo.common.nncontext import init_nncontext
import os
import sys
import datetime as dt
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
%pylab inline
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
Initialize the NN context; it will get a SparkContext with an optimized configuration for BigDL performance.
###Code
sc = init_nncontext("WideAndDeep Example")
###Output
_____no_output_____
###Markdown
Data Preparation Download and read the MovieLens 1M rating data and check its dimensions.
###Code
from bigdl.dataset import movielens
movielens_data = movielens.get_id_ratings("/tmp/movielens/")
min_user_id = np.min(movielens_data[:,0])
max_user_id = np.max(movielens_data[:,0])
min_movie_id = np.min(movielens_data[:,1])
max_movie_id = np.max(movielens_data[:,1])
rating_labels= np.unique(movielens_data[:,2])
print(movielens_data.shape)
print(min_user_id, max_user_id, min_movie_id, max_movie_id, rating_labels)
###Output
(1000209, 3)
(1, 6040, 1, 3952, array([1, 2, 3, 4, 5]))
###Markdown
Transform the ratings into a dataframe, and read the user and item data into dataframes. Transform the labels to be zero-based since the original labels start from 1.
###Code
sqlContext = SQLContext(sc)
from pyspark.sql.types import *
from pyspark.sql import Row
Rating = Row("userId", "itemId", "label")
User = Row("userId", "gender", "age" ,"occupation")
Item = Row("itemId", "title" ,"genres")
ratings = sc.parallelize(movielens_data)\
.map(lambda l: (int(l[0]), int(l[1]), int(l[2])-1))\
.map(lambda r: Rating(*r))
ratingDF = sqlContext.createDataFrame(ratings)
users= sc.textFile("/tmp/movielens/ml-1m/users.dat")\
.map(lambda l: l.split("::")[0:4])\
.map(lambda l: (int(l[0]), l[1], int(l[2]), int(l[3])))\
.map(lambda r: User(*r))
userDF = sqlContext.createDataFrame(users)
items = sc.textFile("/tmp/movielens/ml-1m/movies.dat")\
.map(lambda l: l.split("::")[0:3])\
.map(lambda l: (int(l[0]), l[1], l[2].split('|')[0]))\
.map(lambda r: Item(*r))
itemDF = sqlContext.createDataFrame(items)
###Output
_____no_output_____
###Markdown
Join the data together and transform it. For example, gender is going to be used as a categorical feature, and age and gender will be used as crossed features.
###Code
from pyspark.sql.functions import col, udf
gender_udf = udf(lambda gender: categorical_from_vocab_list(gender, ["F", "M"], start=1))
bucket_cross_udf = udf(lambda feature1, feature2: hash_bucket(str(feature1) + "_" + str(feature2), bucket_size=100))
genres_list = ["Crime", "Romance", "Thriller", "Adventure", "Drama", "Children's",
"War", "Documentary", "Fantasy", "Mystery", "Musical", "Animation", "Film-Noir", "Horror",
"Western", "Comedy", "Action", "Sci-Fi"]
genres_udf = udf(lambda genres: categorical_from_vocab_list(genres, genres_list, start=1))
allDF = ratingDF.join(userDF, ["userId"]).join(itemDF, ["itemId"]) \
.withColumn("gender", gender_udf(col("gender")).cast("int")) \
.withColumn("age-gender", bucket_cross_udf(col("age"), col("gender")).cast("int")) \
.withColumn("genres", genres_udf(col("genres")).cast("int"))
allDF.show(5)
###Output
+------+------+-----+------+---+----------+--------------+------+----------+
|itemId|userId|label|gender|age|occupation| title|genres|age-gender|
+------+------+-----+------+---+----------+--------------+------+----------+
| 26| 3391| 3| 2| 18| 4|Othello (1995)| 5| 24|
| 26| 1447| 4| 2| 18| 4|Othello (1995)| 5| 24|
| 26| 5107| 3| 1| 45| 0|Othello (1995)| 5| 5|
| 26| 2878| 3| 1| 50| 20|Othello (1995)| 5| 47|
| 26| 1527| 1| 2| 18| 10|Othello (1995)| 5| 24|
+------+------+-----+------+---+----------+--------------+------+----------+
only showing top 5 rows
###Markdown
Specify the data feature information shared by the WideAndDeep model and its feature generation. Here, we use occupation and gender for the wide base part, age crossed with gender as the wide cross part, genres and gender as indicators, and userId and itemId for embeddings.
###Code
bucket_size = 100
column_info = ColumnFeatureInfo(
wide_base_cols=["occupation", "gender"],
wide_base_dims=[21, 3],
wide_cross_cols=["age-gender"],
wide_cross_dims=[bucket_size],
indicator_cols=["genres", "gender"],
indicator_dims=[19, 3],
embed_cols=["userId", "itemId"],
embed_in_dims=[max_user_id, max_movie_id],
embed_out_dims=[64, 64],
continuous_cols=["age"])
###Output
_____no_output_____
###Markdown
Transform the data into an RDD of Sample. We use the optimizer of BigDL directly to train the model, and it requires the data to be provided as an RDD of Sample. A Sample is a BigDL data structure that can be constructed from 2 numpy arrays, the feature and the label; the API is Sample.from_ndarray(feature, label). The Wide&Deep model needs two input tensors: a SparseTensor for the wide model and a DenseTensor for the deep model.
###Code
rdds = allDF.rdd\
.map(lambda row: to_user_item_feature(row, column_info))\
.repartition(4)
trainPairFeatureRdds, valPairFeatureRdds = rdds.randomSplit([0.8, 0.2], seed= 1)
valPairFeatureRdds.persist()
train_data= trainPairFeatureRdds.map(lambda pair_feature: pair_feature.sample)
test_data= valPairFeatureRdds.map(lambda pair_feature: pair_feature.sample)
###Output
_____no_output_____
###Markdown
Create the Wide&Deep model. In Analytics Zoo, it is simple to build the Wide&Deep model by calling the WideAndDeep API. You need to specify the model type and the class number, as well as the column information of the features according to your data. You can also change other default parameters of the network, such as the hidden layers. The model can be fed into an Optimizer of BigDL or an NNClassifier of Analytics Zoo. Please refer to the documentation for more details. In this example, we demonstrate how to use the optimizer of BigDL.
###Code
wide_n_deep = WideAndDeep(5, column_info, "wide_n_deep")
###Output
creating: createZooKerasInput
creating: createZooKerasInput
creating: createZooKerasInput
creating: createZooKerasInput
creating: createZooKerasSparseDense
creating: createZooKerasFlatten
creating: createZooKerasSelect
creating: createZooKerasEmbedding
creating: createZooKerasFlatten
creating: createZooKerasFlatten
creating: createZooKerasSelect
creating: createZooKerasEmbedding
creating: createZooKerasFlatten
creating: createZooKerasMerge
creating: createZooKerasDense
creating: createZooKerasDense
creating: createZooKerasDense
creating: createZooKerasDense
creating: createZooKerasMerge
creating: createZooKerasActivation
creating: createZooKerasModel
creating: createZooWideAndDeep
###Markdown
Create optimizer and train the model
###Code
wide_n_deep.compile(optimizer = "adam",
loss= "sparse_categorical_crossentropy",
metrics=['accuracy'])
tmp_log_dir = create_tmp_path()
wide_n_deep.set_tensorboard(tmp_log_dir, "training_wideanddeep")
###Output
_____no_output_____
###Markdown
Train the network. Wait some time until it finishes. Voila! You've got a trained model.
###Code
%%time
# Boot training process
wide_n_deep.fit(train_data,
batch_size = 8000,
nb_epoch = 10,
validation_data = test_data)
print("Optimization Done.")
###Output
Optimization Done.
CPU times: user 54.3 ms, sys: 19.7 ms, total: 74 ms
Wall time: 2min 30s
###Markdown
Prediction and recommendation Zoo models make inferences on the given data using the model.predict(val_rdd) API. An RDD of results is returned. predict_class returns the predicted labels.
###Code
results = wide_n_deep.predict(test_data)
results.take(5)
results_class = wide_n_deep.predict_class(test_data)
results_class.take(5)
###Output
_____no_output_____
###Markdown
In Analytics Zoo, Recommender provides 3 unique APIs to predict user-item pairs and make recommendations for users or items given candidates. Predict for user-item pairs
###Code
userItemPairPrediction = wide_n_deep.predict_user_item_pair(valPairFeatureRdds)
for result in userItemPairPrediction.take(5): print(result)
###Output
UserItemPrediction [user_id: 5305, item_id: 26, prediction: 4, probability: 0.447520256042]
UserItemPrediction [user_id: 1150, item_id: 26, prediction: 2, probability: 0.42147180438]
UserItemPrediction [user_id: 4294, item_id: 26, prediction: 4, probability: 0.338612318039]
UserItemPrediction [user_id: 5948, item_id: 26, prediction: 5, probability: 0.385789096355]
UserItemPrediction [user_id: 3825, item_id: 26, prediction: 2, probability: 0.292931675911]
###Markdown
Recommend 3 items for each user given candidates in the feature RDDs
###Code
userRecs = wide_n_deep.recommend_for_user(valPairFeatureRdds, 3)
for result in userRecs.take(5): print(result)
###Output
UserItemPrediction [user_id: 4904, item_id: 1221, prediction: 5, probability: 0.901316523552]
UserItemPrediction [user_id: 4904, item_id: 593, prediction: 5, probability: 0.890776693821]
UserItemPrediction [user_id: 4904, item_id: 913, prediction: 5, probability: 0.888917982578]
UserItemPrediction [user_id: 1084, item_id: 50, prediction: 5, probability: 0.632001161575]
UserItemPrediction [user_id: 1084, item_id: 912, prediction: 5, probability: 0.584099054337]
###Markdown
Recommend 3 users for each item given candidates in the feature RDDs
###Code
itemRecs = wide_n_deep.recommend_for_item(valPairFeatureRdds, 3)
for result in itemRecs.take(5): print(result)
###Output
UserItemPrediction [user_id: 1835, item_id: 1084, prediction: 5, probability: 0.745298802853]
UserItemPrediction [user_id: 3864, item_id: 1084, prediction: 5, probability: 0.744241654873]
UserItemPrediction [user_id: 5582, item_id: 1084, prediction: 5, probability: 0.739497065544]
UserItemPrediction [user_id: 4511, item_id: 3764, prediction: 4, probability: 0.44239372015]
UserItemPrediction [user_id: 116, item_id: 3764, prediction: 4, probability: 0.365347951651]
###Markdown
Draw the convergence curve
###Code
#retrieve train and validation summary object and read the loss data into ndarray's.
train_loss = np.array(wide_n_deep.get_train_summary("Loss"))
val_loss = np.array(wide_n_deep.get_validation_summary("Loss"))
#plot the train and validation curves
# each event data is a tuple in form of (iteration_count, value, timestamp)
plt.figure(figsize = (12,6))
plt.plot(train_loss[:,0],train_loss[:,1],label='train loss')
plt.plot(val_loss[:,0],val_loss[:,1],label='val loss',color='green')
plt.scatter(val_loss[:,0],val_loss[:,1],color='green')
plt.legend();
plt.xlim(0,train_loss.shape[0]+10)
plt.grid(True)
plt.title("loss")
###Output
_____no_output_____
###Markdown
plot accuracy
###Code
plt.figure(figsize = (12,6))
top1 = np.array(wide_n_deep.get_validation_summary("Top1Accuracy"))
plt.plot(top1[:,0],top1[:,1],label='top1')
plt.title("top1 accuracy")
plt.grid(True)
plt.legend();
plt.xlim(0,train_loss.shape[0]+10)
valPairFeatureRdds.unpersist()
sc.stop()
###Output
_____no_output_____
###Markdown
Wide & Deep Recommender Demo The Wide and Deep Learning Model, proposed by Google in 2016, is a DNN-Linear mixed model. Wide and deep learning has been used in the Google App Store for app recommendation. In this tutorial, we use the Recommender API of Analytics Zoo to build a wide linear model and a deep neural network, which together are called the Wide&Deep model, and use the optimizer of BigDL to train the network. The Wide&Deep model combines the strengths of memorization and generalization. It is useful for generic large-scale regression and classification problems with sparse input features (e.g., categorical features with a large number of possible feature values). Initialization * Import the necessary libraries
###Code
from zoo.models.recommendation import *
from zoo.models.recommendation.utils import *
from zoo.common.nncontext import init_nncontext
import os
import sys
import datetime as dt
from bigdl.dataset.transformer import *
from bigdl.dataset.base import *
from bigdl.nn.criterion import *
from bigdl.optim.optimizer import *
from bigdl.util.common import *
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
%pylab inline
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
* Initialize the NN context; it will get a SparkContext with an optimized configuration for BigDL performance.
###Code
sc = init_nncontext("WideAndDeep Example")
###Output
_____no_output_____
###Markdown
Data Preparation * Download and read the MovieLens 1M rating data and check its dimensions.
###Code
from bigdl.dataset import movielens
movielens_data = movielens.get_id_ratings("/tmp/movielens/")
min_user_id = np.min(movielens_data[:,0])
max_user_id = np.max(movielens_data[:,0])
min_movie_id = np.min(movielens_data[:,1])
max_movie_id = np.max(movielens_data[:,1])
rating_labels= np.unique(movielens_data[:,2])
print(movielens_data.shape)
print(min_user_id, max_user_id, min_movie_id, max_movie_id, rating_labels)
###Output
(1000209, 3)
1 6040 1 3952 [1 2 3 4 5]
###Markdown
* Transform the ratings into a dataframe, and read the user and item data into dataframes.
###Code
sqlContext = SQLContext(sc)
from pyspark.sql.types import *
from pyspark.sql import Row
Rating = Row("userId", "itemId", "label")
User = Row("userId", "gender", "age" ,"occupation")
Item = Row("itemId", "title" ,"genres")
ratings = sc.parallelize(movielens_data) \
.map(lambda line: map(int, line)) \
.map(lambda r: Rating(*r))
ratingDF = sqlContext.createDataFrame(ratings)
users= sc.textFile("/tmp/movielens/ml-1m/users.dat")\
.map(lambda line: line.split("::")[0:4])\
.map(lambda line: (int(line[0]), line[1], int(line[2]), int(line[3])))\
.map(lambda r: User(*r))
userDF = sqlContext.createDataFrame(users)
items = sc.textFile("/tmp/movielens/ml-1m/movies.dat") \
.map(lambda line: line.split("::")[0:3]) \
.map(lambda line: (int(line[0]), line[1], line[2].split('|')[0])) \
.map(lambda r: Item(*r))
itemDF = sqlContext.createDataFrame(items)
###Output
_____no_output_____
###Markdown
* Join the data together and transform it. For example, gender is going to be used as a categorical feature, and age and gender will be used as crossed features.
###Code
from pyspark.sql.functions import col, udf
gender_udf = udf(lambda gender: categorical_from_vocab_list(gender, ["F", "M"], start=1))
bucket_cross_udf = udf(lambda feature1, feature2: hash_bucket(str(feature1) + "_" + str(feature2), bucket_size=100))
genres_list = ["Crime", "Romance", "Thriller", "Adventure", "Drama", "Children's",
"War", "Documentary", "Fantasy", "Mystery", "Musical", "Animation", "Film-Noir", "Horror",
"Western", "Comedy", "Action", "Sci-Fi"]
genres_udf = udf(lambda genres: categorical_from_vocab_list(genres, genres_list, start=1))
allDF = ratingDF.join(userDF, ["userId"]).join(itemDF, ["itemId"]) \
.withColumn("gender", gender_udf(col("gender")).cast("int")) \
.withColumn("age-gender", bucket_cross_udf(col("age"), col("gender")).cast("int")) \
.withColumn("genres", genres_udf(col("genres")).cast("int"))
allDF.show(5)
###Output
+------+------+-----+------+---+----------+--------------+------+----------+
|itemId|userId|label|gender|age|occupation| title|genres|age-gender|
+------+------+-----+------+---+----------+--------------+------+----------+
| 26| 3391| 4| 2| 18| 4|Othello (1995)| 5| 24|
| 26| 1447| 5| 2| 18| 4|Othello (1995)| 5| 24|
| 26| 5107| 4| 1| 45| 0|Othello (1995)| 5| 5|
| 26| 2878| 4| 1| 50| 20|Othello (1995)| 5| 47|
| 26| 1527| 2| 2| 18| 10|Othello (1995)| 5| 24|
+------+------+-----+------+---+----------+--------------+------+----------+
only showing top 5 rows
###Markdown
* Specify the data feature information shared by the WideAndDeep model and its feature generation. Here, we use occupation and gender for the wide base part, age crossed with gender as the wide cross part, genres and gender as indicators, and userId and itemId for embeddings.
###Code
bucket_size = 100
column_info = ColumnFeatureInfo(
wide_base_cols=["occupation", "gender"],
wide_base_dims=[21, 3],
wide_cross_cols=["age-gender"],
wide_cross_dims=[bucket_size],
indicator_cols=["genres", "gender"],
indicator_dims=[19, 3],
embed_cols=["userId", "itemId"],
embed_in_dims=[max_user_id, max_movie_id],
embed_out_dims=[64, 64],
continuous_cols=["age"])
###Output
_____no_output_____
###Markdown
* Transform the data into an RDD of Sample. We use the optimizer of BigDL directly to train the model, and it requires the data to be provided as an RDD of Sample. A Sample is a BigDL data structure that can be constructed from 2 numpy arrays, the feature and the label; the API is Sample.from_ndarray(feature, label). The Wide&Deep model needs two input tensors: a SparseTensor for the wide model and a DenseTensor for the deep model.
###Code
rdds = allDF.rdd.map(lambda row: to_user_item_feature(row, column_info))
trainPairFeatureRdds, valPairFeatureRdds = rdds.randomSplit([0.8, 0.2], seed= 1)
valPairFeatureRdds.persist()
train_data= trainPairFeatureRdds.map(lambda pair_feature: pair_feature.sample)
test_data= valPairFeatureRdds.map(lambda pair_feature: pair_feature.sample)
###Output
_____no_output_____
###Markdown
Create the Wide&Deep model. * In Analytics Zoo, it is simple to build the Wide&Deep model by calling the WideAndDeep API. You need to specify the model type and the class number, as well as the column information of the features according to your data. You can also change other default parameters of the network, such as the hidden layers. The model can be fed into an Optimizer of BigDL or an NNClassifier of Analytics Zoo. Please refer to the documentation for more details. In this example, we demonstrate how to use the optimizer of BigDL.
###Code
wide_n_deep = WideAndDeep(5, column_info, "wide_n_deep")
###Output
creating: createZooWideAndDeep
###Markdown
Create optimizer and train the model
###Code
# Create an Optimizer
batch_size = 8000
optimizer = Optimizer(
model=wide_n_deep,
training_rdd=train_data,
criterion=ClassNLLCriterion(),
optim_method=Adam(learningrate = 0.001, learningrate_decay=0.00005),
end_trigger=MaxEpoch(10),
batch_size=batch_size)
# Set the validation logic
optimizer.set_validation(
batch_size=batch_size,
val_rdd=test_data,
trigger=EveryEpoch(),
val_method=[Top1Accuracy(), Loss(ClassNLLCriterion())]
)
log_dir='/tmp/bigdl_summaries/'
app_name='wide_n_deep-'+dt.datetime.now().strftime("%Y%m%d-%H%M%S")
train_summary = TrainSummary(log_dir=log_dir,
app_name=app_name)
val_summary = ValidationSummary(log_dir=log_dir,
app_name=app_name)
optimizer.set_train_summary(train_summary)
optimizer.set_val_summary(val_summary)
print("saving logs to %s" % (log_dir + app_name))
###Output
creating: createClassNLLCriterion
creating: createAdam
creating: createMaxEpoch
creating: createDistriOptimizer
creating: createEveryEpoch
creating: createTop1Accuracy
creating: createClassNLLCriterion
creating: createLoss
creating: createTrainSummary
creating: createValidationSummary
saving logs to /tmp/bigdl_summaries/wide_n_deep-20180510-153601
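###Markdown
Optionally, snapshots of the model can be written out during training so a long run can be resumed or inspected later. A hedged sketch using the BigDL Optimizer's set_checkpoint; the checkpoint path below is made up and would have to exist before training, so the lines are left commented out:
###Code
# Hedged sketch, not executed here: save a model snapshot at the end of every epoch.
# checkpoint_path = "/tmp/bigdl_checkpoints/" # hypothetical directory, must exist
# optimizer.set_checkpoint(EveryEpoch(), checkpoint_path)
###Output
_____no_output_____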
###Markdown
Train the network. Wait some time until it finishes. Voila! You've got a trained model.
###Code
%%time
# Boot training process
optimizer.optimize()
print("Optimization Done.")
###Output
Optimization Done.
CPU times: user 76.6 ms, sys: 23.9 ms, total: 101 ms
Wall time: 2min 21s
###Markdown
Prediction and recommendation * Zoo models make inferences on the given data using the model.predict(val_rdd) API. An RDD of results is returned. predict_class returns the predicted labels.
###Code
results = wide_n_deep.predict(test_data)
results.take(5)
results_class = wide_n_deep.predict_class(test_data)
results_class.take(5)
###Output
_____no_output_____
###Markdown
* In Analytics Zoo, Recommender provides 3 unique APIs to predict user-item pairs and make recommendations for users or items given candidates. * Predict for user-item pairs
###Code
userItemPairPrediction = wide_n_deep.predict_user_item_pair(valPairFeatureRdds)
for result in userItemPairPrediction.take(5): print(result)
###Output
UserItemPrediction [user_id: 3391, item_id: 26, prediction: 4, probability: 0.505555750808]
UserItemPrediction [user_id: 5576, item_id: 26, prediction: 4, probability: 0.426795236704]
UserItemPrediction [user_id: 963, item_id: 26, prediction: 3, probability: 0.450544390906]
UserItemPrediction [user_id: 3808, item_id: 26, prediction: 3, probability: 0.441043459719]
UserItemPrediction [user_id: 4508, item_id: 26, prediction: 3, probability: 0.445120355293]
###Markdown
* Recommend 3 items for each user given candidates in the feature RDDs
###Code
userRecs = wide_n_deep.recommend_for_user(valPairFeatureRdds, 3)
for result in userRecs.take(5): print(result)
###Output
UserItemPrediction [user_id: 1200, item_id: 1249, prediction: 5, probability: 0.43618989922]
UserItemPrediction [user_id: 1200, item_id: 2580, prediction: 4, probability: 0.382430271314]
UserItemPrediction [user_id: 1200, item_id: 3753, prediction: 4, probability: 0.372660889873]
UserItemPrediction [user_id: 2200, item_id: 3697, prediction: 3, probability: 0.452656332471]
UserItemPrediction [user_id: 2200, item_id: 196, prediction: 3, probability: 0.431529754179]
###Markdown
* Recommend 3 users for each item given candidates in the feature RDDs
###Code
itemRecs = wide_n_deep.recommend_for_item(valPairFeatureRdds, 3)
for result in itemRecs.take(5): print(result)
###Output
UserItemPrediction [user_id: 1856, item_id: 1200, prediction: 5, probability: 0.877071348293]
UserItemPrediction [user_id: 3853, item_id: 1200, prediction: 5, probability: 0.797336803939]
UserItemPrediction [user_id: 953, item_id: 1200, prediction: 5, probability: 0.794792014027]
UserItemPrediction [user_id: 1349, item_id: 2000, prediction: 5, probability: 0.790950447552]
UserItemPrediction [user_id: 288, item_id: 2000, prediction: 5, probability: 0.771050306327]
###Markdown
Evaluate the trained model
###Code
%%time
evaluate_result=wide_n_deep.evaluate(test_data, 2800, [Top1Accuracy()])
print("Top1 accuracy: %s" % evaluate_result[0].result)
###Output
creating: createTop1Accuracy
Top1 accuracy: 0.464258462191
CPU times: user 25.2 ms, sys: 7.76 ms, total: 33 ms
Wall time: 2.16 s
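###Markdown
The evaluate call accepts a list of validation methods, so the loss can be reported alongside accuracy. A hedged sketch reusing the Loss and ClassNLLCriterion constructors already imported above (left commented out since it was not run in this notebook):
###Code
# Hedged sketch: report both accuracy and NLL loss on the held-out data.
# metrics = wide_n_deep.evaluate(test_data, 2800, [Top1Accuracy(), Loss(ClassNLLCriterion())])
# for m in metrics: print(m.result)
###Output
_____no_output_____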
###Markdown
Draw the convergence curve
###Code
loss = np.array(train_summary.read_scalar("Loss"))
top1 = np.array(val_summary.read_scalar("Top1Accuracy"))
plt.figure(figsize = (12,12))
plt.subplot(2,1,1)
plt.plot(loss[:,0],loss[:,1],label='loss')
plt.xlim(0,loss.shape[0]+10)
plt.grid(True)
plt.title("loss")
plt.subplot(2,1,2)
plt.plot(top1[:,0],top1[:,1],label='top1')
plt.xlim(0,loss.shape[0]+10)
plt.title("top1 accuracy")
plt.grid(True)
valPairFeatureRdds.unpersist()
###Output
_____no_output_____ |
MNIST_MLP.ipynb | ###Markdown
###Code
# BASED ON RASCHKA (2015)
import os
import struct
import numpy as np
import pandas
import time
import matplotlib.pyplot as plt
%matplotlib inline
# MIGHT NEED TO CHANGE PATH BASED ON HOW YOU EXTRACT MNIST
# THIS FUNCTION APPENDS '<kind>-labels.idx1-ubyte' AND '<kind>-images.idx3-ubyte' TO THE PATH
def load_trmnist(path, kind='train'):
"""Load MNIST data from `path`"""
labels_path = os.path.join(path, '%s-labels.idx1-ubyte' % kind)
images_path = os.path.join(path, '%s-images.idx3-ubyte' % kind)
with open(labels_path, 'rb') as lbpath:
magic, n = struct.unpack('>II', lbpath.read(8))
labels = np.fromfile(lbpath, dtype=np.uint8)
with open(images_path, 'rb') as imgpath:
magic, num, rows, cols = struct.unpack(">IIII", imgpath.read(16))
images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)
return images, labels
# MIGHT NEED TO CHANGE PATH BASED ON HOW YOU EXTRACT MNIST
# THIS FUNCTION APPENDS '<kind>-labels.idx1-ubyte' AND '<kind>-images.idx3-ubyte' TO THE PATH
def load_temnist(path, kind='t10k'):
"""Load MNIST data from `path`"""
labels_path = os.path.join(path, '%s-labels.idx1-ubyte' % kind)
images_path = os.path.join(path, '%s-images.idx3-ubyte' % kind)
with open(labels_path, 'rb') as lbpath:
magic, n = struct.unpack('>II', lbpath.read(8))
labels = np.fromfile(lbpath, dtype=np.uint8)
with open(images_path, 'rb') as imgpath:
magic, num, rows, cols = struct.unpack(">IIII", imgpath.read(16))
images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)
return images, labels
#Packages
import os
print(os.listdir('./'))
from google.colab import drive
drive.mount('/content/drive')
os.chdir('./drive/My Drive/Colab Notebooks/data')
print(os.listdir())
X_train, y_train = load_trmnist('./mnist_data/', kind='train')
print('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))
X_test, y_test = load_temnist('mnist_data/', kind='t10k')
print('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))
# VISUALIZE EACH OF THE DIGITS
fig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(10):
img = X_train[y_train == i][0].reshape(28, 28)
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
# plt.savefig('./figures/mnist_all.png', dpi=300)
plt.show()
# VISUALIZE DIFFERENT VARIATIONS OF 9
fig, ax = plt.subplots(nrows=5, ncols=4, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(20):
img = X_train[y_train == 9][i].reshape(28, 28)
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
# plt.savefig('./figures/mnist_9.png', dpi=300)
plt.show()
# CODE FROM RASCHKA (2015) TO OUTPUT TO CSV
# REQUIRES LARGE STORAGE AND SLOW PROCESSING TIME TO READ CSV BACK
# RECOMMEND STICKING WITH IMPORTED UBYTE FROM ABOVE
# np.savetxt('train_img.csv', X_train, fmt='%i', delimiter=',')
# np.savetxt('train_labels.csv', y_train, fmt='%i', delimiter=',')
# X_train = np.genfromtxt('train_img.csv', dtype=int, delimiter=',')
# y_train = np.genfromtxt('train_labels.csv', dtype=int, delimiter=',')
# np.savetxt('test_img.csv', X_test, fmt='%i', delimiter=',')
# np.savetxt('test_labels.csv', y_test, fmt='%i', delimiter=',')
# X_test = np.genfromtxt('test_img.csv', dtype=int, delimiter=',')
# y_test = np.genfromtxt('test_labels.csv', dtype=int, delimiter=',')
# FIRST PASS WITH MLP
import numpy as np
from scipy.special import expit
import sys
class NeuralNetMLP(object):
""" Feedforward neural network / Multi-layer perceptron classifier.
Parameters
------------
n_output : int
Number of output units, should be equal to the
number of unique class labels.
n_features : int
Number of features (dimensions) in the target dataset.
Should be equal to the number of columns in the X array.
n_hidden : int (default: 30)
Number of hidden units.
l1 : float (default: 0.0)
Lambda value for L1-regularization.
No regularization if l1=0.0 (default)
l2 : float (default: 0.0)
Lambda value for L2-regularization.
No regularization if l2=0.0 (default)
epochs : int (default: 500)
Number of passes over the training set.
eta : float (default: 0.001)
Learning rate.
alpha : float (default: 0.0)
Momentum constant. Factor multiplied with the
gradient of the previous epoch t-1 to improve
learning speed
w(t) := w(t) - (grad(t) + alpha*grad(t-1))
decrease_const : float (default: 0.0)
Decrease constant. Shrinks the learning rate
after each epoch via eta / (1 + epoch*decrease_const)
shuffle : bool (default: True)
Shuffles training data every epoch if True to prevent circles.
minibatches : int (default: 1)
Divides training data into k minibatches for efficiency.
Normal gradient descent learning if k=1 (default).
random_state : int (default: None)
Set random state for shuffling and initializing the weights.
Attributes
-----------
cost_ : list
Sum of squared errors after each epoch.
"""
def __init__(self, n_output, n_features, n_hidden=30,
l1=0.0, l2=0.0, epochs=500, eta=0.001,
alpha=0.0, decrease_const=0.0, shuffle=True,
minibatches=1, random_state=None):
np.random.seed(random_state)
self.n_output = n_output
self.n_features = n_features
self.n_hidden = n_hidden
self.w1, self.w2 = self._initialize_weights()
self.l1 = l1
self.l2 = l2
self.epochs = epochs
self.eta = eta
self.alpha = alpha
self.decrease_const = decrease_const
self.shuffle = shuffle
self.minibatches = minibatches
def _encode_labels(self, y, k):
"""Encode labels into one-hot representation
Parameters
------------
y : array, shape = [n_samples]
Target values.
Returns
-----------
onehot : array, shape = (n_labels, n_samples)
"""
onehot = np.zeros((k, y.shape[0]))
for idx, val in enumerate(y):
onehot[val, idx] = 1.0
return onehot
def _initialize_weights(self):
"""Initialize weights with small random numbers."""
w1 = np.random.uniform(-1.0, 1.0, size=self.n_hidden*(self.n_features + 1))
w1 = w1.reshape(self.n_hidden, self.n_features + 1)
w2 = np.random.uniform(-1.0, 1.0, size=self.n_output*(self.n_hidden + 1))
w2 = w2.reshape(self.n_output, self.n_hidden + 1)
return w1, w2
def _sigmoid(self, z):
"""Compute logistic function (sigmoid)
Uses scipy.special.expit to avoid overflow
error for very small input values z.
"""
# return 1.0 / (1.0 + np.exp(-z))
return expit(z)
def _sigmoid_gradient(self, z):
"""Compute gradient of the logistic function"""
sg = self._sigmoid(z)
return sg * (1 - sg)
def _add_bias_unit(self, X, how='column'):
"""Add bias unit (column or row of 1s) to array at index 0"""
if how == 'column':
X_new = np.ones((X.shape[0], X.shape[1]+1))
X_new[:, 1:] = X
elif how == 'row':
X_new = np.ones((X.shape[0]+1, X.shape[1]))
X_new[1:, :] = X
else:
raise AttributeError('`how` must be `column` or `row`')
return X_new
def _feedforward(self, X, w1, w2):
"""Compute feedforward step
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
----------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
z3 : array, shape = [n_output_units, n_samples]
Net input of output layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
"""
a1 = self._add_bias_unit(X, how='column')
z2 = w1.dot(a1.T)
a2 = self._sigmoid(z2)
a2 = self._add_bias_unit(a2, how='row')
z3 = w2.dot(a2)
a3 = self._sigmoid(z3)
return a1, z2, a2, z3, a3
def _L2_reg(self, lambda_, w1, w2):
"""Compute L2-regularization cost"""
return (lambda_/2.0) * (np.sum(w1[:, 1:] ** 2) + np.sum(w2[:, 1:] ** 2))
def _L1_reg(self, lambda_, w1, w2):
"""Compute L1-regularization cost"""
return (lambda_/2.0) * (np.abs(w1[:, 1:]).sum() + np.abs(w2[:, 1:]).sum())
def _get_cost(self, y_enc, output, w1, w2):
"""Compute cost function.
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
output : array, shape = [n_output_units, n_samples]
Activation of the output layer (feedforward)
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
cost : float
Regularized cost.
"""
term1 = -y_enc * (np.log(output))
term2 = (1 - y_enc) * np.log(1 - output)
cost = np.sum(term1 - term2)
L1_term = self._L1_reg(self.l1, w1, w2)
L2_term = self._L2_reg(self.l2, w1, w2)
cost = cost + L1_term + L2_term
return cost
def _get_gradient(self, a1, a2, a3, z2, y_enc, w1, w2):
""" Compute gradient step using backpropagation.
Parameters
------------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
grad1 : array, shape = [n_hidden_units, n_features]
Gradient of the weight matrix w1.
grad2 : array, shape = [n_output_units, n_hidden_units]
Gradient of the weight matrix w2.
"""
# backpropagation
sigma3 = a3 - y_enc
z2 = self._add_bias_unit(z2, how='row')
sigma2 = w2.T.dot(sigma3) * self._sigmoid_gradient(z2)
sigma2 = sigma2[1:, :]
grad1 = sigma2.dot(a1)
grad2 = sigma3.dot(a2.T)
# regularize
grad1[:, 1:] += (w1[:, 1:] * (self.l1 + self.l2))
grad2[:, 1:] += (w2[:, 1:] * (self.l1 + self.l2))
return grad1, grad2
def predict(self, X):
"""Predict class labels
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
Returns:
----------
y_pred : array, shape = [n_samples]
Predicted class labels.
"""
if len(X.shape) != 2:
raise AttributeError('X must be a [n_samples, n_features] array.\n'
'Use X[:,None] for 1-feature classification,'
'\nor X[[i]] for 1-sample classification')
a1, z2, a2, z3, a3 = self._feedforward(X, self.w1, self.w2)
y_pred = np.argmax(z3, axis=0)
return y_pred
# FUNCTION FOR FITTING MLP
def fit(self, X, y, print_progress=False):
""" Learn weights from training data.
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
y : array, shape = [n_samples]
Target class labels.
print_progress : bool (default: False)
Prints progress as the number of epochs
to stderr.
Returns:
----------
self
"""
self.cost_ = []
X_data, y_data = X.copy(), y.copy()
y_enc = self._encode_labels(y, self.n_output)
delta_w1_prev = np.zeros(self.w1.shape)
delta_w2_prev = np.zeros(self.w2.shape)
for i in range(self.epochs):
# adaptive learning rate
self.eta /= (1 + self.decrease_const*i)
if print_progress:
sys.stderr.write('\rEpoch: %d/%d' % (i+1, self.epochs))
sys.stderr.flush()
if self.shuffle:
idx = np.random.permutation(y_data.shape[0])
X_data, y_enc = X_data[idx], y_enc[:, idx]
mini = np.array_split(range(y_data.shape[0]), self.minibatches)
for idx in mini:
# feedforward
a1, z2, a2, z3, a3 = self._feedforward(X_data[idx], self.w1, self.w2)
cost = self._get_cost(y_enc=y_enc[:, idx],
output=a3,
w1=self.w1,
w2=self.w2)
self.cost_.append(cost)
# compute gradient via backpropagation
grad1, grad2 = self._get_gradient(a1=a1, a2=a2,
a3=a3, z2=z2,
y_enc=y_enc[:, idx],
w1=self.w1,
w2=self.w2)
delta_w1, delta_w2 = self.eta * grad1, self.eta * grad2
self.w1 -= (delta_w1 + (self.alpha * delta_w1_prev))
self.w2 -= (delta_w2 + (self.alpha * delta_w2_prev))
delta_w1_prev, delta_w2_prev = delta_w1, delta_w2
return self
nn = NeuralNetMLP(n_output=10,
n_features=X_train.shape[1],
n_hidden=50,
l2=0.1,
l1=0.0,
epochs=1000,
eta=0.001,
alpha=0.001,
decrease_const=0.00001,
minibatches=50,
shuffle=True,
random_state=1)
start = time.time()
nn.fit(X_train, y_train, print_progress=True)
end = time.time()
final_time = end-start
print(final_time)
# ROUGH PLOT FOR EACH OF THE 50 BATCH RUNS
plt.plot(range(len(nn.cost_)), nn.cost_)
plt.ylim([0, 2000])
plt.ylabel('Cost')
plt.xlabel('Epochs * 50')
plt.tight_layout()
# plt.savefig('./figures/cost.png', dpi=300)
plt.show()
batches = np.array_split(range(len(nn.cost_)), 1000)
cost_ary = np.array(nn.cost_)
cost_avgs = [np.mean(cost_ary[i]) for i in batches]
plt.plot(range(len(cost_avgs)), cost_avgs, color='red')
plt.ylim([0, 2000])
plt.ylabel('Cost')
plt.xlabel('Epochs')
plt.tight_layout()
#plt.savefig('./figures/cost2.png', dpi=300)
plt.show()
import sys
y_train_pred = nn.predict(X_train)
if sys.version_info < (3, 0):
acc = (np.sum(y_train == y_train_pred, axis=0)).astype('float') / X_train.shape[0]
else:
acc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]
print('Training accuracy: %.2f%%' % (acc * 100))
y_test_pred = nn.predict(X_test)
if sys.version_info < (3, 0):
acc = (np.sum(y_test == y_test_pred, axis=0)).astype('float') / X_test.shape[0]
else:
acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]
print('Test accuracy: %.2f%%' % (acc * 100))
miscl_img = X_test[y_test != y_test_pred][:25]
correct_lab = y_test[y_test != y_test_pred][:25]
miscl_lab= y_test_pred[y_test != y_test_pred][:25]
fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(25):
img = miscl_img[i].reshape(28, 28)
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[i].set_title('%d) t: %d p: %d' % (i+1, correct_lab[i], miscl_lab[i]))
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
# plt.savefig('./figures/mnist_miscl.png', dpi=300)
plt.show()
# GRADIENT VERSION
class MLPGradientCheck(object):
""" Feedforward neural network / Multi-layer perceptron classifier.
Parameters
------------
n_output : int
Number of output units, should be equal to the
number of unique class labels.
n_features : int
Number of features (dimensions) in the target dataset.
Should be equal to the number of columns in the X array.
n_hidden : int (default: 30)
Number of hidden units.
l1 : float (default: 0.0)
Lambda value for L1-regularization.
No regularization if l1=0.0 (default)
l2 : float (default: 0.0)
Lambda value for L2-regularization.
No regularization if l2=0.0 (default)
epochs : int (default: 500)
Number of passes over the training set.
eta : float (default: 0.001)
Learning rate.
alpha : float (default: 0.0)
Momentum constant. Factor multiplied with the
gradient of the previous epoch t-1 to improve
learning speed
w(t) := w(t) - (grad(t) + alpha*grad(t-1))
decrease_const : float (default: 0.0)
Decrease constant. Shrinks the learning rate
after each epoch via eta / (1 + epoch*decrease_const)
shuffle : bool (default: True)
Shuffles training data every epoch if True to prevent circles.
minibatches : int (default: 1)
Divides training data into k minibatches for efficiency.
Normal gradient descent learning if k=1 (default).
random_state : int (default: None)
Set random state for shuffling and initializing the weights.
Attributes
-----------
cost_ : list
Sum of squared errors after each epoch.
"""
def __init__(self, n_output, n_features, n_hidden=30,
l1=0.0, l2=0.0, epochs=500, eta=0.001,
alpha=0.0, decrease_const=0.0, shuffle=True,
minibatches=1, random_state=None):
np.random.seed(random_state)
self.n_output = n_output
self.n_features = n_features
self.n_hidden = n_hidden
self.w1, self.w2 = self._initialize_weights()
self.l1 = l1
self.l2 = l2
self.epochs = epochs
self.eta = eta
self.alpha = alpha
self.decrease_const = decrease_const
self.shuffle = shuffle
self.minibatches = minibatches
def _encode_labels(self, y, k):
"""Encode labels into one-hot representation
Parameters
------------
y : array, shape = [n_samples]
Target values.
Returns
-----------
onehot : array, shape = (n_labels, n_samples)
"""
onehot = np.zeros((k, y.shape[0]))
for idx, val in enumerate(y):
onehot[val, idx] = 1.0
return onehot
def _initialize_weights(self):
"""Initialize weights with small random numbers."""
w1 = np.random.uniform(-1.0, 1.0, size=self.n_hidden*(self.n_features + 1))
w1 = w1.reshape(self.n_hidden, self.n_features + 1)
w2 = np.random.uniform(-1.0, 1.0, size=self.n_output*(self.n_hidden + 1))
w2 = w2.reshape(self.n_output, self.n_hidden + 1)
return w1, w2
def _sigmoid(self, z):
"""Compute logistic function (sigmoid)
Uses scipy.special.expit to avoid overflow
error for very small input values z.
"""
# return 1.0 / (1.0 + np.exp(-z))
return expit(z)
def _sigmoid_gradient(self, z):
"""Compute gradient of the logistic function"""
sg = self._sigmoid(z)
return sg * (1 - sg)
def _add_bias_unit(self, X, how='column'):
"""Add bias unit (column or row of 1s) to array at index 0"""
if how == 'column':
X_new = np.ones((X.shape[0], X.shape[1]+1))
X_new[:, 1:] = X
elif how == 'row':
X_new = np.ones((X.shape[0]+1, X.shape[1]))
X_new[1:, :] = X
else:
raise AttributeError('`how` must be `column` or `row`')
return X_new
def _feedforward(self, X, w1, w2):
"""Compute feedforward step
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
----------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
z3 : array, shape = [n_output_units, n_samples]
Net input of output layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
"""
a1 = self._add_bias_unit(X, how='column')
z2 = w1.dot(a1.T)
a2 = self._sigmoid(z2)
a2 = self._add_bias_unit(a2, how='row')
z3 = w2.dot(a2)
a3 = self._sigmoid(z3)
return a1, z2, a2, z3, a3
def _L2_reg(self, lambda_, w1, w2):
"""Compute L2-regularization cost"""
return (lambda_/2.0) * (np.sum(w1[:, 1:] ** 2) + np.sum(w2[:, 1:] ** 2))
def _L1_reg(self, lambda_, w1, w2):
"""Compute L1-regularization cost"""
return (lambda_/2.0) * (np.abs(w1[:, 1:]).sum() + np.abs(w2[:, 1:]).sum())
def _get_cost(self, y_enc, output, w1, w2):
"""Compute cost function.
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
output : array, shape = [n_output_units, n_samples]
Activation of the output layer (feedforward)
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
cost : float
Regularized cost.
"""
term1 = -y_enc * (np.log(output))
term2 = (1 - y_enc) * np.log(1 - output)
cost = np.sum(term1 - term2)
L1_term = self._L1_reg(self.l1, w1, w2)
L2_term = self._L2_reg(self.l2, w1, w2)
cost = cost + L1_term + L2_term
return cost
def _get_gradient(self, a1, a2, a3, z2, y_enc, w1, w2):
""" Compute gradient step using backpropagation.
Parameters
------------
a1 : array, shape = [n_samples, n_features+1]
Input values with bias unit.
a2 : array, shape = [n_hidden+1, n_samples]
Activation of hidden layer.
a3 : array, shape = [n_output_units, n_samples]
Activation of output layer.
z2 : array, shape = [n_hidden, n_samples]
Net input of hidden layer.
y_enc : array, shape = (n_labels, n_samples)
one-hot encoded class labels.
w1 : array, shape = [n_hidden_units, n_features]
Weight matrix for input layer -> hidden layer.
w2 : array, shape = [n_output_units, n_hidden_units]
Weight matrix for hidden layer -> output layer.
Returns
---------
grad1 : array, shape = [n_hidden_units, n_features]
Gradient of the weight matrix w1.
grad2 : array, shape = [n_output_units, n_hidden_units]
Gradient of the weight matrix w2.
"""
# backpropagation
sigma3 = a3 - y_enc
z2 = self._add_bias_unit(z2, how='row')
sigma2 = w2.T.dot(sigma3) * self._sigmoid_gradient(z2)
sigma2 = sigma2[1:, :]
grad1 = sigma2.dot(a1)
grad2 = sigma3.dot(a2.T)
# regularize
grad1[:, 1:] += (w1[:, 1:] * (self.l1 + self.l2))
grad2[:, 1:] += (w2[:, 1:] * (self.l1 + self.l2))
return grad1, grad2
def _gradient_checking(self, X, y_enc, w1, w2, epsilon, grad1, grad2):
""" Apply gradient checking (for debugging only)
Returns
---------
relative_error : float
Relative error between the numerically
approximated gradients and the backpropagated gradients.
"""
num_grad1 = np.zeros(np.shape(w1))
epsilon_ary1 = np.zeros(np.shape(w1))
for i in range(w1.shape[0]):
for j in range(w1.shape[1]):
epsilon_ary1[i, j] = epsilon
a1, z2, a2, z3, a3 = self._feedforward(X, w1 - epsilon_ary1, w2)
cost1 = self._get_cost(y_enc, a3, w1-epsilon_ary1, w2)
a1, z2, a2, z3, a3 = self._feedforward(X, w1 + epsilon_ary1, w2)
cost2 = self._get_cost(y_enc, a3, w1 + epsilon_ary1, w2)
num_grad1[i, j] = (cost2 - cost1) / (2 * epsilon)
epsilon_ary1[i, j] = 0
num_grad2 = np.zeros(np.shape(w2))
epsilon_ary2 = np.zeros(np.shape(w2))
for i in range(w2.shape[0]):
for j in range(w2.shape[1]):
epsilon_ary2[i, j] = epsilon
a1, z2, a2, z3, a3 = self._feedforward(X, w1, w2 - epsilon_ary2)
cost1 = self._get_cost(y_enc, a3, w1, w2 - epsilon_ary2)
a1, z2, a2, z3, a3 = self._feedforward(X, w1, w2 + epsilon_ary2)
cost2 = self._get_cost(y_enc, a3, w1, w2 + epsilon_ary2)
num_grad2[i, j] = (cost2 - cost1) / (2 * epsilon)
epsilon_ary2[i, j] = 0
num_grad = np.hstack((num_grad1.flatten(), num_grad2.flatten()))
grad = np.hstack((grad1.flatten(), grad2.flatten()))
norm1 = np.linalg.norm(num_grad - grad)
norm2 = np.linalg.norm(num_grad)
norm3 = np.linalg.norm(grad)
relative_error = norm1 / (norm2 + norm3)
return relative_error
def predict(self, X):
"""Predict class labels
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
Returns:
----------
y_pred : array, shape = [n_samples]
Predicted class labels.
"""
if len(X.shape) != 2:
raise AttributeError('X must be a [n_samples, n_features] array.\n'
'Use X[:,None] for 1-feature classification,'
'\nor X[[i]] for 1-sample classification')
a1, z2, a2, z3, a3 = self._feedforward(X, self.w1, self.w2)
y_pred = np.argmax(z3, axis=0)
return y_pred
def fit(self, X, y, print_progress=False):
""" Learn weights from training data.
Parameters
-----------
X : array, shape = [n_samples, n_features]
Input layer with original features.
y : array, shape = [n_samples]
Target class labels.
print_progress : bool (default: False)
Prints progress as the number of epochs
to stderr.
Returns:
----------
self
"""
self.cost_ = []
X_data, y_data = X.copy(), y.copy()
y_enc = self._encode_labels(y, self.n_output)
delta_w1_prev = np.zeros(self.w1.shape)
delta_w2_prev = np.zeros(self.w2.shape)
for i in range(self.epochs):
# adaptive learning rate
self.eta /= (1 + self.decrease_const*i)
if print_progress:
sys.stderr.write('\rEpoch: %d/%d' % (i+1, self.epochs))
sys.stderr.flush()
if self.shuffle:
idx = np.random.permutation(y_data.shape[0])
                X_data, y_enc = X_data[idx], y_enc[:, idx]  # y_enc is (n_labels, n_samples), so shuffle its columns
mini = np.array_split(range(y_data.shape[0]), self.minibatches)
for idx in mini:
# feedforward
                a1, z2, a2, z3, a3 = self._feedforward(X_data[idx], self.w1, self.w2)  # use the (possibly shuffled) X_data
cost = self._get_cost(y_enc=y_enc[:, idx],
output=a3,
w1=self.w1,
w2=self.w2)
self.cost_.append(cost)
# compute gradient via backpropagation
grad1, grad2 = self._get_gradient(a1=a1, a2=a2,
a3=a3, z2=z2,
y_enc=y_enc[:, idx],
w1=self.w1,
w2=self.w2)
## start gradient checking
grad_diff = self._gradient_checking(X=X_data[idx], y_enc=y_enc[:, idx],
w1=self.w1, w2=self.w2,
epsilon=1e-5,
grad1=grad1, grad2=grad2)
if grad_diff <= 1e-7:
print('Ok: %s' % grad_diff)
elif grad_diff <= 1e-4:
print('Warning: %s' % grad_diff)
else:
print('PROBLEM: %s' % grad_diff)
# update weights; [alpha * delta_w_prev] for momentum learning
delta_w1, delta_w2 = self.eta * grad1, self.eta * grad2
self.w1 -= (delta_w1 + (self.alpha * delta_w1_prev))
self.w2 -= (delta_w2 + (self.alpha * delta_w2_prev))
delta_w1_prev, delta_w2_prev = delta_w1, delta_w2
return self
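# -----------------------------------------------------------------------------
# A small self-contained illustration (not part of the class above) of the
# central-difference idea behind _gradient_checking: for a toy cost f(w) = w**2
# the numerical estimate (f(w+eps) - f(w-eps)) / (2*eps) should agree with the
# analytic gradient 2*w, giving a relative error far below the 1e-7 "Ok"
# threshold used in fit().
def toy_gradient_check(w=3.0, eps=1e-5):
    cost = lambda v: v ** 2
    analytic = 2.0 * w
    numeric = (cost(w + eps) - cost(w - eps)) / (2 * eps)
    return abs(numeric - analytic) / (abs(numeric) + abs(analytic))
# toy_gradient_check() evaluates to roughly 1e-12, i.e. well inside the "Ok" range
# -----------------------------------------------------------------------------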
nn_check = MLPGradientCheck(n_output=10,
n_features=X_train.shape[1],
n_hidden=10,
l2=0.0,
l1=0.0,
epochs=100,
eta=0.001,
alpha=0.0,
decrease_const=0.0,
minibatches=1,
shuffle=False,
random_state=1)
# Gradient checking does two extra feedforward passes per weight, so it is only
# practical on a handful of samples:
nn_check.fit(X_train[:5], y_train[:5], print_progress=False)
# nn_check.fit(X_train, y_train, print_progress=True)  # full run: impractically slow
# ACCURACY WITH GRADIENT CHECK
y_test_pred = nn_check.predict(X_test)
if sys.version_info < (3, 0):
acc = (np.sum(y_test == y_test_pred, axis=0)).astype('float') / X_test.shape[0]
else:
acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]
print('Test accuracy: %.2f%%' % (acc * 100))
###Output
_____no_output_____
###Markdown
Network Part
###Code
# define the network
input_size = 784
hidden_sizes = [128, 64]
output_size = 10
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),
                      nn.ReLU(),
                      nn.Linear(hidden_sizes[0], hidden_sizes[1]),
                      nn.ReLU(),
                      nn.Linear(hidden_sizes[1], output_size))
# Note: no final nn.Softmax() here -- nn.CrossEntropyLoss expects raw logits and
# applies log-softmax internally, so an extra Softmax would squash the gradients.
print(model)
opt = torch.optim.SGD(model.parameters(), lr=0.003, momentum=0.9)
loss_fn = nn.CrossEntropyLoss()
epochs=15
for e in range(epochs):
total_loss = 0
    for images, labels in train_loader:
images = images.view(images.shape[0], -1) # flatten image
output = model(images)
loss = loss_fn(output, labels)
opt.zero_grad()
loss.backward()
opt.step() # update the weights
total_loss += loss.item()
else:
print("Epoch {} - Training loss: {}".format(e, total_loss/len(train_loader)))
correct = 0
total = 0
with torch.no_grad():
for data in test_loader:
images, labels = data
images = images.view(images.shape[0], -1)
output = model(images)
_, predicted = torch.max(output.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
100 * correct / total))
###Output
Accuracy of the network on the 10000 test images: 86 %
|
Colab_notebooks/Template_ZeroCostDL4Mic.ipynb | ###Markdown
This is a template for a ZeroCostDL4Mic notebook and needs to be filled with appropriate model code and information. Thank you for contributing to the ZeroCostDL4Mic Project. Please use this notebook as a template for your implementation. When your notebook is completed, please upload it to your github page and send us a link so we can reference your work. If possible, remember to provide separate training and test datasets (for quality control) containing source and target images with your finished notebooks. This is very useful so that ZeroCostDL4Mic users can test your notebook. **Name of the Network**--- Description of the network and link to publication with author reference. [author et al, etc.](URL).---*Disclaimer*:This notebook is inspired from the *Zero-Cost Deep-Learning to Enhance Microscopy* project (ZeroCostDL4Mic) (https://github.com/HenriquesLab/DeepLearning_Collab/wiki) and was created by **Your name**This notebook is based on the following paper: **Original Title of the paper**, Journal, volume, pages, year and complete author list, [link to paper](URL)And source code found in: *provide github link or equivalent if applicable*Provide information on dataset availability and link for download if applicable.**Please also cite this original paper when using or developing this notebook.** **How to use this notebook?**---Video describing how to use ZeroCostDL4Mic notebooks are available on youtube: - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook---**Structure of a notebook**The notebook contains two types of cell: **Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.---**Table of contents, Code snippets** and **Files**On the top left side of the notebook you find three tabs which contain from top to bottom:*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. **Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.**Note:** The "sample data" in "Files" contains default files. Do not upload anything in here!---**Making changes to the notebook****You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).You can use the ``-mark in code cells to comment out parts of the code. 
This allows you to keep the original code piece in the cell as a comment. **0. Before getting started**--- Give information on the required structure and dataype of the training dataset. Provide information on quality control dataset, such as:**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook. **Additionally, the corresponding input and output files need to have the same name**. Please note that you currently can **only use .tif files!**Here's a common data structure that can work:* Experiment A - **Training dataset** - Low SNR images (Training_source) - img_1.tif, img_2.tif, ... - High SNR images (Training_target) - img_1.tif, img_2.tif, ... - **Quality control dataset** - Low SNR images - img_1.tif, img_2.tif - High SNR images - img_1.tif, img_2.tif - **Data to be predicted** - **Results**---**Important note**- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.--- **1. Initialise the Colab session**--- **1.1. Check for GPU access**---By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:Go to **Runtime -> Change the Runtime type****Runtime type: Python 3** *(Python 3 is programming language in which this program is written)***Accelator: GPU** *(Graphics processing unit)*
###Code
#@markdown ##Run this cell to check if you have GPU access
%tensorflow_version 1.x
import tensorflow as tf
if tf.test.gpu_device_name()=='':
print('You do not have GPU access.')
print('Did you change your runtime ?')
print('If the runtime settings are correct then Google did not allocate GPU to your session')
print('Expect slow performance. To access GPU try reconnecting later')
else:
print('You have GPU access')
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
###Output
_____no_output_____
###Markdown
**1.2. Mount your Google Drive**--- To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook. Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. Once this is done, your data are available in the **Files** tab on the top left of notebook.
###Code
#@markdown ##Run this cell to connect your Google Drive to Colab
#@markdown * Click on the URL.
#@markdown * Sign in your Google Account.
#@markdown * Copy the authorization code.
#@markdown * Enter the authorization code.
#@markdown * Click on "Files" site on the right. Refresh the site. Your Google Drive folder should now be available here as "drive".
#mounts user's Google Drive to Google Colab.
from google.colab import drive
drive.mount('/content/gdrive')
###Output
_____no_output_____
###Markdown
**2. Install Name of the network and dependencies**---
###Code
#@markdown ##<font color=orange>Install Network and dependencies
#Libraries contains information of certain topics.
#Put the imported code and libraries here
print("Dependencies installed and imported.")
###Output
_____no_output_____
###Markdown
**3. Select your paths and parameters**---The code below allows the user to enter the paths to where the training data is and to define the training parameters. **3.1. Setting the main training parameters**--- **Paths for training, predictions and results** Fill the parameters here as needed and update the code. Note that the sections containing `Training_source`, `Training target`, `model_name` and `model_path` should appear in your notebook.**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source and Training_target data respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.**`model_name`:** Use only my_model -style, not my-model (Use "_" not "-"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).**Training parameters****`number_of_epochs`:**Give estimates for training performance given a number of epochs and provide a default value. **Default value:****`other_parameters`:**Give other parameters or default values **Default value:****If additional parameter above affects the training of the notebook give a brief explanation and how problems can be mitigated** **Advanced parameters - experienced users only****`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each patch is seen at least once per epoch. **Default value: Number of patch / batch_size****`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 16****`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during training. **Default value: 10**
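As a concrete illustration of the default rule for `number_of_steps` quoted above (the patch count used here is an arbitrary illustrative number, not a value from this notebook):

```python
import math

n_patches = 400      # illustrative only -- substitute the real number of training patches
batch_size = 16      # the default batch size above
number_of_steps = math.ceil(n_patches / batch_size)   # 25 steps, so every patch is seen once per epoch
```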
###Code
class bcolors:
  WARNING = '\033[31m'
W = '\033[0m'  # reset back to the normal terminal colour (used together with bcolors.WARNING)
#@markdown ###Path to training images:
Training_source = "" #@param {type:"string"}
# Ground truth images
Training_target = "" #@param {type:"string"}
# model name and path
#@markdown ###Name of the model and path to model folder:
model_name = "" #@param {type:"string"}
model_path = "" #@param {type:"string"}
# other parameters for training.
#@markdown ###Training Parameters
#@markdown Number of epochs:
number_of_epochs = 50#@param {type:"number"}
#@markdown Other parameters, add as necessary
other_parameters = 80#@param {type:"number"} # in pixels
#@markdown ###Advanced Parameters
Use_Default_Advanced_Parameters = True #@param {type:"boolean"}
#@markdown ###If not, please input:
number_of_steps = 400#@param {type:"number"}
batch_size = 16#@param {type:"number"}
percentage_validation = 10 #@param {type:"number"}
if (Use_Default_Advanced_Parameters):
print("Default advanced parameters enabled")
batch_size = 16
percentage_validation = 10
#Here we define the percentage to use for validation
percentage = percentage_validation/100
#here we check that no model with the same name already exist, if so delete
if os.path.exists(model_path+'/'+model_name):
shutil.rmtree(model_path+'/'+model_name)
# The shape of the images.
x = imread(InputFile)
y = imread(OutputFile)
print('Loaded Input images (number, width, length) =', x.shape)
print('Loaded Output images (number, width, length) =', y.shape)
print("Parameters initiated.")
# This will display a randomly chosen dataset input and output
random_choice = random.choice(os.listdir(Training_source))
x = imread(Training_source+"/"+random_choice)
# Here we check that the input images contains the expected dimensions
if len(x.shape) == 2:
print("Image dimensions (y,x)",x.shape)
if not len(x.shape) == 2:
print(bcolors.WARNING +"Your images appear to have the wrong dimensions. Image dimension",x.shape)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
#Hyperparameters failsafes
# Here we check that patch_size is smaller than the smallest xy dimension of the image
if patch_size > min(Image_Y, Image_X):
patch_size = min(Image_Y, Image_X)
print (bcolors.WARNING + " Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:",patch_size)
# Here we check that patch_size is divisible by 8
if not patch_size % 8 == 0:
patch_size = ((int(patch_size / 8)-1) * 8)
print (bcolors.WARNING + " Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:",patch_size)
os.chdir(Training_target)
y = imread(Training_target+"/"+random_choice)
f=plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(x, interpolation='nearest')
plt.title('Training source')
plt.axis('off');
plt.subplot(1,2,2)
plt.imshow(y, interpolation='nearest')
plt.title('Training target')
plt.axis('off');
###Output
_____no_output_____
###Markdown
**3.2. Data augmentation**--- Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small, since in that case a network could quickly learn every example in the dataset (overfitting) without augmentation. Augmentation is not necessary for training, and if your training dataset is large you should disable it. Data augmentation is performed here by rotating the patches in the XY plane and flipping them along the X axis. This only works if the images are square in XY. Add any other information which is necessary to run augmentation with your notebook/data.
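As a quick sanity check of what the rotation and flip used below do geometrically, here is their effect on a tiny 2D array (this snippet is independent of the file-based augmentation code in the next cell):

```python
import numpy as np

a = np.array([[1, 2],
              [3, 4]])
print(np.rot90(a))    # one 90-degree rotation -> [[2 4], [1 3]]
print(np.fliplr(a))   # left-right flip        -> [[2 1], [4 3]]
```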
###Code
#@markdown ###<font color = orange>Add any further useful augmentations
Use_Data_augmentation = False #@param{type:"boolean"}
#@markdown Select this option if you want to use augmentation to increase the size of your dataset
#@markdown **Rotate each image 3 times by 90 degrees.**
Rotation = True #@param{type:"boolean"}
#@markdown **Flip each image once around the x axis of the stack.**
Flip = True #@param{type:"boolean"}
#@markdown **Would you like to save your augmented images?**
Save_augmented_images = False #@param {type:"boolean"}
Saving_path = "" #@param {type:"string"}
if not Save_augmented_images:
Saving_path= "/content"
def rotation_aug(Source_path, Target_path, flip=False):
Source_images = os.listdir(Source_path)
Target_images = os.listdir(Target_path)
for image in Source_images:
source_img = io.imread(os.path.join(Source_path,image))
target_img = io.imread(os.path.join(Target_path,image))
# Source Rotation
source_img_90 = np.rot90(source_img,axes=(1,2))
source_img_180 = np.rot90(source_img_90,axes=(1,2))
source_img_270 = np.rot90(source_img_180,axes=(1,2))
# Target Rotation
target_img_90 = np.rot90(target_img,axes=(1,2))
target_img_180 = np.rot90(target_img_90,axes=(1,2))
target_img_270 = np.rot90(target_img_180,axes=(1,2))
# Add a flip to the rotation
if flip == True:
source_img_lr = np.fliplr(source_img)
source_img_90_lr = np.fliplr(source_img_90)
source_img_180_lr = np.fliplr(source_img_180)
source_img_270_lr = np.fliplr(source_img_270)
target_img_lr = np.fliplr(target_img)
target_img_90_lr = np.fliplr(target_img_90)
target_img_180_lr = np.fliplr(target_img_180)
target_img_270_lr = np.fliplr(target_img_270)
#source_img_90_ud = np.flipud(source_img_90)
# Save the augmented files
# Source images
io.imsave(Saving_path+'/augmented_source/'+image,source_img)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90.tif',source_img_90)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180.tif',source_img_180)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270.tif',source_img_270)
# Target images
io.imsave(Saving_path+'/augmented_target/'+image,target_img)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90.tif',target_img_90)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180.tif',target_img_180)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270.tif',target_img_270)
if flip == True:
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90_lr.tif',source_img_90_lr)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180_lr.tif',source_img_180_lr)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270_lr.tif',source_img_270_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90_lr.tif',target_img_90_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180_lr.tif',target_img_180_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270_lr.tif',target_img_270_lr)
def flip(Source_path, Target_path):
Source_images = os.listdir(Source_path)
Target_images = os.listdir(Target_path)
for image in Source_images:
source_img = io.imread(os.path.join(Source_path,image))
target_img = io.imread(os.path.join(Target_path,image))
source_img_lr = np.fliplr(source_img)
target_img_lr = np.fliplr(target_img)
io.imsave(Saving_path+'/augmented_source/'+image,source_img)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)
io.imsave(Saving_path+'/augmented_target/'+image,target_img)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)
if Use_Data_augmentation:
if os.path.exists(Saving_path+'/augmented_source'):
shutil.rmtree(Saving_path+'/augmented_source')
os.mkdir(Saving_path+'/augmented_source')
if os.path.exists(Saving_path+'/augmented_target'):
shutil.rmtree(Saving_path+'/augmented_target')
os.mkdir(Saving_path+'/augmented_target')
print("Data augmentation enabled")
print("Data augmentation in progress....")
if Rotation == True:
rotation_aug(Training_source,Training_target,flip=Flip)
elif Rotation == False and Flip == True:
flip(Training_source,Training_target)
print("Done")
if not Use_Data_augmentation:
print(bcolors.WARNING+"Data augmentation disabled")
###Output
_____no_output_____
###Markdown
**3.3. Using weights from a pre-trained model as initial weights**--- Here, you can set the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a model of Your Network**. This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**. In order to continue training from the point where the pre-trained model left off, it is advisable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used.
###Code
# @markdown ##Loading weights from a pre-trained network
Use_pretrained_model = False #@param {type:"boolean"}
pretrained_model_choice = "Model_from_file" #@param ["Model_from_file"]
Weights_choice = "last" #@param ["last", "best"]
#@markdown ###If you chose "Model_from_file", please provide the path to the model folder:
pretrained_model_path = "" #@param {type:"string"}
# --------------------- Check if we load a previously trained model ------------------------
if Use_pretrained_model:
# --------------------- Load the model from the choosen path ------------------------
if pretrained_model_choice == "Model_from_file":
h5_file_path = os.path.join(pretrained_model_path, "weights_"+Weights_choice+".h5")
# --------------------- Download the a model provided in the XXX ------------------------
if pretrained_model_choice == "Model_name":
pretrained_model_name = "Model_name"
pretrained_model_path = "/content/"+pretrained_model_name
print("Downloading the 2D_Demo_Model_from_Stardist_2D_paper")
if os.path.exists(pretrained_model_path):
shutil.rmtree(pretrained_model_path)
os.makedirs(pretrained_model_path)
wget.download("", pretrained_model_path)
wget.download("", pretrained_model_path)
wget.download("", pretrained_model_path)
wget.download("", pretrained_model_path)
h5_file_path = os.path.join(pretrained_model_path, "weights_"+Weights_choice+".h5")
# --------------------- Add additional pre-trained models here ------------------------
# --------------------- Check the model exist ------------------------
# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled,
if not os.path.exists(h5_file_path):
print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist')
Use_pretrained_model = False
# If the model path contains a pretrain model, we load the training rate,
if os.path.exists(h5_file_path):
#Here we check if the learning rate can be loaded from the quality control folder
if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):
with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:
csvRead = pd.read_csv(csvfile, sep=',')
#print(csvRead)
if "learning rate" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)
print("pretrained network learning rate found")
#find the last learning rate
lastLearningRate = csvRead["learning rate"].iloc[-1]
#Find the learning rate corresponding to the lowest validation loss
min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]
#print(min_val_loss)
bestLearningRate = min_val_loss['learning rate'].iloc[-1]
if Weights_choice == "last":
print('Last learning rate: '+str(lastLearningRate))
if Weights_choice == "best":
print('Learning rate of best validation loss: '+str(bestLearningRate))
if not "learning rate" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead
bestLearningRate = initial_learning_rate
lastLearningRate = initial_learning_rate
print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)
#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used
if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):
print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)
bestLearningRate = initial_learning_rate
lastLearningRate = initial_learning_rate
# Display info about the pretrained model to be loaded (or not)
if Use_pretrained_model:
print('Weights found in:')
print(h5_file_path)
print('will be loaded prior to training.')
else:
  print(bcolors.WARNING+'No pretrained network will be used.')
#@markdown ###<font color=orange> You will need to add or replace the code that loads any previously trained weights to the notebook here.
###Output
_____no_output_____
###Markdown
**4. Train the network**--- **4.1. Train the network**---When playing the cell below you should see updates after each epoch (round). Network training can take some time.* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches.
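Because the training call is network-specific, the cell below is left as a placeholder. A minimal sketch, assuming a `tf.keras`-style model (the tiny dense network and random arrays are stand-ins, not part of this template), usually boils down to a `fit` call followed by saving the weights:

```python
import numpy as np
import tensorflow as tf

# stand-in model and data, only to show the fit/save pattern
toy_model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(1)
])
toy_model.compile(optimizer='adam', loss='mse')

X_toy = np.random.rand(64, 4).astype('float32')
Y_toy = np.random.rand(64, 1).astype('float32')

history = toy_model.fit(X_toy, Y_toy,
                        validation_split=0.1,   # cf. percentage_validation in section 3.1
                        epochs=5,               # cf. number_of_epochs
                        batch_size=16)          # cf. batch_size

toy_model.save_weights('weights_last.h5')       # section 3.3 expects weights_last.h5 / weights_best.h5
```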
###Code
import time
import csv
start = time.time()
#@markdown ##<font color=orange>Start training
# Start Training
#Insert the code necessary to initiate training of your model
#Note that the notebook should load weights either from the model that is
#trained from scratch or if the pretrained weights are used (3.3.)
###Output
_____no_output_____
###Markdown
**4.2. Download your model(s) from Google Drive**---Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder. **5. Evaluate your model**---This section allows the user to perform important quality checks on the validity and generalisability of the trained model. **We highly recommend to perform quality control on all newly trained models.**
###Code
# model name and path
#@markdown ###Do you want to assess the model you just trained ?
Use_the_current_trained_model = True #@param {type:"boolean"}
#@markdown ###If not, please provide the name of the model and path to model folder:
#@markdown #####During training, the model files are automatically saved inside a folder named after model_name in section 3. Provide the path to this folder below.
QC_model_folder = "" #@param {type:"string"}
#Here we define the loaded model name and path
QC_model_name = os.path.basename(QC_model_folder)
QC_model_path = os.path.dirname(QC_model_folder)
if (Use_the_current_trained_model):
QC_model_name = model_name
QC_model_path = model_path
full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'
if os.path.exists(full_QC_model_path):
print("The "+QC_model_name+" network will be evaluated")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
###Output
_____no_output_____
###Markdown
**5.1. Inspection of the loss function**---First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.***Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased.
###Code
#@markdown ##Play the cell to show a plot of training errors vs. epoch number
import csv
from matplotlib import pyplot as plt
lossDataFromCSV = []
vallossDataFromCSV = []
with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:
csvRead = csv.reader(csvfile, delimiter=',')
next(csvRead)
for row in csvRead:
lossDataFromCSV.append(float(row[0]))
vallossDataFromCSV.append(float(row[1]))
epochNumber = range(len(lossDataFromCSV))
plt.figure(figsize=(15,10))
plt.subplot(2,1,1)
plt.plot(epochNumber,lossDataFromCSV, label='Training loss')
plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')
plt.title('Training loss and validation loss vs. epoch number (linear scale)')
plt.ylabel('Loss')
plt.xlabel('Epoch number')
plt.legend()
plt.subplot(2,1,2)
plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')
plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')
plt.title('Training loss and validation loss vs. epoch number (log scale)')
plt.ylabel('Loss')
plt.xlabel('Epoch number')
plt.legend()
plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')
plt.show()
###Output
_____no_output_____
###Markdown
**5.2. Error mapping and quality metrics estimation**--- Update the code below to perform predictions on your quality control dataset. Use the metrics that are the most meaningful to assess the quality of the prediction.This section will display SSIM maps and RSE maps as well as calculating total SSIM, NRMSE and PSNR metrics for all the images provided in the "Source_QC_folder" and "Target_QC_folder" !**1. The SSIM (structural similarity) map** The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). **mSSIM** is the SSIM value calculated across the entire window of both images.**The output below shows the SSIM maps with the mSSIM****2. The RSE (Root Squared Error) map** This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. The higher the score the better the agreement.**The output below shows the RSE maps with the NRMSE and PSNR values.**
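As a minimal, self-contained illustration of the two headline metrics (independent of the QC code below, which additionally builds the per-pixel SSIM and RSE maps), here they are computed on a synthetic image pair:

```python
import numpy as np
from skimage.metrics import structural_similarity, peak_signal_noise_ratio

rng = np.random.RandomState(0)
gt = rng.rand(64, 64).astype(np.float32)                   # stand-in ground-truth image
noisy = np.clip(gt + 0.1 * rng.randn(64, 64), 0, 1).astype(np.float32)

mssim = structural_similarity(gt, noisy, data_range=1.0)   # 1.0 would mean identical structures
psnr = peak_signal_noise_ratio(gt, noisy, data_range=1.0)  # higher means closer to the target
print('mSSIM: %.3f  PSNR: %.1f dB' % (mssim, psnr))
```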
###Code
#@markdown ##Choose the folders that contain your Quality Control dataset
from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio as psnr
Source_QC_folder = "" #@param{type:"string"}
Target_QC_folder = "" #@param{type:"string"}
# Create a quality control/Prediction Folder
if os.path.exists(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction"):
shutil.rmtree(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction")
os.makedirs(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction")
# Insert code to activate the pretrained model if necessary.
# List Tif images in Source_QC_folder
Source_QC_folder_tif = Source_QC_folder+"/*.tif"
Z = sorted(glob(Source_QC_folder_tif))
Z = list(map(imread,Z))
print('Number of test dataset found in the folder: '+str(len(Z)))
# Insert code to perform predictions on all datasets in the Source_QC folder
def ssim(img1, img2):
return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)
def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):
"""This function is adapted from Martin Weigert"""
"""Percentile-based image normalization."""
mi = np.percentile(x,pmin,axis=axis,keepdims=True)
ma = np.percentile(x,pmax,axis=axis,keepdims=True)
return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)
def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32
"""This function is adapted from Martin Weigert"""
if dtype is not None:
x = x.astype(dtype,copy=False)
mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)
ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)
eps = dtype(eps)
try:
import numexpr
x = numexpr.evaluate("(x - mi) / ( ma - mi + eps )")
except ImportError:
x = (x - mi) / ( ma - mi + eps )
if clip:
x = np.clip(x,0,1)
return x
def norm_minmse(gt, x, normalize_gt=True):
"""This function is adapted from Martin Weigert"""
"""
normalizes and affinely scales an image pair such that the MSE is minimized
Parameters
----------
gt: ndarray
the ground truth image
x: ndarray
the image that will be affinely scaled
normalize_gt: bool
set to True of gt image should be normalized (default)
Returns
-------
gt_scaled, x_scaled
"""
if normalize_gt:
gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)
x = x.astype(np.float32, copy=False) - np.mean(x)
#x = x - np.mean(x)
gt = gt.astype(np.float32, copy=False) - np.mean(gt)
#gt = gt - np.mean(gt)
scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())
return gt, scale * x
# Open and create the csv file that will contain all the QC metrics
with open(QC_model_path+"/"+QC_model_name+"/Quality Control/QC_metrics_"+QC_model_name+".csv", "w", newline='') as file:
writer = csv.writer(file)
# Write the header in the csv file
writer.writerow(["image #","Prediction v. GT mSSIM","Input v. GT mSSIM", "Prediction v. GT NRMSE", "Input v. GT NRMSE", "Prediction v. GT PSNR", "Input v. GT PSNR"])
# Let's loop through the provided dataset in the QC folders
for i in os.listdir(Source_QC_folder):
if not os.path.isdir(os.path.join(Source_QC_folder,i)):
print('Running QC on: '+i)
# -------------------------------- Target test data (Ground truth) --------------------------------
test_GT = io.imread(os.path.join(Target_QC_folder, i))
# -------------------------------- Source test data --------------------------------
test_source = io.imread(os.path.join(Source_QC_folder,i))
# Normalize the images wrt each other by minimizing the MSE between GT and Source image
test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)
# -------------------------------- Prediction --------------------------------
test_prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction",i))
# Normalize the images wrt each other by minimizing the MSE between GT and prediction
test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True)
# -------------------------------- Calculate the metric maps and save them --------------------------------
# Calculate the SSIM maps
index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)
index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)
#Save ssim_maps
img_SSIM_GTvsPrediction_32bit = np.float32(img_SSIM_GTvsPrediction)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsPrediction_'+i,img_SSIM_GTvsPrediction_32bit)
img_SSIM_GTvsSource_32bit = np.float32(img_SSIM_GTvsSource)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsSource_'+i,img_SSIM_GTvsSource_32bit)
# Calculate the Root Squared Error (RSE) maps
img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))
img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))
# Save SE maps
img_RSE_GTvsPrediction_32bit = np.float32(img_RSE_GTvsPrediction)
img_RSE_GTvsSource_32bit = np.float32(img_RSE_GTvsSource)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsPrediction_'+i,img_RSE_GTvsPrediction_32bit)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsSource_'+i,img_RSE_GTvsSource_32bit)
# -------------------------------- Calculate the RSE metrics and save them --------------------------------
# Normalised Root Mean Squared Error (here it's valid to take the mean of the image)
NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))
NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))
# We can also measure the peak signal to noise ratio between the images
PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)
PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])
# All data is now processed saved
Test_FileList = os.listdir(Source_QC_folder) # this assumes, as it should, that both source and target are named the same
plt.figure(figsize=(15,15))
# Currently only displays the last computed set, from memory
# Target (Ground-truth)
plt.subplot(3,3,1)
plt.axis('off')
img_GT = io.imread(os.path.join(Target_QC_folder, Test_FileList[-1]))
plt.imshow(img_GT)
plt.title('Target',fontsize=15)
# Source
plt.subplot(3,3,2)
plt.axis('off')
img_Source = io.imread(os.path.join(Source_QC_folder, Test_FileList[-1]))
plt.imshow(img_Source)
plt.title('Source',fontsize=15)
#Prediction
plt.subplot(3,3,3)
plt.axis('off')
img_Prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction/", Test_FileList[-1]))
plt.imshow(img_Prediction)
plt.title('Prediction',fontsize=15)
#Setting up colours
cmap = plt.cm.CMRmap
#SSIM between GT and Source
plt.subplot(3,3,5)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)
plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)
#SSIM between GT and Prediction
plt.subplot(3,3,6)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)
plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)
#Root Squared Error between GT and Source
plt.subplot(3,3,8)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)
plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)
#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))
plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)
#Root Squared Error between GT and Prediction
plt.subplot(3,3,9)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)
###Output
_____no_output_____
###Markdown
**6. Using the trained model**---In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive. **6.1. Generate prediction(s) from unseen dataset**---Fill the below code to perform predictions using your model.The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.**`Result_folder`:** This folder will contain the predicted output images.
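Since the prediction call depends on the network, the cell below only sets up the model and the input/output paths. A minimal sketch of the loop it is meant to contain could look like this (the Keras-style `model.predict` call and the single-channel 2D input shape are assumptions, not part of the template):

```python
import os
import numpy as np
from skimage import io

def predict_folder(model, data_folder, result_folder):
    """Run `model` on every .tif in data_folder and save each result under the same name."""
    for filename in os.listdir(data_folder):
        if not filename.lower().endswith('.tif'):
            continue
        img = io.imread(os.path.join(data_folder, filename)).astype(np.float32)
        pred = model.predict(img[np.newaxis, ..., np.newaxis])[0, ..., 0]
        io.imsave(os.path.join(result_folder, filename), pred)
```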
###Code
#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.
Data_folder = "" #@param {type:"string"}
Result_folder = "" #@param {type:"string"}
# model name and path
#@markdown ###Do you want to use the current trained model?
Use_the_current_trained_model = True #@param {type:"boolean"}
#@markdown ###If not, provide the name of the model and path to model folder:
#@markdown #####During training, the model files are automatically saved inside a folder named after model_name in section 3. Provide the path to this folder below.
Prediction_model_folder = "" #@param {type:"string"}
#Here we find the loaded model name and parent path
Prediction_model_name = os.path.basename(Prediction_model_folder)
Prediction_model_path = os.path.dirname(Prediction_model_folder)
if (Use_the_current_trained_model):
print("Using current trained network")
Prediction_model_name = model_name
Prediction_model_path = model_path
full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'
if os.path.exists(full_Prediction_model_path):
print("The "+Prediction_model_name+" network will be used.")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
# Activate the (pre-)trained model
# Provide the code for performing predictions and saving them
print("Images saved into folder:", Result_folder)
###Output
_____no_output_____
###Markdown
**6.2. Inspect the predicted output**---
###Code
# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.
# This will display a randomly chosen dataset input and predicted output
random_choice = random.choice(os.listdir(Data_folder))
x = imread(Data_folder+"/"+random_choice)
os.chdir(Result_folder)
y = imread(Result_folder+"/"+random_choice)
plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.axis('off')
plt.imshow(x, interpolation='nearest')
plt.title('Input')
plt.subplot(1,2,2)
plt.axis('off')
plt.imshow(y, interpolation='nearest')
plt.title('Predicted output');
###Output
_____no_output_____
###Markdown
This is a template for a ZeroCostDL4Mic notebook and needs to be filled with appropriate model code and information. Thank you for contributing to the ZeroCostDL4Mic Project. Please use this notebook as a template for your implementation. When your notebook is completed, please upload it to your github page and send us a link so we can reference your work. If possible, remember to provide separate training and test datasets (for quality control) containing source and target images with your finished notebooks. This is very useful so that ZeroCostDL4Mic users can test your notebook. **Name of the Network**--- Description of the network and link to publication with author reference. [author et al, etc.](URL).---*Disclaimer*:This notebook is inspired from the *Zero-Cost Deep-Learning to Enhance Microscopy* project (ZeroCostDL4Mic) (https://github.com/HenriquesLab/DeepLearning_Collab/wiki) and was created by **Your name**This notebook is based on the following paper: **Original Title of the paper**, Journal, volume, pages, year and complete author list, [link to paper](URL)And source code found in: *provide github link or equivalent if applicable*Provide information on dataset availability and link for download if applicable.**Please also cite this original paper when using or developing this notebook.** **How to use this notebook?**---Video describing how to use ZeroCostDL4Mic notebooks are available on youtube: - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook---**Structure of a notebook**The notebook contains two types of cell: **Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.---**Table of contents, Code snippets** and **Files**On the top left side of the notebook you find three tabs which contain from top to bottom:*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. **Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.**Note:** The "sample data" in "Files" contains default files. Do not upload anything in here!---**Making changes to the notebook****You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).You can use the ``-mark in code cells to comment out parts of the code. 
This allows you to keep the original code piece in the cell as a comment. **0. Before getting started**--- Give information on the required structure and dataype of the training dataset. Provide information on quality control dataset, such as:**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook. **Additionally, the corresponding input and output files need to have the same name**. Please note that you currently can **only use .tif files!**Here's a common data structure that can work:* Experiment A - **Training dataset** - Low SNR images (Training_source) - img_1.tif, img_2.tif, ... - High SNR images (Training_target) - img_1.tif, img_2.tif, ... - **Quality control dataset** - Low SNR images - img_1.tif, img_2.tif - High SNR images - img_1.tif, img_2.tif - **Data to be predicted** - **Results**---**Important note**- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.--- **1. Install Name of the network and dependencies**---
###Code
#@markdown ##<font color=orange>Install Network and dependencies
#Libraries contains information of certain topics.
#Put the imported code and libraries here
Notebook_version = ['1.12'] #Contact the ZeroCostDL4Mic team to find out about the version number
from builtins import any as b_any
def get_requirements_path():
# Store requirements file in 'contents' directory
current_dir = os.getcwd()
dir_count = current_dir.count('/') - 1
path = '../' * (dir_count) + 'requirements.txt'
return path
def filter_files(file_list, filter_list):
filtered_list = []
for fname in file_list:
if b_any(fname.split('==')[0] in s for s in filter_list):
filtered_list.append(fname)
return filtered_list
def build_requirements_file(before, after):
path = get_requirements_path()
# Exporting requirements.txt for local run
!pip freeze > $path
# Get minimum requirements file
df = pd.read_csv(path, delimiter = "\n")
mod_list = [m.split('.')[0] for m in after if not m in before]
req_list_temp = df.values.tolist()
req_list = [x[0] for x in req_list_temp]
# Replace with package name and handle cases where import name is different to module name
mod_name_list = [['sklearn', 'scikit-learn'], ['skimage', 'scikit-image']]
mod_replace_list = [[x[1] for x in mod_name_list] if s in [x[0] for x in mod_name_list] else s for s in mod_list]
filtered_list = filter_files(req_list, mod_replace_list)
file=open(path,'w')
for item in filtered_list:
file.writelines(item + '\n')
file.close()
import sys
before = [str(m) for m in sys.modules]
!pip install fpdf

# Minimal imports used by the template helper functions below (pdf export, quality
# control display and the requirements file); the pip-internal freeze import is the
# one expected by the freeze(local_only=True) calls in pdf_export/qc_pdf_export.
# Add your network-specific imports here as well.
import os
import csv
import shutil
import random
import subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from glob import glob
from skimage import io
from tifffile import imread
from pip._internal.operations.freeze import freeze
from fpdf import FPDF, HTMLMixin
# Below are templates for the function definitions for the export
# of pdf summaries for training and qc. You will need to adjust these functions
# with the variables and other parameters as necessary to make them
# work for your project
from datetime import datetime
def pdf_export(trained = False, augmentation = False, pretrained_model = False):
# save FPDF() class into a
# variable pdf
#from datetime import datetime
class MyFPDF(FPDF, HTMLMixin):
pass
pdf = MyFPDF()
pdf.add_page()
pdf.set_right_margin(-1)
pdf.set_font("Arial", size = 11, style='B')
Network = "Your network's name"
day = datetime.now()
datetime_str = str(day)[0:10]
Header = 'Training report for '+Network+' model ('+model_name+')\nDate: '+datetime_str
pdf.multi_cell(180, 5, txt = Header, align = 'L')
# add another cell
if trained:
training_time = "Training time: "+str(hour)+ "hour(s) "+str(mins)+"min(s) "+str(round(sec))+"sec(s)"
pdf.cell(190, 5, txt = training_time, ln = 1, align='L')
pdf.ln(1)
Header_2 = 'Information for your materials and methods:'
pdf.cell(190, 5, txt=Header_2, ln=1, align='L')
all_packages = ''
for requirement in freeze(local_only=True):
all_packages = all_packages+requirement+', '
#print(all_packages)
#Main Packages
main_packages = ''
version_numbers = []
for name in ['tensorflow','numpy','Keras','csbdeep']:
find_name=all_packages.find(name)
main_packages = main_packages+all_packages[find_name:all_packages.find(',',find_name)]+', '
#Version numbers only here:
version_numbers.append(all_packages[find_name+len(name)+2:all_packages.find(',',find_name)])
cuda_version = subprocess.run('nvcc --version',stdout=subprocess.PIPE, shell=True)
cuda_version = cuda_version.stdout.decode('utf-8')
cuda_version = cuda_version[cuda_version.find(', V')+3:-1]
gpu_name = subprocess.run('nvidia-smi',stdout=subprocess.PIPE, shell=True)
gpu_name = gpu_name.stdout.decode('utf-8')
gpu_name = gpu_name[gpu_name.find('Tesla'):gpu_name.find('Tesla')+10]
#print(cuda_version[cuda_version.find(', V')+3:-1])
#print(gpu_name)
shape = io.imread(Training_source+'/'+os.listdir(Training_source)[1]).shape
dataset_size = len(os.listdir(Training_source))
text = 'The '+Network+' model was trained from scratch for '+str(number_of_epochs)+' epochs on '+str(dataset_size*number_of_patches)+' paired image patches (image dimensions: '+str(shape)+', patch size: ('+str(patch_size)+','+str(patch_size)+')) with a batch size of '+str(batch_size)+' and a '+config.train_loss+' loss function, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Key python packages used include tensorflow (v '+version_numbers[0]+'), Keras (v '+version_numbers[2]+'), csbdeep (v '+version_numbers[3]+'), numpy (v '+version_numbers[1]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'
if pretrained_model:
text = 'The '+Network+' model was trained for '+str(number_of_epochs)+' epochs on '+str(dataset_size*number_of_patches)+' paired image patches (image dimensions: '+str(shape)+', patch size: ('+str(patch_size)+','+str(patch_size)+')) with a batch size of '+str(batch_size)+' and a '+config.train_loss+' loss function, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). The model was re-trained from a pretrained model. Key python packages used include tensorflow (v '+version_numbers[0]+'), Keras (v '+version_numbers[2]+'), csbdeep (v '+version_numbers[3]+'), numpy (v '+version_numbers[1]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'
pdf.set_font('')
pdf.set_font_size(10.)
pdf.multi_cell(190, 5, txt = text, align='L')
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.ln(1)
pdf.cell(28, 5, txt='Augmentation: ', ln=0)
pdf.set_font('')
if augmentation:
aug_text = 'The dataset was augmented by a factor of '+str(Multiply_dataset_by)+' by'
if rotate_270_degrees != 0 or rotate_90_degrees != 0:
aug_text = aug_text+'\n- rotation'
if flip_left_right != 0 or flip_top_bottom != 0:
aug_text = aug_text+'\n- flipping'
if random_zoom_magnification != 0:
aug_text = aug_text+'\n- random zoom magnification'
if random_distortion != 0:
aug_text = aug_text+'\n- random distortion'
if image_shear != 0:
aug_text = aug_text+'\n- image shearing'
if skew_image != 0:
aug_text = aug_text+'\n- image skewing'
else:
aug_text = 'No augmentation was used for training.'
pdf.multi_cell(190, 5, txt=aug_text, align='L')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(1)
pdf.cell(180, 5, txt = 'Parameters', align='L', ln=1)
pdf.set_font('')
pdf.set_font_size(10.)
if Use_Default_Advanced_Parameters:
pdf.cell(200, 5, txt='Default Advanced Parameters were enabled')
pdf.cell(200, 5, txt='The following parameters were used for training:')
pdf.ln(1)
html = """
<table width=40% style="margin-left:0px;">
<tr>
<th width = 50% align="left">Parameter</th>
<th width = 50% align="left">Value</th>
</tr>
<tr>
<td width = 50%>number_of_epochs</td>
<td width = 50%>{0}</td>
</tr>
<tr>
<td width = 50%>patch_size</td>
<td width = 50%>{1}</td>
</tr>
<tr>
<td width = 50%>number_of_patches</td>
<td width = 50%>{2}</td>
</tr>
<tr>
<td width = 50%>batch_size</td>
<td width = 50%>{3}</td>
</tr>
<tr>
<td width = 50%>number_of_steps</td>
<td width = 50%>{4}</td>
</tr>
<tr>
<td width = 50%>percentage_validation</td>
<td width = 50%>{5}</td>
</tr>
<tr>
<td width = 50%>initial_learning_rate</td>
<td width = 50%>{6}</td>
</tr>
</table>
""".format(number_of_epochs,str(patch_size)+'x'+str(patch_size),number_of_patches,batch_size,number_of_steps,percentage_validation,initial_learning_rate)
pdf.write_html(html)
#pdf.multi_cell(190, 5, txt = text_2, align='L')
pdf.set_font("Arial", size = 11, style='B')
pdf.ln(1)
pdf.cell(190, 5, txt = 'Training Dataset', align='L', ln=1)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(29, 5, txt= 'Training_source:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = Training_source, align = 'L')
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(27, 5, txt= 'Training_target:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = Training_target, align = 'L')
#pdf.cell(190, 5, txt=aug_text, align='L', ln=1)
pdf.ln(1)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(22, 5, txt= 'Model Path:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = model_path+'/'+model_name, align = 'L')
pdf.ln(1)
pdf.cell(60, 5, txt = 'Example Training pair', ln=1)
pdf.ln(1)
exp_size = io.imread("/content/NetworkNameExampleData.png").shape
pdf.image("/content/NetworkNameExampleData.png", x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
pdf.ln(1)
ref_1 = 'References:\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. "Democratising deep learning for microscopy with ZeroCostDL4Mic." Nature Communications (2021).'
pdf.multi_cell(190, 5, txt = ref_1, align='L')
ref_2 = '- Your networks name: first author et al. "Title of publication" Journal, year'
pdf.multi_cell(190, 5, txt = ref_2, align='L')
if augmentation:
ref_3 = '- Augmentor: Bloice, Marcus D., Christof Stocker, and Andreas Holzinger. "Augmentor: an image augmentation library for machine learning." arXiv preprint arXiv:1708.04680 (2017).'
pdf.multi_cell(190, 5, txt = ref_3, align='L')
pdf.ln(3)
reminder = 'Important:\nRemember to perform the quality control step on all newly trained models\nPlease consider depositing your training dataset on Zenodo'
pdf.set_font('Arial', size = 11, style='B')
pdf.multi_cell(190, 5, txt=reminder, align='C')
pdf.output(model_path+'/'+model_name+'/'+model_name+"_training_report.pdf")
#Make a pdf summary of the QC results
def qc_pdf_export():
class MyFPDF(FPDF, HTMLMixin):
pass
pdf = MyFPDF()
pdf.add_page()
pdf.set_right_margin(-1)
pdf.set_font("Arial", size = 11, style='B')
Network = "Your network's name"
#model_name = os.path.basename(full_QC_model_path)
day = datetime.now()
datetime_str = str(day)[0:10]
Header = 'Quality Control report for '+Network+' model ('+QC_model_name+')\nDate: '+datetime_str
pdf.multi_cell(180, 5, txt = Header, align = 'L')
all_packages = ''
for requirement in freeze(local_only=True):
all_packages = all_packages+requirement+', '
pdf.set_font('')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(2)
pdf.cell(190, 5, txt = 'Development of Training Losses', ln=1, align='L')
pdf.ln(1)
exp_size = io.imread(full_QC_model_path+'Quality Control/QC_example_data.png').shape
if os.path.exists(full_QC_model_path+'Quality Control/lossCurvePlots.png'):
pdf.image(full_QC_model_path+'Quality Control/lossCurvePlots.png', x = 11, y = None, w = round(exp_size[1]/10), h = round(exp_size[0]/13))
else:
pdf.set_font('')
pdf.set_font('Arial', size=10)
pdf.multi_cell(190, 5, txt='If you would like to see the evolution of the loss function during training please play the first cell of the QC section in the notebook.', align='L')
pdf.ln(2)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.ln(3)
pdf.cell(80, 5, txt = 'Example Quality Control Visualisation', ln=1)
pdf.ln(1)
exp_size = io.imread(full_QC_model_path+'Quality Control/QC_example_data.png').shape
pdf.image(full_QC_model_path+'Quality Control/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/10), h = round(exp_size[0]/10))
pdf.ln(1)
pdf.set_font('')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(1)
pdf.cell(180, 5, txt = 'Quality Control Metrics', align='L', ln=1)
pdf.set_font('')
pdf.set_font_size(10.)
pdf.ln(1)
html = """
<body>
<font size="7" face="Courier New" >
<table width=94% style="margin-left:0px;">"""
with open(full_QC_model_path+'Quality Control/QC_metrics_'+QC_model_name+'.csv', 'r') as csvfile:
metrics = csv.reader(csvfile)
header = next(metrics)
image = header[0]
mSSIM_PvsGT = header[1]
mSSIM_SvsGT = header[2]
NRMSE_PvsGT = header[3]
NRMSE_SvsGT = header[4]
PSNR_PvsGT = header[5]
PSNR_SvsGT = header[6]
header = """
<tr>
<th width = 10% align="left">{0}</th>
<th width = 15% align="left">{1}</th>
<th width = 15% align="center">{2}</th>
<th width = 15% align="left">{3}</th>
<th width = 15% align="center">{4}</th>
<th width = 15% align="left">{5}</th>
<th width = 15% align="center">{6}</th>
</tr>""".format(image,mSSIM_PvsGT,mSSIM_SvsGT,NRMSE_PvsGT,NRMSE_SvsGT,PSNR_PvsGT,PSNR_SvsGT)
html = html+header
for row in metrics:
image = row[0]
mSSIM_PvsGT = row[1]
mSSIM_SvsGT = row[2]
NRMSE_PvsGT = row[3]
NRMSE_SvsGT = row[4]
PSNR_PvsGT = row[5]
PSNR_SvsGT = row[6]
cells = """
<tr>
<td width = 10% align="left">{0}</td>
<td width = 15% align="center">{1}</td>
<td width = 15% align="center">{2}</td>
<td width = 15% align="center">{3}</td>
<td width = 15% align="center">{4}</td>
<td width = 15% align="center">{5}</td>
<td width = 15% align="center">{6}</td>
</tr>""".format(image,str(round(float(mSSIM_PvsGT),3)),str(round(float(mSSIM_SvsGT),3)),str(round(float(NRMSE_PvsGT),3)),str(round(float(NRMSE_SvsGT),3)),str(round(float(PSNR_PvsGT),3)),str(round(float(PSNR_SvsGT),3)))
html = html+cells
html = html+"""</body></table>"""
pdf.write_html(html)
pdf.ln(1)
pdf.set_font('')
pdf.set_font_size(10.)
ref_1 = 'References:\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. "Democratising deep learning for microscopy with ZeroCostDL4Mic." Nature Communications (2021).'
pdf.multi_cell(190, 5, txt = ref_1, align='L')
ref_2 = '- Your networks name: first author et al. "Title of publication" Journal, year'
pdf.multi_cell(190, 5, txt = ref_2, align='L')
pdf.ln(3)
reminder = 'To find the parameters and other information about how this model was trained, go to the training_report.pdf of this model which should be in the folder of the same name.'
pdf.set_font('Arial', size = 11, style='B')
pdf.multi_cell(190, 5, txt=reminder, align='C')
pdf.output(full_QC_model_path+'Quality Control/'+QC_model_name+'_QC_report.pdf')
print("Depencies installed and imported.")
# Build requirements file for local run
# -- the developers should leave this below all the other installations
after = [str(m) for m in sys.modules]
build_requirements_file(before, after)
###Output
_____no_output_____
###Markdown
**2. Initialise the Colab session**--- **2.1. Check for GPU access**---By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following: Go to **Runtime -> Change the Runtime type****Runtime type: Python 3** *(Python 3 is the programming language in which this program is written)***Accelerator: GPU** *(Graphics processing unit)*
###Code
#@markdown ##Run this cell to check if you have GPU access
%tensorflow_version 1.x
import tensorflow as tf
if tf.test.gpu_device_name()=='':
print('You do not have GPU access.')
print('Did you change your runtime ?')
print('If the runtime settings are correct then Google did not allocate GPU to your session')
print('Expect slow performance. To access GPU try reconnecting later')
else:
print('You have GPU access')
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
###Output
_____no_output_____
###Markdown
**2.2. Mount your Google Drive**--- To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook. Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. Once this is done, your data are available in the **Files** tab on the top left of notebook.
###Code
#@markdown ##Run this cell to connect your Google Drive to Colab
#@markdown * Click on the URL.
#@markdown * Sign in your Google Account.
#@markdown * Copy the authorization code.
#@markdown * Enter the authorization code.
#@markdown * Click on "Files" site on the right. Refresh the site. Your Google Drive folder should now be available here as "drive".
#mounts user's Google Drive to Google Colab.
from google.colab import drive
drive.mount('/content/gdrive')
###Output
_____no_output_____
###Markdown
**3. Select your paths and parameters**---The code below allows the user to enter the paths to where the training data is and to define the training parameters. **3.1. Setting the main training parameters**--- **Paths for training, predictions and results** Fill the parameters here as needed and update the code. Note that the sections containing `Training_source`, `Training target`, `model_name` and `model_path` should appear in your notebook.**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source and Training_target data respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.**`model_name`:** Use only my_model -style, not my-model (Use "_" not "-"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).**Training parameters****`number_of_epochs`:**Give estimates for training performance given a number of epochs and provide a default value. **Default value:****`other_parameters`:**Give other parameters or default values **Default value:****If additional parameter above affects the training of the notebook give a brief explanation and how problems can be mitigated** **Advanced parameters - experienced users only****`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each patch is seen at least once per epoch. **Default value: Number of patch / batch_size****`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 16****`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during training. **Default value: 10**
###Code
class bcolors:
  WARNING = '\033[31m'
W = '\033[0m'  # white (normal); used to reset the colour after warning messages (needed in section 3.3)
#@markdown ###Path to training images:
Training_source = "" #@param {type:"string"}
# Ground truth images
Training_target = "" #@param {type:"string"}
# model name and path
#@markdown ###Name of the model and path to model folder:
model_name = "" #@param {type:"string"}
model_path = "" #@param {type:"string"}
# other parameters for training.
#@markdown ###Training Parameters
#@markdown Number of epochs:
number_of_epochs = 50#@param {type:"number"}
#@markdown Other parameters, add as necessary
other_parameters = 80#@param {type:"number"} # in pixels
#@markdown ###Advanced Parameters
Use_Default_Advanced_Parameters = True #@param {type:"boolean"}
#@markdown ###If not, please input:
number_of_steps = 400#@param {type:"number"}
batch_size = 16#@param {type:"number"}
percentage_validation = 10 #@param {type:"number"}
if (Use_Default_Advanced_Parameters):
print("Default advanced parameters enabled")
batch_size = 16
percentage_validation = 10
#Here we define the percentage to use for validation
percentage = percentage_validation/100
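
# Sketch (assumption, not part of the original template): derive number_of_steps
# from the dataset size when the default advanced parameters are used, so that
# each training image is seen roughly once per epoch as described in section 3.1.
# Adjust this if your network trains on patches rather than whole images.
if (Use_Default_Advanced_Parameters):
  number_of_steps = max(1, len(os.listdir(Training_source)) // batch_size)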
#here we check that no model with the same name already exists; if so, it is deleted
if os.path.exists(model_path+'/'+model_name):
shutil.rmtree(model_path+'/'+model_name)
# The shape of the images.
# Use the first image of each training folder as an example to report the image shape.
InputFile = Training_source+"/"+os.listdir(Training_source)[0]
OutputFile = Training_target+"/"+os.listdir(Training_target)[0]
x = imread(InputFile)
y = imread(OutputFile)
print('Loaded Input image (width, length) =', x.shape)
print('Loaded Output image (width, length) =', y.shape)
print("Parameters initiated.")
# This will display a randomly chosen dataset input and output
random_choice = random.choice(os.listdir(Training_source))
x = imread(Training_source+"/"+random_choice)
# Here we check that the input images contains the expected dimensions
if len(x.shape) == 2:
print("Image dimensions (y,x)",x.shape)
if not len(x.shape) == 2:
print(bcolors.WARNING +"Your images appear to have the wrong dimensions. Image dimension",x.shape)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
#Hyperparameters failsafes
# Assumption for this template: the generic 'other_parameters' field above is used as
# the patch size (in pixels); replace this mapping with your network's real patch_size.
patch_size = other_parameters
# Here we check that patch_size is smaller than the smallest xy dimension of the image
if patch_size > min(Image_Y, Image_X):
patch_size = min(Image_Y, Image_X)
print (bcolors.WARNING + " Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:",patch_size)
# Here we check that patch_size is divisible by 8
if not patch_size % 8 == 0:
patch_size = ((int(patch_size / 8)-1) * 8)
print (bcolors.WARNING + " Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:",patch_size)
os.chdir(Training_target)
y = imread(Training_target+"/"+random_choice)
f=plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(x, interpolation='nearest')
plt.title('Training source')
plt.axis('off');
plt.subplot(1,2,2)
plt.imshow(y, interpolation='nearest')
plt.title('Training target')
plt.axis('off');
#We save the example data here to use it in the pdf export of the training
plt.savefig('/content/NetworkNameExampleData.png', bbox_inches='tight', pad_inches=0)
###Output
_____no_output_____
###Markdown
**3.2. Data augmentation**--- Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it. Data augmentation is performed here by rotating the patches in the XY plane and flipping them along the X axis. This only works if the images are square in XY. Add any other information which is necessary to run augmentation with your notebook/data.
###Code
#@markdown ###<font color = orange>Add any further useful augmentations
Use_Data_augmentation = False #@param{type:"boolean"}
#@markdown Select this option if you want to use augmentation to increase the size of your dataset
#@markdown **Rotate each image 3 times by 90 degrees.**
Rotation = True #@param{type:"boolean"}
#@markdown **Flip each image once around the x axis of the stack.**
Flip = True #@param{type:"boolean"}
#@markdown **Would you like to save your augmented images?**
Save_augmented_images = False #@param {type:"boolean"}
Saving_path = "" #@param {type:"string"}
if not Save_augmented_images:
Saving_path= "/content"
def rotation_aug(Source_path, Target_path, flip=False):
Source_images = os.listdir(Source_path)
Target_images = os.listdir(Target_path)
for image in Source_images:
source_img = io.imread(os.path.join(Source_path,image))
target_img = io.imread(os.path.join(Target_path,image))
# Source Rotation
source_img_90 = np.rot90(source_img,axes=(1,2))
source_img_180 = np.rot90(source_img_90,axes=(1,2))
source_img_270 = np.rot90(source_img_180,axes=(1,2))
# Target Rotation
target_img_90 = np.rot90(target_img,axes=(1,2))
target_img_180 = np.rot90(target_img_90,axes=(1,2))
target_img_270 = np.rot90(target_img_180,axes=(1,2))
# Add a flip to the rotation
if flip == True:
source_img_lr = np.fliplr(source_img)
source_img_90_lr = np.fliplr(source_img_90)
source_img_180_lr = np.fliplr(source_img_180)
source_img_270_lr = np.fliplr(source_img_270)
target_img_lr = np.fliplr(target_img)
target_img_90_lr = np.fliplr(target_img_90)
target_img_180_lr = np.fliplr(target_img_180)
target_img_270_lr = np.fliplr(target_img_270)
#source_img_90_ud = np.flipud(source_img_90)
# Save the augmented files
# Source images
io.imsave(Saving_path+'/augmented_source/'+image,source_img)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90.tif',source_img_90)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180.tif',source_img_180)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270.tif',source_img_270)
# Target images
io.imsave(Saving_path+'/augmented_target/'+image,target_img)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90.tif',target_img_90)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180.tif',target_img_180)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270.tif',target_img_270)
if flip == True:
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90_lr.tif',source_img_90_lr)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180_lr.tif',source_img_180_lr)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270_lr.tif',source_img_270_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90_lr.tif',target_img_90_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180_lr.tif',target_img_180_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270_lr.tif',target_img_270_lr)
def flip(Source_path, Target_path):
Source_images = os.listdir(Source_path)
Target_images = os.listdir(Target_path)
for image in Source_images:
source_img = io.imread(os.path.join(Source_path,image))
target_img = io.imread(os.path.join(Target_path,image))
source_img_lr = np.fliplr(source_img)
target_img_lr = np.fliplr(target_img)
io.imsave(Saving_path+'/augmented_source/'+image,source_img)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)
io.imsave(Saving_path+'/augmented_target/'+image,target_img)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)
if Use_Data_augmentation:
if os.path.exists(Saving_path+'/augmented_source'):
shutil.rmtree(Saving_path+'/augmented_source')
os.mkdir(Saving_path+'/augmented_source')
if os.path.exists(Saving_path+'/augmented_target'):
shutil.rmtree(Saving_path+'/augmented_target')
os.mkdir(Saving_path+'/augmented_target')
print("Data augmentation enabled")
print("Data augmentation in progress....")
if Rotation == True:
rotation_aug(Training_source,Training_target,flip=Flip)
elif Rotation == False and Flip == True:
flip(Training_source,Training_target)
print("Done")
if not Use_Data_augmentation:
print(bcolors.WARNING+"Data augmentation disabled")
###Output
_____no_output_____
###Markdown
**3.3. Using weights from a pre-trained model as initial weights**--- Here, you can set the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a model of Your Network**. This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**. In order to continue training from the point where the pre-trained model left off, it is advisable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used.
###Code
# @markdown ##Loading weights from a pre-trained network
Use_pretrained_model = False #@param {type:"boolean"}
pretrained_model_choice = "Model_from_file" #@param ["Model_from_file"]
Weights_choice = "last" #@param ["last", "best"]
#@markdown ###If you chose "Model_from_file", please provide the path to the model folder:
pretrained_model_path = "" #@param {type:"string"}
# --------------------- Check if we load a previously trained model ------------------------
if Use_pretrained_model:
# --------------------- Load the model from the choosen path ------------------------
if pretrained_model_choice == "Model_from_file":
h5_file_path = os.path.join(pretrained_model_path, "weights_"+Weights_choice+".h5")
# --------------------- Download the a model provided in the XXX ------------------------
if pretrained_model_choice == "Model_name":
pretrained_model_name = "Model_name"
pretrained_model_path = "/content/"+pretrained_model_name
print("Downloading the 2D_Demo_Model_from_Stardist_2D_paper")
if os.path.exists(pretrained_model_path):
shutil.rmtree(pretrained_model_path)
os.makedirs(pretrained_model_path)
wget.download("", pretrained_model_path)
wget.download("", pretrained_model_path)
wget.download("", pretrained_model_path)
wget.download("", pretrained_model_path)
h5_file_path = os.path.join(pretrained_model_path, "weights_"+Weights_choice+".h5")
# --------------------- Add additional pre-trained models here ------------------------
# --------------------- Check the model exist ------------------------
# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled,
if not os.path.exists(h5_file_path):
print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist')
Use_pretrained_model = False
# If the model path contains a pretrain model, we load the training rate,
if os.path.exists(h5_file_path):
#Here we check if the learning rate can be loaded from the quality control folder
if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):
with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:
csvRead = pd.read_csv(csvfile, sep=',')
#print(csvRead)
if "learning rate" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)
print("pretrained network learning rate found")
#find the last learning rate
lastLearningRate = csvRead["learning rate"].iloc[-1]
#Find the learning rate corresponding to the lowest validation loss
min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]
#print(min_val_loss)
bestLearningRate = min_val_loss['learning rate'].iloc[-1]
if Weights_choice == "last":
print('Last learning rate: '+str(lastLearningRate))
if Weights_choice == "best":
print('Learning rate of best validation loss: '+str(bestLearningRate))
if not "learning rate" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead
bestLearningRate = initial_learning_rate
lastLearningRate = initial_learning_rate
print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)
#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used
if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):
print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)
bestLearningRate = initial_learning_rate
lastLearningRate = initial_learning_rate
# Display info about the pretrained model to be loaded (or not)
if Use_pretrained_model:
  print('Weights found in:')
  print(h5_file_path)
  print('will be loaded prior to training.')
else:
  print(bcolors.WARNING+'No pretrained network will be used.')
#@markdown ###<font color=orange> You will need to add or replace the code that loads any previously trained weights to the notebook here.
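
# Sketch (assumption): for a Keras-based network, the pre-trained weights located
# above could be loaded once the model object has been built, for example with
# model.load_weights(h5_file_path). The guard below only runs if a variable named
# 'model' already exists in this session; replace it with your own loading code.
if Use_pretrained_model and 'model' in globals():
  model.load_weights(h5_file_path)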
###Output
_____no_output_____
###Markdown
**4. Train the network**--- **4.1. Train the network**---When playing the cell below you should see updates after each epoch (round). Network training can take some time.* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches.Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder.
###Code
import time
import csv
# Export the training parameters as pdf (before training, in case training fails)
pdf_export(augmentation = Use_Data_augmentation, pretrained_model = Use_pretrained_model)
start = time.time()
#@markdown ##<font color=orange>Start training
# Start Training
#Insert the code necessary to initiate training of your model
#Note that the notebook should load weights either from the model that is
#trained from scratch or if the pretrained weights are used (3.3.)
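
# Sketch (assumption, left as comments because the model object has to be created
# by the developer above): a generic Keras-style training call that also records
# the per-epoch losses in the 'training_evaluation.csv' file read by the Quality
# Control section (5.1), one row per epoch: loss, val_loss, learning rate.
# 'initial_learning_rate' is assumed to be one of your training parameters.
#
# history = model.fit(..., epochs=number_of_epochs, batch_size=batch_size)
# os.makedirs(model_path+'/'+model_name+'/Quality Control', exist_ok=True)
# with open(model_path+'/'+model_name+'/Quality Control/training_evaluation.csv','w') as f:
#   writer = csv.writer(f)
#   writer.writerow(['loss','val_loss','learning rate'])
#   for e in range(len(history.history['loss'])):
#     writer.writerow([history.history['loss'][e], history.history['val_loss'][e], initial_learning_rate])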
# Displaying the time elapsed for training
dt = time.time() - start
mins, sec = divmod(dt, 60)
hour, mins = divmod(mins, 60)
print("Time elapsed:",hour, "hour(s)",mins,"min(s)",round(sec),"sec(s)")
# Export the training parameters as pdf (after training)
pdf_export(trained = True, augmentation = Use_Data_augmentation, pretrained_model = Use_pretrained_model)
###Output
_____no_output_____
###Markdown
**5. Evaluate your model**---This section allows the user to perform important quality checks on the validity and generalisability of the trained model. **We highly recommend to perform quality control on all newly trained models.**
###Code
# model name and path
#@markdown ###Do you want to assess the model you just trained ?
Use_the_current_trained_model = True #@param {type:"boolean"}
#@markdown ###If not, please provide the name of the model and path to model folder:
#@markdown #####During training, the model files are automatically saved inside a folder named after model_name in section 3. Provide the path to this folder below.
QC_model_folder = "" #@param {type:"string"}
#Here we define the loaded model name and path
QC_model_name = os.path.basename(QC_model_folder)
QC_model_path = os.path.dirname(QC_model_folder)
if (Use_the_current_trained_model):
QC_model_name = model_name
QC_model_path = model_path
full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'
if os.path.exists(full_QC_model_path):
print("The "+QC_model_name+" network will be evaluated")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
###Output
_____no_output_____
###Markdown
**5.1. Inspection of the loss function**---First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.* **Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target. **Validation loss** describes the same error value between the model's prediction on a validation image compared to its target. During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance. Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again and the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased.
###Code
#@markdown ##Play the cell to show a plot of training errors vs. epoch number
import csv
from matplotlib import pyplot as plt
lossDataFromCSV = []
vallossDataFromCSV = []
with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:
csvRead = csv.reader(csvfile, delimiter=',')
next(csvRead)
for row in csvRead:
lossDataFromCSV.append(float(row[0]))
vallossDataFromCSV.append(float(row[1]))
epochNumber = range(len(lossDataFromCSV))
plt.figure(figsize=(15,10))
plt.subplot(2,1,1)
plt.plot(epochNumber,lossDataFromCSV, label='Training loss')
plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')
plt.title('Training loss and validation loss vs. epoch number (linear scale)')
plt.ylabel('Loss')
plt.xlabel('Epoch number')
plt.legend()
plt.subplot(2,1,2)
plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')
plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')
plt.title('Training loss and validation loss vs. epoch number (log scale)')
plt.ylabel('Loss')
plt.xlabel('Epoch number')
plt.legend()
plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')
plt.show()
###Output
_____no_output_____
###Markdown
**5.2. Error mapping and quality metrics estimation**--- Update the code below to perform predictions on your quality control dataset. Use the metrics that are the most meaningful to assess the quality of the prediction.This section will display SSIM maps and RSE maps as well as calculating total SSIM, NRMSE and PSNR metrics for all the images provided in the "Source_QC_folder" and "Target_QC_folder" !**1. The SSIM (structural similarity) map** The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). **mSSIM** is the SSIM value calculated across the entire window of both images.**The output below shows the SSIM maps with the mSSIM****2. The RSE (Root Squared Error) map** This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. The higher the score the better the agreement.**The output below shows the RSE maps with the NRMSE and PSNR values.**
###Code
#@markdown ##Choose the folders that contain your Quality Control dataset
from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio as psnr
Source_QC_folder = "" #@param{type:"string"}
Target_QC_folder = "" #@param{type:"string"}
# Create a quality control/Prediction Folder
if os.path.exists(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction"):
shutil.rmtree(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction")
os.makedirs(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction")
# Insert code to activate the pretrained model if necessary.
# List Tif images in Source_QC_folder
Source_QC_folder_tif = Source_QC_folder+"/*.tif"
Z = sorted(glob(Source_QC_folder_tif))
Z = list(map(imread,Z))
print('Number of test dataset found in the folder: '+str(len(Z)))
# Insert code to perform predictions on all datasets in the Source_QC folder
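
# Sketch (assumption, left as comments because the inference call depends on your
# network): each QC prediction should be saved, under the same file name as its
# source image, into the 'Quality Control/Prediction' folder created above so that
# the metric calculations below can find it.
#
# for filename in os.listdir(Source_QC_folder):
#   prediction = model.predict(...)  # run your trained model on the source image
#   io.imsave(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction/"+filename, prediction)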
def ssim(img1, img2):
return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)
def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):
"""This function is adapted from Martin Weigert"""
"""Percentile-based image normalization."""
mi = np.percentile(x,pmin,axis=axis,keepdims=True)
ma = np.percentile(x,pmax,axis=axis,keepdims=True)
return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)
def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32
"""This function is adapted from Martin Weigert"""
if dtype is not None:
x = x.astype(dtype,copy=False)
mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)
ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)
eps = dtype(eps)
try:
import numexpr
x = numexpr.evaluate("(x - mi) / ( ma - mi + eps )")
except ImportError:
x = (x - mi) / ( ma - mi + eps )
if clip:
x = np.clip(x,0,1)
return x
def norm_minmse(gt, x, normalize_gt=True):
"""This function is adapted from Martin Weigert"""
"""
normalizes and affinely scales an image pair such that the MSE is minimized
Parameters
----------
gt: ndarray
the ground truth image
x: ndarray
the image that will be affinely scaled
normalize_gt: bool
set to True of gt image should be normalized (default)
Returns
-------
gt_scaled, x_scaled
"""
if normalize_gt:
gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)
x = x.astype(np.float32, copy=False) - np.mean(x)
#x = x - np.mean(x)
gt = gt.astype(np.float32, copy=False) - np.mean(gt)
#gt = gt - np.mean(gt)
scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())
return gt, scale * x
# Open and create the csv file that will contain all the QC metrics
with open(QC_model_path+"/"+QC_model_name+"/Quality Control/QC_metrics_"+QC_model_name+".csv", "w", newline='') as file:
writer = csv.writer(file)
# Write the header in the csv file
writer.writerow(["image #","Prediction v. GT mSSIM","Input v. GT mSSIM", "Prediction v. GT NRMSE", "Input v. GT NRMSE", "Prediction v. GT PSNR", "Input v. GT PSNR"])
# Let's loop through the provided dataset in the QC folders
for i in os.listdir(Source_QC_folder):
if not os.path.isdir(os.path.join(Source_QC_folder,i)):
print('Running QC on: '+i)
# -------------------------------- Target test data (Ground truth) --------------------------------
test_GT = io.imread(os.path.join(Target_QC_folder, i))
# -------------------------------- Source test data --------------------------------
test_source = io.imread(os.path.join(Source_QC_folder,i))
# Normalize the images wrt each other by minimizing the MSE between GT and Source image
test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)
# -------------------------------- Prediction --------------------------------
test_prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction",i))
# Normalize the images wrt each other by minimizing the MSE between GT and prediction
test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True)
# -------------------------------- Calculate the metric maps and save them --------------------------------
# Calculate the SSIM maps
index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)
index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)
#Save ssim_maps
img_SSIM_GTvsPrediction_32bit = np.float32(img_SSIM_GTvsPrediction)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsPrediction_'+i,img_SSIM_GTvsPrediction_32bit)
img_SSIM_GTvsSource_32bit = np.float32(img_SSIM_GTvsSource)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsSource_'+i,img_SSIM_GTvsSource_32bit)
# Calculate the Root Squared Error (RSE) maps
img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))
img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))
# Save SE maps
img_RSE_GTvsPrediction_32bit = np.float32(img_RSE_GTvsPrediction)
img_RSE_GTvsSource_32bit = np.float32(img_RSE_GTvsSource)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsPrediction_'+i,img_RSE_GTvsPrediction_32bit)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsSource_'+i,img_RSE_GTvsSource_32bit)
# -------------------------------- Calculate the RSE metrics and save them --------------------------------
# Normalised Root Mean Squared Error (here it's valid to take the mean of the image)
NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))
NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))
# We can also measure the peak signal to noise ratio between the images
PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)
PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])
# All data is now processed saved
Test_FileList = os.listdir(Source_QC_folder) # this assumes, as it should, that both source and target are named the same
plt.figure(figsize=(15,15))
# Currently only displays the last computed set, from memory
# Target (Ground-truth)
plt.subplot(3,3,1)
plt.axis('off')
img_GT = io.imread(os.path.join(Target_QC_folder, Test_FileList[-1]))
plt.imshow(img_GT)
plt.title('Target',fontsize=15)
# Source
plt.subplot(3,3,2)
plt.axis('off')
img_Source = io.imread(os.path.join(Source_QC_folder, Test_FileList[-1]))
plt.imshow(img_Source)
plt.title('Source',fontsize=15)
#Prediction
plt.subplot(3,3,3)
plt.axis('off')
img_Prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction/", Test_FileList[-1]))
plt.imshow(img_Prediction)
plt.title('Prediction',fontsize=15)
#Setting up colours
cmap = plt.cm.CMRmap
#SSIM between GT and Source
plt.subplot(3,3,5)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)
plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)
#SSIM between GT and Prediction
plt.subplot(3,3,6)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)
plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)
#Root Squared Error between GT and Source
plt.subplot(3,3,8)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)
plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)
#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))
plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)
#Root Squared Error between GT and Prediction
plt.subplot(3,3,9)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)
#Make a pdf summary of the QC results
qc_pdf_export()
###Output
_____no_output_____
###Markdown
**6. Using the trained model**---In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive. **6.1. Generate prediction(s) from unseen dataset**---Fill the below code to perform predictions using your model.The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.**`Result_folder`:** This folder will contain the predicted output images.
###Code
#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.
Data_folder = "" #@param {type:"string"}
Result_folder = "" #@param {type:"string"}
# model name and path
#@markdown ###Do you want to use the current trained model?
Use_the_current_trained_model = True #@param {type:"boolean"}
#@markdown ###If not, provide the name of the model and path to model folder:
#@markdown #####During training, the model files are automatically saved inside a folder named after model_name in section 3. Provide the path to this folder below.
Prediction_model_folder = "" #@param {type:"string"}
#Here we find the loaded model name and parent path
Prediction_model_name = os.path.basename(Prediction_model_folder)
Prediction_model_path = os.path.dirname(Prediction_model_folder)
if (Use_the_current_trained_model):
print("Using current trained network")
Prediction_model_name = model_name
Prediction_model_path = model_path
full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'
if os.path.exists(full_Prediction_model_path):
print("The "+Prediction_model_name+" network will be used.")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
# Activate the (pre-)trained model
# Provide the code for performing predictions and saving them
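
# Sketch (assumption, left as comments because the inference call depends on your
# network): loop over the images in Data_folder, run the trained model on each one
# and save the result under the same file name in Result_folder.
#
# for filename in os.listdir(Data_folder):
#   image = imread(Data_folder+"/"+filename)
#   prediction = model.predict(...)  # replace with your network's inference call
#   io.imsave(Result_folder+"/"+filename, prediction)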
print("Images saved into folder:", Result_folder)
###Output
_____no_output_____
###Markdown
**6.2. Inspect the predicted output**---
###Code
# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.
# This will display a randomly chosen dataset input and predicted output
random_choice = random.choice(os.listdir(Data_folder))
x = imread(Data_folder+"/"+random_choice)
os.chdir(Result_folder)
y = imread(Result_folder+"/"+random_choice)
plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.axis('off')
plt.imshow(x, interpolation='nearest')
plt.title('Input')
plt.subplot(1,2,2)
plt.axis('off')
plt.imshow(y, interpolation='nearest')
plt.title('Predicted output');
###Output
_____no_output_____
###Markdown
This is a template for a ZeroCostDL4Mic notebook and needs to be filled with appropriate model code and information. Thank you for contributing to the ZeroCostDL4Mic Project. Please use this notebook as a template for your implementation. When your notebook is completed, please upload it to your github page and send us a link so we can reference your work. If possible, remember to provide separate training and test datasets (for quality control) containing source and target images with your finished notebooks. This is very useful so that ZeroCostDL4Mic users can test your notebook. **Name of the Network**--- Description of the network and link to publication with author reference. [author et al, etc.](URL).---*Disclaimer*:This notebook is inspired from the *Zero-Cost Deep-Learning to Enhance Microscopy* project (ZeroCostDL4Mic) (https://github.com/HenriquesLab/DeepLearning_Collab/wiki) and was created by **Your name**This notebook is based on the following paper: **Original Title of the paper**, Journal, volume, pages, year and complete author list, [link to paper](URL)And source code found in: *provide github link or equivalent if applicable*Provide information on dataset availability and link for download if applicable.**Please also cite this original paper when using or developing this notebook.** **How to use this notebook?**---Video describing how to use ZeroCostDL4Mic notebooks are available on youtube: - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook---**Structure of a notebook**The notebook contains two types of cell: **Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.---**Table of contents, Code snippets** and **Files**On the top left side of the notebook you find three tabs which contain from top to bottom:*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. **Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.**Note:** The "sample data" in "Files" contains default files. Do not upload anything in here!---**Making changes to the notebook****You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).You can use the ``-mark in code cells to comment out parts of the code. 
This allows you to keep the original code piece in the cell as a comment. **0. Before getting started**--- Give information on the required structure and dataype of the training dataset. Provide information on quality control dataset, such as:**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook. **Additionally, the corresponding input and output files need to have the same name**. Please note that you currently can **only use .tif files!**Here's a common data structure that can work:* Experiment A - **Training dataset** - Low SNR images (Training_source) - img_1.tif, img_2.tif, ... - High SNR images (Training_target) - img_1.tif, img_2.tif, ... - **Quality control dataset** - Low SNR images - img_1.tif, img_2.tif - High SNR images - img_1.tif, img_2.tif - **Data to be predicted** - **Results**---**Important note**- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.--- **1. Initialise the Colab session**--- **1.1. Check for GPU access**---By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:Go to **Runtime -> Change the Runtime type****Runtime type: Python 3** *(Python 3 is programming language in which this program is written)***Accelator: GPU** *(Graphics processing unit)*
###Code
#@markdown ##Run this cell to check if you have GPU access
%tensorflow_version 1.x
import tensorflow as tf
if tf.test.gpu_device_name()=='':
print('You do not have GPU access.')
print('Did you change your runtime ?')
print('If the runtime settings are correct then Google did not allocate GPU to your session')
print('Expect slow performance. To access GPU try reconnecting later')
else:
print('You have GPU access')
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
###Output
_____no_output_____
###Markdown
**1.2. Mount your Google Drive**--- To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook. Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. Once this is done, your data are available in the **Files** tab on the top left of notebook.
###Code
#@markdown ##Run this cell to connect your Google Drive to Colab
#@markdown * Click on the URL.
#@markdown * Sign in your Google Account.
#@markdown * Copy the authorization code.
#@markdown * Enter the authorization code.
#@markdown * Click on "Files" site on the right. Refresh the site. Your Google Drive folder should now be available here as "drive".
#mounts user's Google Drive to Google Colab.
from google.colab import drive
drive.mount('/content/gdrive')
###Output
_____no_output_____
###Markdown
**2. Install Name of the network and dependencies**---
###Code
#@markdown ##<font color=orange>Install Network and dependencies
#Libraries contains information of certain topics.
#Put the imported code and libraries here
Notebook_version = ['1.11'] #Contact the ZeroCostDL4Mic team to find out about the version number
!pip install fpdf
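# A minimal, assumed set of imports used by the template cells further down in this
# notebook (a hedged suggestion - extend or replace these with your network's own imports):
import os, shutil, random, csv, subprocess, time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from glob import glob
from datetime import datetime
from skimage import io
from skimage.io import imread
from fpdf import FPDF, HTMLMixin
from pip._internal.operations.freeze import freeze # pip internal API, used for the pdf reports
#import wget # uncomment if section 3.3 downloads a pretrained model (requires: pip install wget)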
print("Depencies installed and imported.")
# Exporting requirements.txt for local run
# -- the developers should leave this below all the other installations
!pip freeze > requirements.txt
###Output
_____no_output_____
###Markdown
**3. Select your paths and parameters**---The code below allows the user to enter the paths to where the training data is and to define the training parameters. **3.1. Setting the main training parameters**--- **Paths for training, predictions and results** Fill the parameters here as needed and update the code. Note that the sections containing `Training_source`, `Training target`, `model_name` and `model_path` should appear in your notebook.**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source and Training_target data respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.**`model_name`:** Use only my_model -style, not my-model (Use "_" not "-"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).**Training parameters****`number_of_epochs`:**Give estimates for training performance given a number of epochs and provide a default value. **Default value:****`other_parameters`:**Give other parameters or default values **Default value:****If additional parameter above affects the training of the notebook give a brief explanation and how problems can be mitigated** **Advanced parameters - experienced users only****`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each patch is seen at least once per epoch. **Default value: Number of patch / batch_size****`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 16****`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during training. **Default value: 10**
###Code
class bcolors:
WARNING = '\033[31m'
#@markdown ###Path to training images:
Training_source = "" #@param {type:"string"}
# Ground truth images
Training_target = "" #@param {type:"string"}
# model name and path
#@markdown ###Name of the model and path to model folder:
model_name = "" #@param {type:"string"}
model_path = "" #@param {type:"string"}
# other parameters for training.
#@markdown ###Training Parameters
#@markdown Number of epochs:
number_of_epochs = 50#@param {type:"number"}
#@markdown Other parameters, add as necessary
other_parameters = 80#@param {type:"number"} # in pixels
#@markdown ###Advanced Parameters
Use_Default_Advanced_Parameters = True #@param {type:"boolean"}
#@markdown ###If not, please input:
number_of_steps = 400#@param {type:"number"}
batch_size = 16#@param {type:"number"}
percentage_validation = 10 #@param {type:"number"}
if (Use_Default_Advanced_Parameters):
print("Default advanced parameters enabled")
batch_size = 16
percentage_validation = 10
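# Note: when the default advanced parameters are used, number_of_steps is typically
# derived from the dataset size so that each patch is seen at least once per epoch.
# A hedged sketch of that default (adjust to your network's data handling):
# number_of_steps = max(1, int(np.ceil(len(os.listdir(Training_source)) / batch_size)))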
#Here we define the percentage to use for validation
percentage = percentage_validation/100
#here we check that no model with the same name already exist, if so delete
if os.path.exists(model_path+'/'+model_name):
shutil.rmtree(model_path+'/'+model_name)
# The dimensions of a randomly chosen training pair are checked and displayed below.
print("Parameters initiated.")
# This will display a randomly chosen dataset input and output
random_choice = random.choice(os.listdir(Training_source))
x = imread(Training_source+"/"+random_choice)
# Here we check that the input images contains the expected dimensions
if len(x.shape) == 2:
print("Image dimensions (y,x)",x.shape)
if not len(x.shape) == 2:
print(bcolors.WARNING +"Your images appear to have the wrong dimensions. Image dimension",x.shape)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
#Hyperparameters failsafes
# Here we assume the 'other_parameters' placeholder holds the patch size in pixels;
# replace this with your network's real patch_size parameter if it differs.
patch_size = other_parameters
# Here we check that patch_size is smaller than the smallest xy dimension of the image
if patch_size > min(Image_Y, Image_X):
patch_size = min(Image_Y, Image_X)
print (bcolors.WARNING + " Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:",patch_size)
# Here we check that patch_size is divisible by 8
if not patch_size % 8 == 0:
patch_size = ((int(patch_size / 8)-1) * 8)
print (bcolors.WARNING + " Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:",patch_size)
os.chdir(Training_target)
y = imread(Training_target+"/"+random_choice)
f=plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(x, interpolation='nearest')
plt.title('Training source')
plt.axis('off');
plt.subplot(1,2,2)
plt.imshow(y, interpolation='nearest')
plt.title('Training target')
plt.axis('off');
#We save the example data here to use it in the pdf export of the training
plt.savefig('/content/NetworkNameExampleData.png', bbox_inches='tight', pad_inches=0)
###Output
_____no_output_____
###Markdown
**3.2. Data augmentation**--- Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.Data augmentation is performed here by rotating the patches in XY-Plane and flip them along X-Axis. This only works if the images are square in XY.Add any other information which is necessary to run augmentation with your notebook/data.
###Code
#@markdown ###<font color = orange>Add any further useful augmentations
Use_Data_augmentation = False #@param{type:"boolean"}
#@markdown Select this option if you want to use augmentation to increase the size of your dataset
#@markdown **Rotate each image 3 times by 90 degrees.**
Rotation = True #@param{type:"boolean"}
#@markdown **Flip each image once around the x axis of the stack.**
Flip = True #@param{type:"boolean"}
#@markdown **Would you like to save your augmented images?**
Save_augmented_images = False #@param {type:"boolean"}
Saving_path = "" #@param {type:"string"}
if not Save_augmented_images:
Saving_path= "/content"
def rotation_aug(Source_path, Target_path, flip=False):
Source_images = os.listdir(Source_path)
Target_images = os.listdir(Target_path)
for image in Source_images:
source_img = io.imread(os.path.join(Source_path,image))
target_img = io.imread(os.path.join(Target_path,image))
# Source Rotation (rotations are performed in the XY plane; for the 2D images
# this template expects, these are axes (0,1))
source_img_90 = np.rot90(source_img,axes=(0,1))
source_img_180 = np.rot90(source_img_90,axes=(0,1))
source_img_270 = np.rot90(source_img_180,axes=(0,1))
# Target Rotation
target_img_90 = np.rot90(target_img,axes=(0,1))
target_img_180 = np.rot90(target_img_90,axes=(0,1))
target_img_270 = np.rot90(target_img_180,axes=(0,1))
# Add a flip to the rotation
if flip == True:
source_img_lr = np.fliplr(source_img)
source_img_90_lr = np.fliplr(source_img_90)
source_img_180_lr = np.fliplr(source_img_180)
source_img_270_lr = np.fliplr(source_img_270)
target_img_lr = np.fliplr(target_img)
target_img_90_lr = np.fliplr(target_img_90)
target_img_180_lr = np.fliplr(target_img_180)
target_img_270_lr = np.fliplr(target_img_270)
#source_img_90_ud = np.flipud(source_img_90)
# Save the augmented files
# Source images
io.imsave(Saving_path+'/augmented_source/'+image,source_img)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90.tif',source_img_90)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180.tif',source_img_180)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270.tif',source_img_270)
# Target images
io.imsave(Saving_path+'/augmented_target/'+image,target_img)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90.tif',target_img_90)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180.tif',target_img_180)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270.tif',target_img_270)
if flip == True:
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90_lr.tif',source_img_90_lr)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180_lr.tif',source_img_180_lr)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270_lr.tif',source_img_270_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90_lr.tif',target_img_90_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180_lr.tif',target_img_180_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270_lr.tif',target_img_270_lr)
def flip(Source_path, Target_path):
Source_images = os.listdir(Source_path)
Target_images = os.listdir(Target_path)
for image in Source_images:
source_img = io.imread(os.path.join(Source_path,image))
target_img = io.imread(os.path.join(Target_path,image))
source_img_lr = np.fliplr(source_img)
target_img_lr = np.fliplr(target_img)
io.imsave(Saving_path+'/augmented_source/'+image,source_img)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)
io.imsave(Saving_path+'/augmented_target/'+image,target_img)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)
if Use_Data_augmentation:
if os.path.exists(Saving_path+'/augmented_source'):
shutil.rmtree(Saving_path+'/augmented_source')
os.mkdir(Saving_path+'/augmented_source')
if os.path.exists(Saving_path+'/augmented_target'):
shutil.rmtree(Saving_path+'/augmented_target')
os.mkdir(Saving_path+'/augmented_target')
print("Data augmentation enabled")
print("Data augmentation in progress....")
if Rotation == True:
rotation_aug(Training_source,Training_target,flip=Flip)
elif Rotation == False and Flip == True:
flip(Training_source,Training_target)
print("Done")
if not Use_Data_augmentation:
print(bcolors.WARNING+"Data augmentation disabled")
###Output
_____no_output_____
###Markdown
**3.3. Using weights from a pre-trained model as initial weights**--- Here, you can set the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a model of Your Network**. This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**. In order to continue training from the point where the pre-trained model left off, it is advisable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used.
###Code
# @markdown ##Loading weights from a pre-trained network
Use_pretrained_model = False #@param {type:"boolean"}
pretrained_model_choice = "Model_from_file" #@param ["Model_from_file"]
Weights_choice = "last" #@param ["last", "best"]
#@markdown ###If you chose "Model_from_file", please provide the path to the model folder:
pretrained_model_path = "" #@param {type:"string"}
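# Note: this cell expects 'initial_learning_rate' to be defined with the training
# parameters in section 3; if your template does not define it, set it before running this cell.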
W = '\033[0m' # white (normal) - used to reset the console colour after the warnings below
# --------------------- Check if we load a previously trained model ------------------------
if Use_pretrained_model:
# --------------------- Load the model from the choosen path ------------------------
if pretrained_model_choice == "Model_from_file":
h5_file_path = os.path.join(pretrained_model_path, "weights_"+Weights_choice+".h5")
# --------------------- Download the a model provided in the XXX ------------------------
if pretrained_model_choice == "Model_name":
pretrained_model_name = "Model_name"
pretrained_model_path = "/content/"+pretrained_model_name
print("Downloading the 2D_Demo_Model_from_Stardist_2D_paper")
if os.path.exists(pretrained_model_path):
shutil.rmtree(pretrained_model_path)
os.makedirs(pretrained_model_path)
wget.download("", pretrained_model_path)
wget.download("", pretrained_model_path)
wget.download("", pretrained_model_path)
wget.download("", pretrained_model_path)
h5_file_path = os.path.join(pretrained_model_path, "weights_"+Weights_choice+".h5")
# --------------------- Add additional pre-trained models here ------------------------
# --------------------- Check the model exist ------------------------
# If the model path chosen does not contain a pretrained model then Use_pretrained_model is disabled,
if not os.path.exists(h5_file_path):
print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist')
Use_pretrained_model = False
# If the model path contains a pretrained model, we load the learning rate,
if os.path.exists(h5_file_path):
#Here we check if the learning rate can be loaded from the quality control folder
if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):
with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:
csvRead = pd.read_csv(csvfile, sep=',')
#print(csvRead)
if "learning rate" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)
print("pretrained network learning rate found")
#find the last learning rate
lastLearningRate = csvRead["learning rate"].iloc[-1]
#Find the learning rate corresponding to the lowest validation loss
min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]
#print(min_val_loss)
bestLearningRate = min_val_loss['learning rate'].iloc[-1]
if Weights_choice == "last":
print('Last learning rate: '+str(lastLearningRate))
if Weights_choice == "best":
print('Learning rate of best validation loss: '+str(bestLearningRate))
if not "learning rate" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead
bestLearningRate = initial_learning_rate
lastLearningRate = initial_learning_rate
print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)
#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used
if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):
print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)
bestLearningRate = initial_learning_rate
lastLearningRate = initial_learning_rate
# Display info about the pretrained model to be loaded (or not)
if Use_pretrained_model:
print('Weights found in:')
print(h5_file_path)
print('will be loaded prior to training.')
else:
print(bcolors.WARNING+'No pretrained network will be used.')
#@markdown ###<font color=orange> You will need to add or replace the code that loads any previously trained weights to the notebook here.
###Output
_____no_output_____
###Markdown
**4. Train the network**--- **4.1. Train the network**---When playing the cell below you should see updates after each epoch (round). Network training can take some time.* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches.
###Code
import time
import csv
start = time.time()
#@markdown ##<font color=orange>Start training
# Start Training
#Insert the code necessary to initiate training of your model
#Note that the notebook should load weights either from the model that is
#trained from scratch or if the pretrained weights are used (3.3.)
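# A minimal, hedged Keras-style sketch of what this placeholder might contain
# ('build_model', the generators and the compile/fit arguments are assumptions,
# not part of the template - replace them with your network's own training call):
#
# model = build_model(input_shape=(patch_size, patch_size, 1)) # hypothetical model builder
# if Use_pretrained_model:
#     model.load_weights(h5_file_path) # weights selected in section 3.3
# history = model.fit(train_generator, # hypothetical data generators
#                     validation_data=validation_generator,
#                     steps_per_epoch=number_of_steps,
#                     epochs=number_of_epochs)
# model.save_weights(os.path.join(model_path, model_name, 'weights_last.h5'))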
# Displaying the time elapsed for training
dt = time.time() - start
mins, sec = divmod(dt, 60)
hour, mins = divmod(mins, 60)
print("Time elapsed:",hour, "hour(s)",mins,"min(s)",round(sec),"sec(s)")
#Create a pdf document with training summary
# Most likely this section will require some tinkering
# by the user to get the pdf output to look nice.
class MyFPDF(FPDF, HTMLMixin):
pass
pdf = MyFPDF()
pdf.add_page()
pdf.set_right_margin(-1)
pdf.set_font("Arial", size = 11, style='B')
Network = "Your Network's name"
day = datetime.now()
date_time = str(day)[0:10]
Header = 'Training report for '+Network+' model ('+model_name+')\nDate: '+date_time
pdf.multi_cell(180, 5, txt = Header, align = 'L')
# add another cell
training_time = "Training time: "+str(hour)+ "hour(s) "+str(mins)+"min(s) "+str(round(sec))+"sec(s)"
pdf.cell(190, 5, txt = training_time, ln = 1, align='L')
pdf.ln(1)
Header_2 = 'Information for your materials and methods:'
pdf.cell(190, 5, txt=Header_2, ln=1, align='L')
all_packages = ''
for requirement in freeze(local_only=True):
all_packages = all_packages+requirement+', '
#print(all_packages)
#Main Packages
main_packages = ''
version_numbers = []
for name in ['tensorflow','numpy','torch','scipy']:
find_name=all_packages.find(name)
main_packages = main_packages+all_packages[find_name:all_packages.find(',',find_name)]+', '
#Version numbers only here:
version_numbers.append(all_packages[find_name+len(name)+2:all_packages.find(',',find_name)])
cuda_version = subprocess.run('nvcc --version',stdout=subprocess.PIPE, shell=True)
cuda_version = cuda_version.stdout.decode('utf-8')
cuda_version = cuda_version[cuda_version.find(', V')+3:-1]
gpu_name = subprocess.run('nvidia-smi',stdout=subprocess.PIPE, shell=True)
gpu_name = gpu_name.stdout.decode('utf-8')
gpu_name = gpu_name[gpu_name.find('Tesla'):gpu_name.find('Tesla')+10]
#print(cuda_version[cuda_version.find(', V')+3:-1])
#print(gpu_name)
shape = io.imread(Training_source+'/'+os.listdir(Training_source)[1]).shape
dataset_size = len(os.listdir(Training_source))
text = 'The '+Network+' model was trained from scratch for '+str(number_of_steps)+' steps on '+str(dataset_size)+' paired image patches (image dimensions: '+str(shape)+', patch size: ('+str(32)+','+str(64)+','+str(64)+')) with a batch size of '+str(batch_size)+' and an MSE loss function, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Key python packages used include tensorflow (v '+version_numbers[0]+'), torch (v '+version_numbers[2]+'), scipy (v '+version_numbers[3]+'), numpy (v '+version_numbers[1]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+' GPU.'
#text = 'The '+Network+' model ('+model_name+') was trained using '+str(dataset_size)+' paired images (image dimensions: '+str(shape)+') using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Key python packages used include tensorflow (v '+version_numbers[0]+'), torch (v '+version_numbers[2]+'), scipy (v '+version_numbers[3]+'), numpy (v '+version_numbers[1]+'), cuda (v '+cuda_version+'). The GPU used was a '+gpu_name+'.'
#if Use_pretrained_model:
# text = 'The '+Network+' model ('+model_name+') was trained using '+str(dataset_size)+' paired images (image dimensions: '+str(shape)+') using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Key python packages used include tensorflow (v '+version_numbers[0]+'), Keras (v '+version_numbers[2]+'), pytorch (v '+version_numbers[3]+'), numpy (v '+version_numbers[1]+'), cuda (v '+cuda_version+'). The GPU used was a '+gpu_name+'. The model was trained from the pretrained model: '+pretrained_model_path+'.'
pdf.set_font('')
pdf.set_font_size(10.)
pdf.multi_cell(190, 5, txt = text, align='L')
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.ln(1)
pdf.cell(28, 5, txt='Augmentation: ', ln=0)
pdf.set_font('')
if Use_Data_augmentation:
aug_text = 'The dataset was augmented by'
if Rotation:
aug_text = aug_text+'\n- rotation'
if Flip:
aug_text = aug_text+'\n- flipping'
else:
aug_text = 'No augmentation was used for training.'
pdf.multi_cell(190, 5, txt=aug_text, align='L')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(1)
pdf.cell(180, 5, txt = 'Parameters', align='L', ln=1)
pdf.set_font('')
pdf.set_font_size(10.)
# if Use_Default_Advanced_Parameters:
# pdf.cell(200, 5, txt='Default Advanced Parameters were enabled')
pdf.cell(200, 5, txt='The following parameters were used for training:')
pdf.ln(1)
html = """
<table width=40% style="margin-left:0px;">
<tr>
<th width = 50% align="left">Parameter</th>
<th width = 50% align="left">Value</th>
</tr>
<tr>
<td width = 50%>percentage_validation</td>
<td width = 50%>{0}</td>
</tr>
<tr>
<td width = 50%>steps</td>
<td width = 50%>{1}</td>
</tr>
<tr>
<td width = 50%>batch_size</td>
<td width = 50%>{2}</td>
</table>
""".format(percentage_validation,steps,batch_size)
pdf.write_html(html)
#pdf.multi_cell(190, 5, txt = text_2, align='L')
pdf.set_font("Arial", size = 11, style='B')
pdf.ln(1)
pdf.cell(190, 5, txt = 'Training Dataset', align='L', ln=1)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(30, 5, txt= 'Training_source:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = Training_source, align = 'L')
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(28, 5, txt= 'Training_target:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = Training_target, align = 'L')
#pdf.cell(190, 5, txt=aug_text, align='L', ln=1)
pdf.ln(1)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(22, 5, txt= 'Model Path:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = model_path+'/'+model_name, align = 'L')
pdf.ln(1)
pdf.cell(60, 5, txt = 'Example Training pair (single slice)', ln=1)
pdf.ln(1)
exp_size = io.imread('/content/NetworkNameExampleData.png').shape
pdf.image('/content/NetworkNameExampleData.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
pdf.ln(1)
ref_1 = 'References:\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. "ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy." BioRxiv (2020).'
pdf.multi_cell(190, 5, txt = ref_1, align='L')
ref_2 = '- Name of method: reference'
pdf.multi_cell(190, 5, txt = ref_2, align='L')
pdf.ln(3)
reminder = 'Important:\nRemember to perform the quality control step on all newly trained models\nPlease consider depositing your training dataset on Zenodo'
pdf.set_font('Arial', size = 11, style='B')
pdf.multi_cell(190, 5, txt=reminder, align='C')
pdf.output(model_path+'/'+model_name+'/'+model_name+'_'+date_time+"_training_report.pdf")
###Output
_____no_output_____
###Markdown
**4.2. Download your model(s) from Google Drive**---Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder. **5. Evaluate your model**---This section allows the user to perform important quality checks on the validity and generalisability of the trained model. **We highly recommend to perform quality control on all newly trained models.**
###Code
# model name and path
#@markdown ###Do you want to assess the model you just trained ?
Use_the_current_trained_model = True #@param {type:"boolean"}
#@markdown ###If not, please provide the name of the model and path to model folder:
#@markdown #####During training, the model files are automatically saved inside a folder named after model_name in section 3. Provide the path to this folder below.
QC_model_folder = "" #@param {type:"string"}
#Here we define the loaded model name and path
QC_model_name = os.path.basename(QC_model_folder)
QC_model_path = os.path.dirname(QC_model_folder)
if (Use_the_current_trained_model):
QC_model_name = model_name
QC_model_path = model_path
full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'
if os.path.exists(full_QC_model_path):
print("The "+QC_model_name+" network will be evaluated")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
###Output
_____no_output_____
###Markdown
**5.1. Inspection of the loss function**---First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.***Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.**Validation loss** describes the same error value between the model's prediction on a validation image and its target.During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again and the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased.
###Code
#@markdown ##Play the cell to show a plot of training errors vs. epoch number
import csv
from matplotlib import pyplot as plt
lossDataFromCSV = []
vallossDataFromCSV = []
with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:
csvRead = csv.reader(csvfile, delimiter=',')
next(csvRead)
for row in csvRead:
lossDataFromCSV.append(float(row[0]))
vallossDataFromCSV.append(float(row[1]))
epochNumber = range(len(lossDataFromCSV))
plt.figure(figsize=(15,10))
plt.subplot(2,1,1)
plt.plot(epochNumber,lossDataFromCSV, label='Training loss')
plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')
plt.title('Training loss and validation loss vs. epoch number (linear scale)')
plt.ylabel('Loss')
plt.xlabel('Epoch number')
plt.legend()
plt.subplot(2,1,2)
plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')
plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')
plt.title('Training loss and validation loss vs. epoch number (log scale)')
plt.ylabel('Loss')
plt.xlabel('Epoch number')
plt.legend()
plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')
plt.show()
###Output
_____no_output_____
###Markdown
**5.2. Error mapping and quality metrics estimation**--- Update the code below to perform predictions on your quality control dataset. Use the metrics that are the most meaningful to assess the quality of the prediction.This section will display SSIM maps and RSE maps as well as calculating total SSIM, NRMSE and PSNR metrics for all the images provided in the "Source_QC_folder" and "Target_QC_folder" !**1. The SSIM (structural similarity) map** The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). **mSSIM** is the SSIM value calculated across the entire window of both images.**The output below shows the SSIM maps with the mSSIM****2. The RSE (Root Squared Error) map** This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. The higher the score the better the agreement.**The output below shows the RSE maps with the NRMSE and PSNR values.**
###Code
#@markdown ##Choose the folders that contain your Quality Control dataset
from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio as psnr
Source_QC_folder = "" #@param{type:"string"}
Target_QC_folder = "" #@param{type:"string"}
# Create a quality control/Prediction Folder
if os.path.exists(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction"):
shutil.rmtree(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction")
os.makedirs(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction")
# Insert code to activate the pretrained model if necessary.
# List Tif images in Source_QC_folder
Source_QC_folder_tif = Source_QC_folder+"/*.tif"
Z = sorted(glob(Source_QC_folder_tif))
Z = list(map(imread,Z))
print('Number of test dataset found in the folder: '+str(len(Z)))
# Insert code to perform predictions on all datasets in the Source_QC folder
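# A hedged sketch of what this placeholder might contain ('predict_image' is a
# hypothetical stand-in for your network's inference function):
# for filename in os.listdir(Source_QC_folder):
#     if filename.endswith('.tif'):
#         prediction = predict_image(io.imread(os.path.join(Source_QC_folder, filename)))
#         io.imsave(os.path.join(QC_model_path, QC_model_name, 'Quality Control', 'Prediction', filename), prediction)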
def ssim(img1, img2):
return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)
def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):
"""This function is adapted from Martin Weigert"""
"""Percentile-based image normalization."""
mi = np.percentile(x,pmin,axis=axis,keepdims=True)
ma = np.percentile(x,pmax,axis=axis,keepdims=True)
return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)
def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32
"""This function is adapted from Martin Weigert"""
if dtype is not None:
x = x.astype(dtype,copy=False)
mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)
ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)
eps = dtype(eps)
try:
import numexpr
x = numexpr.evaluate("(x - mi) / ( ma - mi + eps )")
except ImportError:
x = (x - mi) / ( ma - mi + eps )
if clip:
x = np.clip(x,0,1)
return x
def norm_minmse(gt, x, normalize_gt=True):
"""This function is adapted from Martin Weigert"""
"""
normalizes and affinely scales an image pair such that the MSE is minimized
Parameters
----------
gt: ndarray
the ground truth image
x: ndarray
the image that will be affinely scaled
normalize_gt: bool
set to True of gt image should be normalized (default)
Returns
-------
gt_scaled, x_scaled
"""
if normalize_gt:
gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)
x = x.astype(np.float32, copy=False) - np.mean(x)
#x = x - np.mean(x)
gt = gt.astype(np.float32, copy=False) - np.mean(gt)
#gt = gt - np.mean(gt)
scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())
return gt, scale * x
# Open and create the csv file that will contain all the QC metrics
with open(QC_model_path+"/"+QC_model_name+"/Quality Control/QC_metrics_"+QC_model_name+".csv", "w", newline='') as file:
writer = csv.writer(file)
# Write the header in the csv file
writer.writerow(["image #","Prediction v. GT mSSIM","Input v. GT mSSIM", "Prediction v. GT NRMSE", "Input v. GT NRMSE", "Prediction v. GT PSNR", "Input v. GT PSNR"])
# Let's loop through the provided dataset in the QC folders
for i in os.listdir(Source_QC_folder):
if not os.path.isdir(os.path.join(Source_QC_folder,i)):
print('Running QC on: '+i)
# -------------------------------- Target test data (Ground truth) --------------------------------
test_GT = io.imread(os.path.join(Target_QC_folder, i))
# -------------------------------- Source test data --------------------------------
test_source = io.imread(os.path.join(Source_QC_folder,i))
# Normalize the images wrt each other by minimizing the MSE between GT and Source image
test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)
# -------------------------------- Prediction --------------------------------
test_prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction",i))
# Normalize the images wrt each other by minimizing the MSE between GT and prediction
test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True)
# -------------------------------- Calculate the metric maps and save them --------------------------------
# Calculate the SSIM maps
index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)
index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)
#Save ssim_maps
img_SSIM_GTvsPrediction_32bit = np.float32(img_SSIM_GTvsPrediction)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsPrediction_'+i,img_SSIM_GTvsPrediction_32bit)
img_SSIM_GTvsSource_32bit = np.float32(img_SSIM_GTvsSource)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsSource_'+i,img_SSIM_GTvsSource_32bit)
# Calculate the Root Squared Error (RSE) maps
img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))
img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))
# Save SE maps
img_RSE_GTvsPrediction_32bit = np.float32(img_RSE_GTvsPrediction)
img_RSE_GTvsSource_32bit = np.float32(img_RSE_GTvsSource)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsPrediction_'+i,img_RSE_GTvsPrediction_32bit)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsSource_'+i,img_RSE_GTvsSource_32bit)
# -------------------------------- Calculate the RSE metrics and save them --------------------------------
# Normalised Root Mean Squared Error (here it's valid to take the mean of the image)
NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))
NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))
# We can also measure the peak signal to noise ratio between the images
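# PSNR = 10 * log10(data_range**2 / MSE); with data_range = 1.0 this reduces to -10 * log10(MSE)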
PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)
PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])
# All data is now processed saved
Test_FileList = os.listdir(Source_QC_folder) # this assumes, as it should, that both source and target are named the same
plt.figure(figsize=(15,15))
# Currently only displays the last computed set, from memory
# Target (Ground-truth)
plt.subplot(3,3,1)
plt.axis('off')
img_GT = io.imread(os.path.join(Target_QC_folder, Test_FileList[-1]))
plt.imshow(img_GT)
plt.title('Target',fontsize=15)
# Source
plt.subplot(3,3,2)
plt.axis('off')
img_Source = io.imread(os.path.join(Source_QC_folder, Test_FileList[-1]))
plt.imshow(img_Source)
plt.title('Source',fontsize=15)
#Prediction
plt.subplot(3,3,3)
plt.axis('off')
img_Prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction/", Test_FileList[-1]))
plt.imshow(img_Prediction)
plt.title('Prediction',fontsize=15)
#Setting up colours
cmap = plt.cm.CMRmap
#SSIM between GT and Source
plt.subplot(3,3,5)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)
plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)
#SSIM between GT and Prediction
plt.subplot(3,3,6)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)
plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)
#Root Squared Error between GT and Source
plt.subplot(3,3,8)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)
plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)
#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))
plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)
#Root Squared Error between GT and Prediction
plt.subplot(3,3,9)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)
#Save the example QC figure so it can be embedded in the pdf report below
plt.savefig(full_QC_model_path+'/Quality Control/QC_example_data.png', bbox_inches='tight', pad_inches=0)
plt.show()
#Make a pdf summary of the QC results
from datetime import datetime
class MyFPDF(FPDF, HTMLMixin):
pass
pdf = MyFPDF()
pdf.add_page()
pdf.set_right_margin(-1)
pdf.set_font("Arial", size = 11, style='B')
Network = "Your Network's name"
#model_name = os.path.basename(QC_model_folder)
day = datetime.now()
date_time = str(day)[0:10]
Header = 'Quality Control report for '+Network+' model ('+QC_model_name+')\nDate: '+date_time
pdf.multi_cell(180, 5, txt = Header, align = 'L')
all_packages = ''
for requirement in freeze(local_only=True):
all_packages = all_packages+requirement+', '
pdf.set_font('')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(2)
pdf.cell(190, 5, txt = 'Development of Training Losses', ln=1, align='L')
if os.path.exists(full_QC_model_path+'/Quality Control/lossCurvePlots.png'):
exp_size = io.imread(full_QC_model_path+'/Quality Control/lossCurvePlots.png').shape
pdf.image(full_QC_model_path+'/Quality Control/lossCurvePlots.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
else:
pdf.set_font('')
pdf.set_font('Arial', size=10)
# pdf.ln(3)
pdf.multi_cell(190, 5, txt='If you would like to see the evolution of the loss function during training please play the first cell of the QC section in the notebook.')
pdf.ln(3)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.ln(3)
pdf.cell(80, 5, txt = 'Example Quality Control Visualisation', ln=1)
pdf.ln(1)
exp_size = io.imread(full_QC_model_path+'/Quality Control/QC_example_data.png').shape
pdf.image(full_QC_model_path+'/Quality Control/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
pdf.ln(1)
pdf.set_font('')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(1)
pdf.cell(180, 5, txt = 'Quality Control Metrics', align='L', ln=1)
pdf.set_font('')
pdf.set_font_size(10.)
pdf.ln(1)
html = """
<body>
<font size="10" face="Courier New" >
<table width=97% style="margin-left:0px;">"""
with open(full_QC_model_path+'/Quality Control/QC_metrics_'+QC_model_name+'.csv', 'r') as csvfile:
metrics = csv.reader(csvfile)
# The columns below mirror the seven columns written to the QC metrics csv above
header = next(metrics)
image = header[0]
mSSIM_PvsGT = header[1]
mSSIM_SvsGT = header[2]
NRMSE_PvsGT = header[3]
NRMSE_SvsGT = header[4]
PSNR_PvsGT = header[5]
PSNR_SvsGT = header[6]
header = """
<tr>
<th width = 16% align="left">{0}</th>
<th width = 14% align="left">{1}</th>
<th width = 14% align="left">{2}</th>
<th width = 14% align="left">{3}</th>
<th width = 14% align="left">{4}</th>
<th width = 14% align="left">{5}</th>
<th width = 14% align="left">{6}</th>
</tr>""".format(image,mSSIM_PvsGT,mSSIM_SvsGT,NRMSE_PvsGT,NRMSE_SvsGT,PSNR_PvsGT,PSNR_SvsGT)
html = html+header
for row in metrics:
image = row[0]
mSSIM_PvsGT = row[1]
mSSIM_SvsGT = row[2]
NRMSE_PvsGT = row[3]
NRMSE_SvsGT = row[4]
PSNR_PvsGT = row[5]
PSNR_SvsGT = row[6]
cells = """
<tr>
<td width = 16% align="left">{0}</td>
<td width = 14% align="center">{1}</td>
<td width = 14% align="center">{2}</td>
<td width = 14% align="center">{3}</td>
<td width = 14% align="center">{4}</td>
<td width = 14% align="center">{5}</td>
<td width = 14% align="center">{6}</td>
</tr>""".format(image,str(round(float(mSSIM_PvsGT),3)),str(round(float(mSSIM_SvsGT),3)),str(round(float(NRMSE_PvsGT),3)),str(round(float(NRMSE_SvsGT),3)),str(round(float(PSNR_PvsGT),3)),str(round(float(PSNR_SvsGT),3)))
html = html+cells
html = html+"""</body></table>"""
pdf.write_html(html)
pdf.ln(1)
pdf.set_font('')
pdf.set_font_size(10.)
ref_1 = 'References:\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. "ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy." BioRxiv (2020).'
pdf.multi_cell(190, 5, txt = ref_1, align='L')
ref_2 = '- Name of method: Reference'
pdf.multi_cell(190, 5, txt = ref_2, align='L')
pdf.ln(3)
reminder = 'To find the parameters and other information about how this model was trained, go to the training_report.pdf of this model which should be in the folder of the same name.'
pdf.set_font('Arial', size = 11, style='B')
pdf.multi_cell(190, 5, txt=reminder, align='C')
pdf.output(full_QC_model_path+'/Quality Control/'+QC_model_name+'_QC_report.pdf')
###Output
_____no_output_____
###Markdown
**6. Using the trained model**---In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive. **6.1. Generate prediction(s) from unseen dataset**---Fill the below code to perform predictions using your model.The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.**`Result_folder`:** This folder will contain the predicted output images.
###Code
#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.
Data_folder = "" #@param {type:"string"}
Result_folder = "" #@param {type:"string"}
# model name and path
#@markdown ###Do you want to use the current trained model?
Use_the_current_trained_model = True #@param {type:"boolean"}
#@markdown ###If not, provide the name of the model and path to model folder:
#@markdown #####During training, the model files are automatically saved inside a folder named after model_name in section 3. Provide the path to this folder below.
Prediction_model_folder = "" #@param {type:"string"}
#Here we find the loaded model name and parent path
Prediction_model_name = os.path.basename(Prediction_model_folder)
Prediction_model_path = os.path.dirname(Prediction_model_folder)
if (Use_the_current_trained_model):
print("Using current trained network")
Prediction_model_name = model_name
Prediction_model_path = model_path
full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'
if os.path.exists(full_Prediction_model_path):
print("The "+Prediction_model_name+" network will be used.")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
# Activate the (pre-)trained model
# Provide the code for performing predictions and saving them
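# A hedged sketch of a prediction loop ('predict_image' is a hypothetical
# stand-in for your network's inference function):
# for filename in os.listdir(Data_folder):
#     if filename.endswith('.tif'):
#         prediction = predict_image(io.imread(os.path.join(Data_folder, filename)))
#         io.imsave(os.path.join(Result_folder, filename), prediction)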
print("Images saved into folder:", Result_folder)
###Output
_____no_output_____
###Markdown
**6.2. Inspect the predicted output**---
###Code
# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.
# This will display a randomly chosen dataset input and predicted output
random_choice = random.choice(os.listdir(Data_folder))
x = imread(Data_folder+"/"+random_choice)
os.chdir(Result_folder)
y = imread(Result_folder+"/"+random_choice)
plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.axis('off')
plt.imshow(x, interpolation='nearest')
plt.title('Input')
plt.subplot(1,2,2)
plt.axis('off')
plt.imshow(y, interpolation='nearest')
plt.title('Predicted output');
###Output
_____no_output_____
###Markdown
This is a template for a ZeroCostDL4Mic notebook and needs to be filled with appropriate model code and information. Thank you for contributing to the ZeroCostDL4Mic Project. Please use this notebook as a template for your implementation. When your notebook is completed, please upload it to your github page and send us a link so we can reference your work. If possible, remember to provide separate training and test datasets (for quality control) containing source and target images with your finished notebooks. This is very useful so that ZeroCostDL4Mic users can test your notebook. **Name of the Network**--- Description of the network and link to publication with author reference. [author et al, etc.](URL).---*Disclaimer*:This notebook is inspired from the *Zero-Cost Deep-Learning to Enhance Microscopy* project (ZeroCostDL4Mic) (https://github.com/HenriquesLab/DeepLearning_Collab/wiki) and was created by **Your name**This notebook is based on the following paper: **Original Title of the paper**, Journal, volume, pages, year and complete author list, [link to paper](URL)And source code found in: *provide github link or equivalent if applicable*Provide information on dataset availability and link for download if applicable.**Please also cite this original paper when using or developing this notebook.** **How to use this notebook?**---Video describing how to use ZeroCostDL4Mic notebooks are available on youtube: - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook---**Structure of a notebook**The notebook contains two types of cell: **Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.---**Table of contents, Code snippets** and **Files**On the top left side of the notebook you find three tabs which contain from top to bottom:*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. **Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.**Note:** The "sample data" in "Files" contains default files. Do not upload anything in here!---**Making changes to the notebook****You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).You can use the ``-mark in code cells to comment out parts of the code. 
This allows you to keep the original code piece in the cell as a comment. **0. Before getting started**--- Give information on the required structure and dataype of the training dataset. Provide information on quality control dataset, such as:**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook. **Additionally, the corresponding input and output files need to have the same name**. Please note that you currently can **only use .tif files!**Here's a common data structure that can work:* Experiment A - **Training dataset** - Low SNR images (Training_source) - img_1.tif, img_2.tif, ... - High SNR images (Training_target) - img_1.tif, img_2.tif, ... - **Quality control dataset** - Low SNR images - img_1.tif, img_2.tif - High SNR images - img_1.tif, img_2.tif - **Data to be predicted** - **Results**---**Important note**- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.--- **1. Initialise the Colab session**--- **1.1. Check for GPU access**---By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:Go to **Runtime -> Change the Runtime type****Runtime type: Python 3** *(Python 3 is programming language in which this program is written)***Accelerator: GPU** *(Graphics processing unit)*
###Code
#@markdown ##Run this cell to check if you have GPU access
%tensorflow_version 1.x
import tensorflow as tf
if tf.test.gpu_device_name()=='':
print('You do not have GPU access.')
print('Did you change your runtime ?')
print('If the runtime settings are correct then Google did not allocate GPU to your session')
print('Expect slow performance. To access GPU try reconnecting later')
else:
print('You have GPU access')
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
###Output
_____no_output_____
###Markdown
**1.2. Mount your Google Drive**--- To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook. Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. Once this is done, your data are available in the **Files** tab on the top left of notebook.
###Code
#@markdown ##Run this cell to connect your Google Drive to Colab
#@markdown * Click on the URL.
#@markdown * Sign in your Google Account.
#@markdown * Copy the authorization code.
#@markdown * Enter the authorization code.
#@markdown * Click on "Files" site on the right. Refresh the site. Your Google Drive folder should now be available here as "drive".
#mounts user's Google Drive to Google Colab.
from google.colab import drive
drive.mount('/content/gdrive')
###Output
_____no_output_____
###Markdown
**2. Install Name of the network and dependencies**---
###Code
#@markdown ##<font color=orange>Install Network and dependencies
#Libraries contains information of certain topics.
# Place all imports below this code snippet
import sys
before = [str(m) for m in sys.modules]
#Put the imported code and libraries here
Notebook_version = ['1.12'] #Contact the ZeroCostDL4Mic team to find out about the version number
!pip install fpdf
# Below are templates for the function definitions for the export
# of pdf summaries for training and qc. You will need to adjust these functions
# with the variables and other parameters as necessary to make them
# work for your project
from datetime import datetime
# Generic imports used by the template code below (pdf-report helpers, quality
# control and display cells). The choice of scikit-image's imread is an
# assumption; swap in your preferred reader. Add your network-specific imports
# in the placeholder above.
import os
import csv
import shutil
import random
import subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from glob import glob
from skimage import io
from skimage.io import imread
from fpdf import FPDF, HTMLMixin
from pip._internal.operations.freeze import freeze
def pdf_export(trained = False, augmentation = False, pretrained_model = False):
# save FPDF() class into a
# variable pdf
#from datetime import datetime
class MyFPDF(FPDF, HTMLMixin):
pass
pdf = MyFPDF()
pdf.add_page()
pdf.set_right_margin(-1)
pdf.set_font("Arial", size = 11, style='B')
Network = "Your network's name"
day = datetime.now()
datetime_str = str(day)[0:10]
Header = 'Training report for '+Network+' model ('+model_name+')\nDate: '+datetime_str
pdf.multi_cell(180, 5, txt = Header, align = 'L')
# add another cell
if trained:
training_time = "Training time: "+str(hour)+ "hour(s) "+str(mins)+"min(s) "+str(round(sec))+"sec(s)"
pdf.cell(190, 5, txt = training_time, ln = 1, align='L')
pdf.ln(1)
Header_2 = 'Information for your materials and methods:'
pdf.cell(190, 5, txt=Header_2, ln=1, align='L')
all_packages = ''
for requirement in freeze(local_only=True):
all_packages = all_packages+requirement+', '
#print(all_packages)
#Main Packages
main_packages = ''
version_numbers = []
for name in ['tensorflow','numpy','Keras','csbdeep']:
find_name=all_packages.find(name)
main_packages = main_packages+all_packages[find_name:all_packages.find(',',find_name)]+', '
#Version numbers only here:
version_numbers.append(all_packages[find_name+len(name)+2:all_packages.find(',',find_name)])
cuda_version = subprocess.run('nvcc --version',stdout=subprocess.PIPE, shell=True)
cuda_version = cuda_version.stdout.decode('utf-8')
cuda_version = cuda_version[cuda_version.find(', V')+3:-1]
gpu_name = subprocess.run('nvidia-smi',stdout=subprocess.PIPE, shell=True)
gpu_name = gpu_name.stdout.decode('utf-8')
gpu_name = gpu_name[gpu_name.find('Tesla'):gpu_name.find('Tesla')+10]
#print(cuda_version[cuda_version.find(', V')+3:-1])
#print(gpu_name)
shape = io.imread(Training_source+'/'+os.listdir(Training_source)[1]).shape
dataset_size = len(os.listdir(Training_source))
text = 'The '+Network+' model was trained from scratch for '+str(number_of_epochs)+' epochs on '+str(dataset_size*number_of_patches)+' paired image patches (image dimensions: '+str(shape)+', patch size: ('+str(patch_size)+','+str(patch_size)+')) with a batch size of '+str(batch_size)+' and a '+config.train_loss+' loss function, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Key python packages used include tensorflow (v '+version_numbers[0]+'), Keras (v '+version_numbers[2]+'), csbdeep (v '+version_numbers[3]+'), numpy (v '+version_numbers[1]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'
if pretrained_model:
text = 'The '+Network+' model was trained for '+str(number_of_epochs)+' epochs on '+str(dataset_size*number_of_patches)+' paired image patches (image dimensions: '+str(shape)+', patch size: ('+str(patch_size)+','+str(patch_size)+')) with a batch size of '+str(batch_size)+' and a '+config.train_loss+' loss function, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). The model was re-trained from a pretrained model. Key python packages used include tensorflow (v '+version_numbers[0]+'), Keras (v '+version_numbers[2]+'), csbdeep (v '+version_numbers[3]+'), numpy (v '+version_numbers[1]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'
pdf.set_font('')
pdf.set_font_size(10.)
pdf.multi_cell(190, 5, txt = text, align='L')
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.ln(1)
pdf.cell(28, 5, txt='Augmentation: ', ln=0)
pdf.set_font('')
if augmentation:
aug_text = 'The dataset was augmented by a factor of '+str(Multiply_dataset_by)+' by'
if rotate_270_degrees != 0 or rotate_90_degrees != 0:
aug_text = aug_text+'\n- rotation'
if flip_left_right != 0 or flip_top_bottom != 0:
aug_text = aug_text+'\n- flipping'
if random_zoom_magnification != 0:
aug_text = aug_text+'\n- random zoom magnification'
if random_distortion != 0:
aug_text = aug_text+'\n- random distortion'
if image_shear != 0:
aug_text = aug_text+'\n- image shearing'
if skew_image != 0:
aug_text = aug_text+'\n- image skewing'
else:
aug_text = 'No augmentation was used for training.'
pdf.multi_cell(190, 5, txt=aug_text, align='L')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(1)
pdf.cell(180, 5, txt = 'Parameters', align='L', ln=1)
pdf.set_font('')
pdf.set_font_size(10.)
if Use_Default_Advanced_Parameters:
pdf.cell(200, 5, txt='Default Advanced Parameters were enabled')
pdf.cell(200, 5, txt='The following parameters were used for training:')
pdf.ln(1)
html = """
<table width=40% style="margin-left:0px;">
<tr>
<th width = 50% align="left">Parameter</th>
<th width = 50% align="left">Value</th>
</tr>
<tr>
<td width = 50%>number_of_epochs</td>
<td width = 50%>{0}</td>
</tr>
<tr>
<td width = 50%>patch_size</td>
<td width = 50%>{1}</td>
</tr>
<tr>
<td width = 50%>number_of_patches</td>
<td width = 50%>{2}</td>
</tr>
<tr>
<td width = 50%>batch_size</td>
<td width = 50%>{3}</td>
</tr>
<tr>
<td width = 50%>number_of_steps</td>
<td width = 50%>{4}</td>
</tr>
<tr>
<td width = 50%>percentage_validation</td>
<td width = 50%>{5}</td>
</tr>
<tr>
<td width = 50%>initial_learning_rate</td>
<td width = 50%>{6}</td>
</tr>
</table>
""".format(number_of_epochs,str(patch_size)+'x'+str(patch_size),number_of_patches,batch_size,number_of_steps,percentage_validation,initial_learning_rate)
pdf.write_html(html)
#pdf.multi_cell(190, 5, txt = text_2, align='L')
pdf.set_font("Arial", size = 11, style='B')
pdf.ln(1)
pdf.cell(190, 5, txt = 'Training Dataset', align='L', ln=1)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(29, 5, txt= 'Training_source:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = Training_source, align = 'L')
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(27, 5, txt= 'Training_target:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = Training_target, align = 'L')
#pdf.cell(190, 5, txt=aug_text, align='L', ln=1)
pdf.ln(1)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(22, 5, txt= 'Model Path:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = model_path+'/'+model_name, align = 'L')
pdf.ln(1)
pdf.cell(60, 5, txt = 'Example Training pair', ln=1)
pdf.ln(1)
exp_size = io.imread("/content/NetworkNameExampleData.png").shape
pdf.image("/content/NetworkNameExampleData.png", x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
pdf.ln(1)
ref_1 = 'References:\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. "ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy." BioRxiv (2020).'
pdf.multi_cell(190, 5, txt = ref_1, align='L')
ref_2 = '- Your networks name: first author et al. "Title of publication" Journal, year'
pdf.multi_cell(190, 5, txt = ref_2, align='L')
if augmentation:
ref_3 = '- Augmentor: Bloice, Marcus D., Christof Stocker, and Andreas Holzinger. "Augmentor: an image augmentation library for machine learning." arXiv preprint arXiv:1708.04680 (2017).'
pdf.multi_cell(190, 5, txt = ref_3, align='L')
pdf.ln(3)
reminder = 'Important:\nRemember to perform the quality control step on all newly trained models\nPlease consider depositing your training dataset on Zenodo'
pdf.set_font('Arial', size = 11, style='B')
pdf.multi_cell(190, 5, txt=reminder, align='C')
pdf.output(model_path+'/'+model_name+'/'+model_name+"_training_report.pdf")
#Make a pdf summary of the QC results
def qc_pdf_export():
class MyFPDF(FPDF, HTMLMixin):
pass
pdf = MyFPDF()
pdf.add_page()
pdf.set_right_margin(-1)
pdf.set_font("Arial", size = 11, style='B')
Network = "Your network's name"
#model_name = os.path.basename(full_QC_model_path)
day = datetime.now()
datetime_str = str(day)[0:10]
Header = 'Quality Control report for '+Network+' model ('+QC_model_name+')\nDate: '+datetime_str
pdf.multi_cell(180, 5, txt = Header, align = 'L')
all_packages = ''
for requirement in freeze(local_only=True):
all_packages = all_packages+requirement+', '
pdf.set_font('')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(2)
pdf.cell(190, 5, txt = 'Development of Training Losses', ln=1, align='L')
pdf.ln(1)
exp_size = io.imread(full_QC_model_path+'Quality Control/QC_example_data.png').shape
if os.path.exists(full_QC_model_path+'Quality Control/lossCurvePlots.png'):
pdf.image(full_QC_model_path+'Quality Control/lossCurvePlots.png', x = 11, y = None, w = round(exp_size[1]/10), h = round(exp_size[0]/13))
else:
pdf.set_font('')
pdf.set_font('Arial', size=10)
pdf.multi_cell(190, 5, txt='If you would like to see the evolution of the loss function during training please play the first cell of the QC section in the notebook.', align='L')
pdf.ln(2)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.ln(3)
pdf.cell(80, 5, txt = 'Example Quality Control Visualisation', ln=1)
pdf.ln(1)
exp_size = io.imread(full_QC_model_path+'Quality Control/QC_example_data.png').shape
pdf.image(full_QC_model_path+'Quality Control/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/10), h = round(exp_size[0]/10))
pdf.ln(1)
pdf.set_font('')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(1)
pdf.cell(180, 5, txt = 'Quality Control Metrics', align='L', ln=1)
pdf.set_font('')
pdf.set_font_size(10.)
pdf.ln(1)
html = """
<body>
<font size="7" face="Courier New" >
<table width=94% style="margin-left:0px;">"""
with open(full_QC_model_path+'Quality Control/QC_metrics_'+QC_model_name+'.csv', 'r') as csvfile:
metrics = csv.reader(csvfile)
header = next(metrics)
image = header[0]
mSSIM_PvsGT = header[1]
mSSIM_SvsGT = header[2]
NRMSE_PvsGT = header[3]
NRMSE_SvsGT = header[4]
PSNR_PvsGT = header[5]
PSNR_SvsGT = header[6]
header = """
<tr>
<th width = 10% align="left">{0}</th>
<th width = 15% align="left">{1}</th>
<th width = 15% align="center">{2}</th>
<th width = 15% align="left">{3}</th>
<th width = 15% align="center">{4}</th>
<th width = 15% align="left">{5}</th>
<th width = 15% align="center">{6}</th>
</tr>""".format(image,mSSIM_PvsGT,mSSIM_SvsGT,NRMSE_PvsGT,NRMSE_SvsGT,PSNR_PvsGT,PSNR_SvsGT)
html = html+header
for row in metrics:
image = row[0]
mSSIM_PvsGT = row[1]
mSSIM_SvsGT = row[2]
NRMSE_PvsGT = row[3]
NRMSE_SvsGT = row[4]
PSNR_PvsGT = row[5]
PSNR_SvsGT = row[6]
cells = """
<tr>
<td width = 10% align="left">{0}</td>
<td width = 15% align="center">{1}</td>
<td width = 15% align="center">{2}</td>
<td width = 15% align="center">{3}</td>
<td width = 15% align="center">{4}</td>
<td width = 15% align="center">{5}</td>
<td width = 15% align="center">{6}</td>
</tr>""".format(image,str(round(float(mSSIM_PvsGT),3)),str(round(float(mSSIM_SvsGT),3)),str(round(float(NRMSE_PvsGT),3)),str(round(float(NRMSE_SvsGT),3)),str(round(float(PSNR_PvsGT),3)),str(round(float(PSNR_SvsGT),3)))
html = html+cells
html = html+"""</body></table>"""
pdf.write_html(html)
pdf.ln(1)
pdf.set_font('')
pdf.set_font_size(10.)
ref_1 = 'References:\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. "ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy." BioRxiv (2020).'
pdf.multi_cell(190, 5, txt = ref_1, align='L')
ref_2 = '- Your networks name: first author et al. "Title of publication" Journal, year'
pdf.multi_cell(190, 5, txt = ref_2, align='L')
pdf.ln(3)
reminder = 'To find the parameters and other information about how this model was trained, go to the training_report.pdf of this model which should be in the folder of the same name.'
pdf.set_font('Arial', size = 11, style='B')
pdf.multi_cell(190, 5, txt=reminder, align='C')
pdf.output(full_QC_model_path+'Quality Control/'+QC_model_name+'_QC_report.pdf')
print("Depencies installed and imported.")
# Exporting requirements.txt for local run
# -- the developers should leave this below all the other installations
!pip freeze > requirements.txt
# Code snippet to shorten requirements file to essential packages
after = [str(m) for m in sys.modules]
# Ensure this code snippet is placed before all other imports!
# import sys
# before = [str(m) for m in sys.modules]
from builtins import any as b_any
def filter_files(file_list, filter_list):
filtered_list = []
for fname in file_list:
if b_any(fname.split('==')[0] in s for s in filter_list):
filtered_list.append(fname)
return filtered_list
df = pd.read_csv('requirements.txt', delimiter = "\n")
mod_list = [m.split('.')[0] for m in after if not m in before]
req_list_temp = df.values.tolist()
req_list = [x[0] for x in req_list_temp]
# If necessary, extend mod_name_list with packages where import name is different from package name for pip install
mod_name_list = [['sklearn', 'scikit-learn'], ['skimage', 'scikit-image']]
mod_replace_list = [[x[1] for x in mod_name_list] if s in [x[0] for x in mod_name_list] else s for s in mod_list]
filtered_list = filter_files(req_list, mod_replace_list)
# Insert name of network below
file=open('NAME_OF_NETWORK_requirements_simple.txt','w')
for item in filtered_list:
file.writelines(item + '\n')
file.close()
###Output
_____no_output_____
###Markdown
**3. Select your paths and parameters**---The code below allows the user to enter the paths to where the training data is and to define the training parameters. **3.1. Setting the main training parameters**--- **Paths for training, predictions and results** Fill the parameters here as needed and update the code. Note that the sections containing `Training_source`, `Training target`, `model_name` and `model_path` should appear in your notebook.**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source and Training_target data respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.**`model_name`:** Use only my_model -style, not my-model (Use "_" not "-"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).**Training parameters****`number_of_epochs`:**Give estimates for training performance given a number of epochs and provide a default value. **Default value:****`other_parameters`:**Give other parameters or default values **Default value:****If additional parameter above affects the training of the notebook give a brief explanation and how problems can be mitigated** **Advanced parameters - experienced users only****`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each patch is seen at least once per epoch. **Default value: Number of patch / batch_size****`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 16****`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during training. **Default value: 10**
###Code
class bcolors:
  WARNING = '\033[31m'
W = '\033[0m'  # resets the console colour after the warning messages used below
#@markdown ###Path to training images:
Training_source = "" #@param {type:"string"}
# Ground truth images
Training_target = "" #@param {type:"string"}
# model name and path
#@markdown ###Name of the model and path to model folder:
model_name = "" #@param {type:"string"}
model_path = "" #@param {type:"string"}
# other parameters for training.
#@markdown ###Training Parameters
#@markdown Number of epochs:
number_of_epochs = 50#@param {type:"number"}
#@markdown Other parameters, add as necessary
other_parameters = 80#@param {type:"number"} # in pixels
#@markdown ###Advanced Parameters
Use_Default_Advanced_Parameters = True #@param {type:"boolean"}
#@markdown ###If not, please input:
number_of_steps = 400#@param {type:"number"}
batch_size = 16#@param {type:"number"}
percentage_validation = 10 #@param {type:"number"}
if (Use_Default_Advanced_Parameters):
print("Default advanced parameters enabled")
batch_size = 16
percentage_validation = 10
#Here we define the percentage to use for validation
percentage = percentage_validation/100
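# Hedged sketch only (kept commented out): if your notebook exposes a
# number_of_patches parameter, the default number_of_steps described in the
# parameter list above ("number of patches / batch_size") could be derived as:
# if Use_Default_Advanced_Parameters:
#   number_of_steps = max(1, (len(os.listdir(Training_source)) * number_of_patches) // batch_size)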
#here we check that no model with the same name already exist, if so delete
if os.path.exists(model_path+'/'+model_name):
shutil.rmtree(model_path+'/'+model_name)
# The shape of the images is checked below on a randomly chosen training pair.
print("Parameters initiated.")
# This will display a randomly chosen dataset input and output
random_choice = random.choice(os.listdir(Training_source))
x = imread(Training_source+"/"+random_choice)
# Here we check that the input images contains the expected dimensions
if len(x.shape) == 2:
print("Image dimensions (y,x)",x.shape)
if not len(x.shape) == 2:
print(bcolors.WARNING +"Your images appear to have the wrong dimensions. Image dimension",x.shape)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
#Hyperparameters failsafes
# Here we check that patch_size is smaller than the smallest xy dimension of the image
if patch_size > min(Image_Y, Image_X):
patch_size = min(Image_Y, Image_X)
print (bcolors.WARNING + " Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:",patch_size)
# Here we check that patch_size is divisible by 8
if not patch_size % 8 == 0:
patch_size = ((int(patch_size / 8)-1) * 8)
print (bcolors.WARNING + " Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is now:",patch_size)
os.chdir(Training_target)
y = imread(Training_target+"/"+random_choice)
f=plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(x, interpolation='nearest')
plt.title('Training source')
plt.axis('off');
plt.subplot(1,2,2)
plt.imshow(y, interpolation='nearest')
plt.title('Training target')
plt.axis('off');
#We save the example data here to use it in the pdf export of the training
plt.savefig('/content/NetworkNameExampleData.png', bbox_inches='tight', pad_inches=0)
###Output
_____no_output_____
###Markdown
**3.2. Data augmentation**--- Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it. Data augmentation is performed here by rotating the patches in the XY-plane and flipping them along the X-axis. This only works if the images are square in XY. Add any other information which is necessary to run augmentation with your notebook/data.
###Code
#@markdown ###<font color = orange>Add any further useful augmentations
Use_Data_augmentation = False #@param{type:"boolean"}
#@markdown Select this option if you want to use augmentation to increase the size of your dataset
#@markdown **Rotate each image 3 times by 90 degrees.**
Rotation = True #@param{type:"boolean"}
#@markdown **Flip each image once around the x axis of the stack.**
Flip = True #@param{type:"boolean"}
#@markdown **Would you like to save your augmented images?**
Save_augmented_images = False #@param {type:"boolean"}
Saving_path = "" #@param {type:"string"}
if not Save_augmented_images:
Saving_path= "/content"
def rotation_aug(Source_path, Target_path, flip=False):
Source_images = os.listdir(Source_path)
Target_images = os.listdir(Target_path)
for image in Source_images:
source_img = io.imread(os.path.join(Source_path,image))
target_img = io.imread(os.path.join(Target_path,image))
# Source Rotation
source_img_90 = np.rot90(source_img,axes=(1,2))
source_img_180 = np.rot90(source_img_90,axes=(1,2))
source_img_270 = np.rot90(source_img_180,axes=(1,2))
# Target Rotation
target_img_90 = np.rot90(target_img,axes=(1,2))
target_img_180 = np.rot90(target_img_90,axes=(1,2))
target_img_270 = np.rot90(target_img_180,axes=(1,2))
# Add a flip to the rotation
if flip == True:
source_img_lr = np.fliplr(source_img)
source_img_90_lr = np.fliplr(source_img_90)
source_img_180_lr = np.fliplr(source_img_180)
source_img_270_lr = np.fliplr(source_img_270)
target_img_lr = np.fliplr(target_img)
target_img_90_lr = np.fliplr(target_img_90)
target_img_180_lr = np.fliplr(target_img_180)
target_img_270_lr = np.fliplr(target_img_270)
#source_img_90_ud = np.flipud(source_img_90)
# Save the augmented files
# Source images
io.imsave(Saving_path+'/augmented_source/'+image,source_img)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90.tif',source_img_90)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180.tif',source_img_180)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270.tif',source_img_270)
# Target images
io.imsave(Saving_path+'/augmented_target/'+image,target_img)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90.tif',target_img_90)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180.tif',target_img_180)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270.tif',target_img_270)
if flip == True:
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_90_lr.tif',source_img_90_lr)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_180_lr.tif',source_img_180_lr)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_270_lr.tif',source_img_270_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_90_lr.tif',target_img_90_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_180_lr.tif',target_img_180_lr)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_270_lr.tif',target_img_270_lr)
def flip(Source_path, Target_path):
Source_images = os.listdir(Source_path)
Target_images = os.listdir(Target_path)
for image in Source_images:
source_img = io.imread(os.path.join(Source_path,image))
target_img = io.imread(os.path.join(Target_path,image))
source_img_lr = np.fliplr(source_img)
target_img_lr = np.fliplr(target_img)
io.imsave(Saving_path+'/augmented_source/'+image,source_img)
io.imsave(Saving_path+'/augmented_source/'+os.path.splitext(image)[0]+'_lr.tif',source_img_lr)
io.imsave(Saving_path+'/augmented_target/'+image,target_img)
io.imsave(Saving_path+'/augmented_target/'+os.path.splitext(image)[0]+'_lr.tif',target_img_lr)
if Use_Data_augmentation:
if os.path.exists(Saving_path+'/augmented_source'):
shutil.rmtree(Saving_path+'/augmented_source')
os.mkdir(Saving_path+'/augmented_source')
if os.path.exists(Saving_path+'/augmented_target'):
shutil.rmtree(Saving_path+'/augmented_target')
os.mkdir(Saving_path+'/augmented_target')
print("Data augmentation enabled")
print("Data augmentation in progress....")
if Rotation == True:
rotation_aug(Training_source,Training_target,flip=Flip)
elif Rotation == False and Flip == True:
flip(Training_source,Training_target)
print("Done")
if not Use_Data_augmentation:
print(bcolors.WARNING+"Data augmentation disabled")
###Output
_____no_output_____
###Markdown
**3.3. Using weights from a pre-trained model as initial weights**--- Here, you can set the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a model of Your Network**. This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**. In order to continue training from the point where the pre-trained model left off, it is advisable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used.
###Code
# @markdown ##Loading weights from a pre-trained network
Use_pretrained_model = False #@param {type:"boolean"}
pretrained_model_choice = "Model_from_file" #@param ["Model_from_file"]
Weights_choice = "last" #@param ["last", "best"]
#@markdown ###If you chose "Model_from_file", please provide the path to the model folder:
pretrained_model_path = "" #@param {type:"string"}
# --------------------- Check if we load a previously trained model ------------------------
if Use_pretrained_model:
  # --------------------- Load the model from the chosen path ------------------------
if pretrained_model_choice == "Model_from_file":
h5_file_path = os.path.join(pretrained_model_path, "weights_"+Weights_choice+".h5")
  # --------------------- Download a model provided in the XXX ------------------------
if pretrained_model_choice == "Model_name":
pretrained_model_name = "Model_name"
pretrained_model_path = "/content/"+pretrained_model_name
print("Downloading the 2D_Demo_Model_from_Stardist_2D_paper")
if os.path.exists(pretrained_model_path):
shutil.rmtree(pretrained_model_path)
os.makedirs(pretrained_model_path)
wget.download("", pretrained_model_path)
wget.download("", pretrained_model_path)
wget.download("", pretrained_model_path)
wget.download("", pretrained_model_path)
h5_file_path = os.path.join(pretrained_model_path, "weights_"+Weights_choice+".h5")
# --------------------- Add additional pre-trained models here ------------------------
# --------------------- Check the model exist ------------------------
# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled,
if not os.path.exists(h5_file_path):
print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist')
Use_pretrained_model = False
# If the model path contains a pretrain model, we load the training rate,
if os.path.exists(h5_file_path):
#Here we check if the learning rate can be loaded from the quality control folder
if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):
with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:
csvRead = pd.read_csv(csvfile, sep=',')
#print(csvRead)
if "learning rate" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)
print("pretrained network learning rate found")
#find the last learning rate
lastLearningRate = csvRead["learning rate"].iloc[-1]
#Find the learning rate corresponding to the lowest validation loss
min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]
#print(min_val_loss)
bestLearningRate = min_val_loss['learning rate'].iloc[-1]
if Weights_choice == "last":
print('Last learning rate: '+str(lastLearningRate))
if Weights_choice == "best":
print('Learning rate of best validation loss: '+str(bestLearningRate))
if not "learning rate" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead
bestLearningRate = initial_learning_rate
lastLearningRate = initial_learning_rate
print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)
#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used
if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):
print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)
bestLearningRate = initial_learning_rate
lastLearningRate = initial_learning_rate
# Display info about the pretrained model to be loaded (or not)
if Use_pretrained_model:
print('Weights found in:')
print(h5_file_path)
print('will be loaded prior to training.')
else:
  print(bcolors.WARNING+'No pretrained network will be used.')
#@markdown ###<font color=orange> You will need to add or replace the code that loads any previously trained weights to the notebook here.
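# Hedged example of how previously trained weights could be loaded, assuming the
# model object built in section 4 is Keras-based (csbdeep models expose it as
# .keras_model). Adapt or replace this for your own network:
# if Use_pretrained_model:
#   model.keras_model.load_weights(h5_file_path)    # csbdeep-style model
#   # or, for a plain tf.keras model:  model.load_weights(h5_file_path)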
###Output
_____no_output_____
###Markdown
**4. Train the network**--- **4.1. Train the network**---When playing the cell below you should see updates after each epoch (round). Network training can take some time.* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches.Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder.
###Code
import time
import csv
# Export the training parameters as pdf (before training, in case training fails)
pdf_export(augmentation = Use_Data_augmentation, pretrained_model = Use_pretrained_model)
start = time.time()
#@markdown ##<font color=orange>Start training
# Start Training
#Insert the code necessary to initiate training of your model
#Note that the notebook should load weights either from the model that is
#trained from scratch or if the pretrained weights are used (3.3.)
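# Hedged example only, kept commented out. It assumes a csbdeep/CARE-style
# network (the pdf export in section 2 references csbdeep and config.train_loss)
# and that patch_size / number_of_patches parameters were defined in section 3.
# Replace it with the real training code of your network:
#
# from csbdeep.data import RawData, create_patches
# from csbdeep.models import Config, CARE
# raw_data = RawData.from_folder(basepath=os.path.dirname(Training_source),
#                                source_dirs=[os.path.basename(Training_source)],
#                                target_dir=os.path.basename(Training_target),
#                                axes='YX')
# X, Y, XY_axes = create_patches(raw_data, patch_size=(patch_size, patch_size),
#                                n_patches_per_image=number_of_patches)
# n_val = max(1, int(len(X) * percentage))   # validation split defined in section 3
# config = Config(XY_axes, n_channel_in=1, n_channel_out=1,
#                 train_epochs=number_of_epochs, train_batch_size=batch_size,
#                 train_steps_per_epoch=number_of_steps)
# model = CARE(config, model_name, basedir=model_path)
# if Use_pretrained_model:
#   model.keras_model.load_weights(h5_file_path)
# history = model.train(X[n_val:], Y[n_val:], validation_data=(X[:n_val], Y[:n_val]))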
# Displaying the time elapsed for training
dt = time.time() - start
mins, sec = divmod(dt, 60)
hour, mins = divmod(mins, 60)
print("Time elapsed:",hour, "hour(s)",mins,"min(s)",round(sec),"sec(s)")
# Export the training parameters as pdf (after training)
pdf_export(trained = True, augmentation = Use_Data_augmentation, pretrained_model = Use_pretrained_model)
###Output
_____no_output_____
###Markdown
**5. Evaluate your model**---This section allows the user to perform important quality checks on the validity and generalisability of the trained model. **We highly recommend performing quality control on all newly trained models.**
###Code
# model name and path
#@markdown ###Do you want to assess the model you just trained ?
Use_the_current_trained_model = True #@param {type:"boolean"}
#@markdown ###If not, please provide the name of the model and path to model folder:
#@markdown #####During training, the model files are automatically saved inside a folder named after model_name in section 3. Provide the path to this folder below.
QC_model_folder = "" #@param {type:"string"}
#Here we define the loaded model name and path
QC_model_name = os.path.basename(QC_model_folder)
QC_model_path = os.path.dirname(QC_model_folder)
if (Use_the_current_trained_model):
QC_model_name = model_name
QC_model_path = model_path
full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'
if os.path.exists(full_QC_model_path):
print("The "+QC_model_name+" network will be evaluated")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
###Output
_____no_output_____
###Markdown
**5.1. Inspection of the loss function**---First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.* **Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target. **Validation loss** describes the same error value between the model's prediction on a validation image and its target. During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance. Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again and the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased.
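As a hedged aside (an assumption, not part of the template): if your network is Keras-based, overfitting can also be limited automatically during training with an early-stopping callback, as sketched in the cell below.
###Code
# Illustration only: an early-stopping callback that halts training once the
# validation loss stops improving. If your network is Keras-based (assumption),
# pass callbacks=[early_stop] to your model.fit(...) call in section 4.
from tensorflow.keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
print('Early stopping configured on', early_stop.monitor, 'with patience =', early_stop.patience)
###Output
_____no_output_____
###Markdown
The cell below plots the training and validation losses that were recorded for your model during training.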
###Code
#@markdown ##Play the cell to show a plot of training errors vs. epoch number
import csv
from matplotlib import pyplot as plt
lossDataFromCSV = []
vallossDataFromCSV = []
with open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:
csvRead = csv.reader(csvfile, delimiter=',')
next(csvRead)
for row in csvRead:
lossDataFromCSV.append(float(row[0]))
vallossDataFromCSV.append(float(row[1]))
epochNumber = range(len(lossDataFromCSV))
plt.figure(figsize=(15,10))
plt.subplot(2,1,1)
plt.plot(epochNumber,lossDataFromCSV, label='Training loss')
plt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')
plt.title('Training loss and validation loss vs. epoch number (linear scale)')
plt.ylabel('Loss')
plt.xlabel('Epoch number')
plt.legend()
plt.subplot(2,1,2)
plt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')
plt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')
plt.title('Training loss and validation loss vs. epoch number (log scale)')
plt.ylabel('Loss')
plt.xlabel('Epoch number')
plt.legend()
plt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png')
plt.show()
###Output
_____no_output_____
###Markdown
**5.2. Error mapping and quality metrics estimation**--- Update the code below to perform predictions on your quality control dataset. Use the metrics that are the most meaningful to assess the quality of the prediction.This section will display SSIM maps and RSE maps as well as calculating total SSIM, NRMSE and PSNR metrics for all the images provided in the "Source_QC_folder" and "Target_QC_folder" !**1. The SSIM (structural similarity) map** The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). **mSSIM** is the SSIM value calculated across the entire window of both images.**The output below shows the SSIM maps with the mSSIM****2. The RSE (Root Squared Error) map** This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. The higher the score the better the agreement.**The output below shows the RSE maps with the NRMSE and PSNR values.**
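As a quick, hedged illustration (not part of the QC pipeline itself), the cell below computes the same three metrics on a small synthetic image pair; the array names and noise level are arbitrary and only serve to show the scikit-image calls involved.
###Code
# Illustration only: sanity-check the metrics described above on synthetic data.
import numpy as np
from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio as psnr
rng = np.random.RandomState(0)
gt = rng.rand(64, 64).astype(np.float32)                               # synthetic "ground truth"
pred = np.clip(gt + 0.1 * rng.randn(64, 64).astype(np.float32), 0, 1)  # synthetic "prediction"
mssim = structural_similarity(gt, pred, data_range=1.0)                # 1.0 means perfect structural agreement
nrmse = np.sqrt(np.mean(np.square(gt - pred)))                         # lower is better
psnr_value = psnr(gt, pred, data_range=1.0)                            # higher is better
print('mSSIM:', round(float(mssim), 3), '| NRMSE:', round(float(nrmse), 3), '| PSNR:', round(float(psnr_value), 3))
###Output
_____no_output_____
###Markdown
The cell below applies these metrics to every image pair in your quality control dataset.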
###Code
#@markdown ##Choose the folders that contain your Quality Control dataset
from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio as psnr
Source_QC_folder = "" #@param{type:"string"}
Target_QC_folder = "" #@param{type:"string"}
# Create a quality control/Prediction Folder
if os.path.exists(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction"):
shutil.rmtree(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction")
os.makedirs(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction")
# Insert code to activate the pretrained model if necessary.
# List Tif images in Source_QC_folder
Source_QC_folder_tif = Source_QC_folder+"/*.tif"
Z = sorted(glob(Source_QC_folder_tif))
Z = list(map(imread,Z))
print('Number of test dataset found in the folder: '+str(len(Z)))
# Insert code to perform predictions on all datasets in the Source_QC folder
def ssim(img1, img2):
return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)
def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):
"""This function is adapted from Martin Weigert"""
"""Percentile-based image normalization."""
mi = np.percentile(x,pmin,axis=axis,keepdims=True)
ma = np.percentile(x,pmax,axis=axis,keepdims=True)
return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)
def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32
"""This function is adapted from Martin Weigert"""
if dtype is not None:
x = x.astype(dtype,copy=False)
mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)
ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)
eps = dtype(eps)
try:
import numexpr
x = numexpr.evaluate("(x - mi) / ( ma - mi + eps )")
except ImportError:
x = (x - mi) / ( ma - mi + eps )
if clip:
x = np.clip(x,0,1)
return x
def norm_minmse(gt, x, normalize_gt=True):
"""This function is adapted from Martin Weigert"""
"""
normalizes and affinely scales an image pair such that the MSE is minimized
Parameters
----------
gt: ndarray
the ground truth image
x: ndarray
the image that will be affinely scaled
normalize_gt: bool
set to True of gt image should be normalized (default)
Returns
-------
gt_scaled, x_scaled
"""
if normalize_gt:
gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)
x = x.astype(np.float32, copy=False) - np.mean(x)
#x = x - np.mean(x)
gt = gt.astype(np.float32, copy=False) - np.mean(gt)
#gt = gt - np.mean(gt)
scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())
return gt, scale * x
# Open and create the csv file that will contain all the QC metrics
with open(QC_model_path+"/"+QC_model_name+"/Quality Control/QC_metrics_"+QC_model_name+".csv", "w", newline='') as file:
writer = csv.writer(file)
# Write the header in the csv file
writer.writerow(["image #","Prediction v. GT mSSIM","Input v. GT mSSIM", "Prediction v. GT NRMSE", "Input v. GT NRMSE", "Prediction v. GT PSNR", "Input v. GT PSNR"])
# Let's loop through the provided dataset in the QC folders
for i in os.listdir(Source_QC_folder):
if not os.path.isdir(os.path.join(Source_QC_folder,i)):
print('Running QC on: '+i)
# -------------------------------- Target test data (Ground truth) --------------------------------
test_GT = io.imread(os.path.join(Target_QC_folder, i))
# -------------------------------- Source test data --------------------------------
test_source = io.imread(os.path.join(Source_QC_folder,i))
# Normalize the images wrt each other by minimizing the MSE between GT and Source image
test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)
# -------------------------------- Prediction --------------------------------
test_prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction",i))
# Normalize the images wrt each other by minimizing the MSE between GT and prediction
test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True)
# -------------------------------- Calculate the metric maps and save them --------------------------------
# Calculate the SSIM maps
index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)
index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)
#Save ssim_maps
img_SSIM_GTvsPrediction_32bit = np.float32(img_SSIM_GTvsPrediction)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsPrediction_'+i,img_SSIM_GTvsPrediction_32bit)
img_SSIM_GTvsSource_32bit = np.float32(img_SSIM_GTvsSource)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/SSIM_GTvsSource_'+i,img_SSIM_GTvsSource_32bit)
# Calculate the Root Squared Error (RSE) maps
img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))
img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))
# Save SE maps
img_RSE_GTvsPrediction_32bit = np.float32(img_RSE_GTvsPrediction)
img_RSE_GTvsSource_32bit = np.float32(img_RSE_GTvsSource)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsPrediction_'+i,img_RSE_GTvsPrediction_32bit)
io.imsave(QC_model_path+'/'+QC_model_name+'/Quality Control/RSE_GTvsSource_'+i,img_RSE_GTvsSource_32bit)
# -------------------------------- Calculate the RSE metrics and save them --------------------------------
# Normalised Root Mean Squared Error (here it's valid to take the mean of the image)
NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))
NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))
# We can also measure the peak signal to noise ratio between the images
PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)
PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])
# All data is now processed saved
Test_FileList = os.listdir(Source_QC_folder) # this assumes, as it should, that both source and target are named the same
plt.figure(figsize=(15,15))
# Currently only displays the last computed set, from memory
# Target (Ground-truth)
plt.subplot(3,3,1)
plt.axis('off')
img_GT = io.imread(os.path.join(Target_QC_folder, Test_FileList[-1]))
plt.imshow(img_GT)
plt.title('Target',fontsize=15)
# Source
plt.subplot(3,3,2)
plt.axis('off')
img_Source = io.imread(os.path.join(Source_QC_folder, Test_FileList[-1]))
plt.imshow(img_Source)
plt.title('Source',fontsize=15)
#Prediction
plt.subplot(3,3,3)
plt.axis('off')
img_Prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/Prediction/", Test_FileList[-1]))
plt.imshow(img_Prediction)
plt.title('Prediction',fontsize=15)
#Setting up colours
cmap = plt.cm.CMRmap
#SSIM between GT and Source
plt.subplot(3,3,5)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)
plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)
#SSIM between GT and Prediction
plt.subplot(3,3,6)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)
plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)
#Root Squared Error between GT and Source
plt.subplot(3,3,8)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)
plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)
#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))
plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)
#Root Squared Error between GT and Prediction
plt.subplot(3,3,9)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)
#Make a pdf summary of the QC results
qc_pdf_export()
###Output
_____no_output_____
###Markdown
**6. Using the trained model**---In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and the predictions are finally saved into your Google Drive. **6.1. Generate prediction(s) from unseen dataset**---Fill in the code below to perform predictions using your model. The current trained model (from section 4) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images). **`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing. **`Result_folder`:** This folder will contain the predicted output images.
###Code
#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.
Data_folder = "" #@param {type:"string"}
Result_folder = "" #@param {type:"string"}
# model name and path
#@markdown ###Do you want to use the current trained model?
Use_the_current_trained_model = True #@param {type:"boolean"}
#@markdown ###If not, provide the name of the model and path to model folder:
#@markdown #####During training, the model files are automatically saved inside a folder named after model_name in section 3. Provide the path to this folder below.
Prediction_model_folder = "" #@param {type:"string"}
#Here we find the loaded model name and parent path
Prediction_model_name = os.path.basename(Prediction_model_folder)
Prediction_model_path = os.path.dirname(Prediction_model_folder)
if (Use_the_current_trained_model):
print("Using current trained network")
Prediction_model_name = model_name
Prediction_model_path = model_path
full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'
if os.path.exists(full_Prediction_model_path):
print("The "+Prediction_model_name+" network will be used.")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
# Activate the (pre-)trained model
# Provide the code for performing predictions and saving them
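# Hedged example only, kept commented out (assumption: a csbdeep/CARE-style
# model; adapt the model class, axes string and saving call to your network):
#
# from csbdeep.models import CARE
# model = CARE(config=None, name=Prediction_model_name, basedir=Prediction_model_path)
# for filename in os.listdir(Data_folder):
#   if filename.lower().endswith(('.tif', '.tiff')):
#     img = imread(os.path.join(Data_folder, filename))
#     restored = model.predict(img, axes='YX')
#     io.imsave(os.path.join(Result_folder, filename), restored)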
print("Images saved into folder:", Result_folder)
###Output
_____no_output_____
###Markdown
**6.2. Inspect the predicted output**---
###Code
# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.
# This will display a randomly chosen dataset input and predicted output
random_choice = random.choice(os.listdir(Data_folder))
x = imread(Data_folder+"/"+random_choice)
os.chdir(Result_folder)
y = imread(Result_folder+"/"+random_choice)
plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.axis('off')
plt.imshow(x, interpolation='nearest')
plt.title('Input')
plt.subplot(1,2,2)
plt.axis('off')
plt.imshow(y, interpolation='nearest')
plt.title('Predicted output');
###Output
_____no_output_____ |
examples/ipynb_examples/track_faces_on_video_realtime.ipynb | ###Markdown
This demo shows how to detect faces from video and displays result in realtimeYou need OpenCV installed to run this example. To install it, run ___pip install opencv-python___
###Code
%pylab inline
import face_recognition
import cv2
import matplotlib.patches as patches
from IPython.display import clear_output
from matplotlib.pyplot import imshow
import matplotlib.pylab as plt
# Loading video for face detection
video_capture = cv2.VideoCapture("../hamilton_clip.mp4")
frame_count = 0
while video_capture.isOpened():
# Grab a single frame of video
ret, frame = video_capture.read()
# Bail out when the video file ends
if not ret:
video_capture.release()
break
    # We will search for faces in every 15th frame, to speed up the process.
frame_count += 1
if frame_count % 15 == 0:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Display video frame
title("Input Stream")
plt.imshow(frame)
# Find all the faces and face encodings in the current frame of video
rgb_frame = frame[:, :, ::-1]
face_locations = face_recognition.face_locations(rgb_frame)
# If faces were found, we will mark it on frame with blue dots
for face_location in face_locations:
plt.plot(face_location[1], face_location[0], 'bo')
plt.plot(face_location[1], face_location[2], 'bo')
plt.plot(face_location[3], face_location[2], 'bo')
plt.plot(face_location[3], face_location[0], 'bo')
# Show frame...
plt.show()
# ... and hold it until a new frame appears
clear_output(wait=True)
###Output
_____no_output_____
###Markdown
This demo shows how to detect faces from video and displays result in realtimeYou need OpenCV installed to run this example. To install it, run ___pip install opencv-python___
###Code
import sys
sys.executable
%pylab inline
import face_recognition
import cv2
import matplotlib.patches as patches
from IPython.display import clear_output
from matplotlib.pyplot import imshow
import matplotlib.pylab as plt
# Loading video for face detection
video_capture = cv2.VideoCapture("../hamilton_clip.mp4")
frame_count = 0
while video_capture.isOpened():
# Grab a single frame of video
ret, frame = video_capture.read()
# Bail out when the video file ends
if not ret:
video_capture.release()
break
    # We will search for faces in every 15th frame, to speed up the process.
frame_count += 1
if frame_count % 15 == 0:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Display video frame
title("Input Stream")
plt.imshow(frame)
# Find all the faces and face encodings in the current frame of video
rgb_frame = frame[:, :, ::-1]
face_locations = face_recognition.face_locations(rgb_frame)
# If faces were found, we will mark it on frame with blue dots
for face_location in face_locations:
plt.plot(face_location[1], face_location[0], 'bo')
plt.plot(face_location[1], face_location[2], 'bo')
plt.plot(face_location[3], face_location[2], 'bo')
plt.plot(face_location[3], face_location[0], 'bo')
# Show frame...
plt.show()
# ... and hold it until a new frame appears
clear_output(wait=True)
###Output
_____no_output_____
###Markdown
This demo shows how to detect faces from video and displays the result in realtime. You need OpenCV installed to run this example. To install it, run ___pip install opencv-python___
###Code
%pylab inline
import face_recognition
import cv2
import matplotlib.patches as patches
from IPython.display import clear_output
from matplotlib.pyplot import imshow
import matplotlib.pylab as plt
# Loading video for face detection
video_capture = cv2.VideoCapture("../hamilton_clip.mp4")
frame_count = 0
while video_capture.isOpened():
# Grab a single frame of video
ret, frame = video_capture.read()
# Bail out when the video file ends
if not ret:
video_capture.release()
break
# We will search face in every 15 frames to speed up process.
frame_count += 1
if frame_count % 15 == 0:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Display video frame
title("Input Stream")
plt.imshow(frame)
# Find all the faces and face encodings in the current frame of video
rgb_frame = frame[:, :, ::-1]
face_locations = face_recognition.face_locations(rgb_frame)
# If faces were found, we will mark it on frame with blue dots
for face_location in face_locations:
plt.plot(face_location[1], face_location[0], 'bo')
plt.plot(face_location[1], face_location[2], 'bo')
plt.plot(face_location[3], face_location[2], 'bo')
plt.plot(face_location[3], face_location[0], 'bo')
# Show frame...
plt.show()
# ... and hold it until a new frame appears
clear_output(wait=True)
###Output
_____no_output_____
###Markdown
This demo shows how to detect faces from video and displays the result in realtime. You need OpenCV installed to run this example. To install it, run ___pip install opencv-python___
###Code
%pylab inline
import face_recognition
import cv2
import matplotlib.patches as patches
from IPython.display import clear_output
from matplotlib.pyplot import imshow
import matplotlib.pylab as plt
# Loading video for face detection
video_capture = cv2.VideoCapture("../hamilton_clip.mp4")
frame_count = 0
while video_capture.isOpened():
# Grab a single frame of video
ret, frame = video_capture.read()
# Bail out when the video file ends
if not ret:
video_capture.release()
break
# We will search face in every 15 frames to speed up process.
frame_count += 1
if frame_count % 15 == 0:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Display video frame
# title("Input Stream")
plt.imshow(frame)
# Find all the faces and face encodings in the current frame of video
rgb_frame = frame[:, :, ::-1]
face_locations = face_recognition.face_locations(rgb_frame)
# If faces were found, we will mark it on frame with blue dots
for face_location in face_locations:
plt.plot(face_location[1], face_location[0], 'bo')
plt.plot(face_location[1], face_location[2], 'bo')
plt.plot(face_location[3], face_location[2], 'bo')
plt.plot(face_location[3], face_location[0], 'bo')
# Show frame...
plt.show()
# ... and hold it until a new frame appears
clear_output(wait=True)
###Output
_____no_output_____ |
New Question set Survey/Modeling-Refined-Data-Copy1.ipynb | ###Markdown
Modeling the Refined DataUsing Clustering then Classification Model Importing Libraries and Data
###Code
import pandas as pd
pd.set_option('display.max_colwidth', 500)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import _pickle as pickle
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.metrics import calinski_harabasz_score, silhouette_score, davies_bouldin_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from tqdm import tqdm_notebook as tqdm
###Output
_____no_output_____
###Markdown
Loading the Profiles
###Code
df = pd.read_csv(r"New_questions.csv")
df
# Loading in the cleaned DF
with open("AI-Dating-App/refined_profiles.pkl",'rb') as fp:
df = pickle.load(fp)
# Viewing the DF
df.head()
###Output
_____no_output_____
###Markdown
Clustering the Refined Data Vectorizing
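As a quick illustration (toy strings, not the actual survey answers), count-vectorizing a text column produces one column per word and one row per profile, with each cell holding the word count. The `get_feature_names()` call matches what this notebook uses; newer scikit-learn versions rename it to `get_feature_names_out()`.

```python
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

# Toy answers standing in for one of the survey columns
answers = pd.Series(["likes long walks", "likes dogs", "long walks and dogs"])
vectorizer = CountVectorizer()
x = vectorizer.fit_transform(answers)
# One row per answer, one column per word, cell = how often the word occurs
word_counts = pd.DataFrame(x.toarray(), columns=vectorizer.get_feature_names())
print(word_counts)
```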
###Code
def string_convert(x):
"""
First converts the lists in the DF into strings
"""
if isinstance(x, list):
return ' '.join(x)
else:
return x
# Looping through the columns and applying the function
for col in df.columns:
df[col] = df[col].apply(string_convert)
df
ori_df = df
new_df = df
new_df.drop('Name', axis=1, inplace=True)
df = new_df
df
df
def vectorization(df, columns):
"""
Using recursion, iterate through the df until all the categories have been vectorized
"""
column_name = columns[0]
column_name = str(column_name)
# Checking if the column name has been removed already
if column_name not in ['Love Idiology','love ethics','Relationship','Predictability','Which is important','Broken trust','External description','Interaction description','Self description','Interest']:
return df
# if column_name in ['Love Idiology','love ethics']:
# # df[column_name.lower()] = df[column_name].cat.codes
# df[column_name] = df[column_name].astype('category')
# df = dict(enumerate(df[column_name].cat.categories))
# df = pd.DataFrame.from_dict(df, orient='index')
# # print(df)
# df = df.drop(column_name, 1)
# return vectorization(df, df.columns)
else:
# Instantiating the Vectorizer
vectorizer = CountVectorizer()
# cv = CountVectorizer(lowercase=False)
print(df[column_name])
# Fitting the vectorizer to the column's text
x = vectorizer.fit_transform(df[column_name])
# Creating a new DF that contains the vectorized word counts
df_wrds = pd.DataFrame(x.toarray(), columns=vectorizer.get_feature_names())
# Concating the words DF with the original DF
new_df = pd.concat([df, df_wrds], axis=1)
# Dropping the column because it is no longer needed in place of vectorization
# new_df = new_df.drop(column_name, axis=1)
new_df = new_df.drop(column_name, axis=1)
return vectorization(new_df, new_df.columns)
# Creating the vectorized DF
vect_df = vectorization(df, df.columns)
# Scaling
scaler = MinMaxScaler()
vect_df = pd.DataFrame(scaler.fit_transform(vect_df), index=vect_df.index, columns=vect_df.columns)
vect_df
###Output
_____no_output_____
###Markdown
PCA
###Code
from sklearn.decomposition import PCA
# Instantiating PCA
pca = PCA()
# Fitting and Transforming the DF
df_pca = pca.fit_transform(df)
# Finding the exact number of features that explain at least 99% of the variance in the dataset
total_explained_variance = pca.explained_variance_ratio_.cumsum()
print(total_explained_variance)
n_over_9 = len(total_explained_variance[total_explained_variance>=0])
print(n_over_9)
n_to_reach_9 = df.shape[1] - n_over_9
print(df.shape[1])
print(n_to_reach_9)
print("PCA reduces the # of features from", df.shape[1], 'to', n_to_reach_9)
# Reducing the dataset to the number of features determined before
pca = PCA(n_components=n_to_reach_9)
# Fitting and transforming the dataset to the stated number of features
df_pca = pca.fit_transform(df)
# Seeing the variance ratio that still remains after the dataset has been reduced
pca.explained_variance_ratio_.cumsum()[-1]
###Output
[0.41305035 0.61415962 0.7907053 0.911528 0.96415958 0.99083848
1. 1. ]
8
10
2
PCA reduces the # of features from 10 to 2
###Markdown
Performing Hierarchical Agglomerative Clustering- First finding the optimum number of clusters
###Code
# Setting the amount of clusters to test out
cluster_cnt = [i for i in range(2,5,1)]
# Establishing empty lists to store the scores for the evaluation metrics
ch_scores = []
s_scores = []
db_scores = []
# The DF for evaluation
eval_df = df_pca
# Looping through different iterations for the number of clusters
for i in tqdm(cluster_cnt):
# Clustering with different number of clusters
clust = AgglomerativeClustering(n_clusters=i, linkage='complete')
clust.fit(eval_df)
cluster_assignments = clust.labels_
# Appending the scores to the empty lists
ch_scores.append(calinski_harabasz_score(eval_df, cluster_assignments))
s_scores.append(silhouette_score(eval_df, cluster_assignments))
db_scores.append(davies_bouldin_score(eval_df, cluster_assignments))
###Output
/home/dark-flash/.local/lib/python3.6/site-packages/ipykernel_launcher.py:15: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0
Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`
from ipykernel import kernelapp as app
###Markdown
Helper Function to Evaluate the Clusters
###Code
def cluster_eval(y, x):
"""
Prints the scores of a set evaluation metric. Prints out the max and min values of the evaluation scores.
"""
# Creating a DataFrame for returning the max and min scores for each cluster
df = pd.DataFrame(columns=['Cluster Score'], index=[i for i in range(2, len(y)+2)])
df['Cluster Score'] = y
print('Max Value:\nCluster #', df[df['Cluster Score']==df['Cluster Score'].max()])
print('\nMin Value:\nCluster #', df[df['Cluster Score']==df['Cluster Score'].min()])
# Plotting out the scores based on cluster count
plt.figure(figsize=(16,6))
plt.style.use('bmh')
plt.plot(x,y)
plt.xlabel('# of Clusters')
plt.ylabel('Score')
plt.show()
###Output
_____no_output_____
###Markdown
Evaluation of Clusters
###Code
print("The Calinski-Harabasz Score (find max score):")
cluster_eval(ch_scores, cluster_cnt)
print("\nThe Silhouette Coefficient Score (find max score):")
cluster_eval(s_scores, cluster_cnt)
print("\nThe Davies-Bouldin Score (find minimum score):")
cluster_eval(db_scores, cluster_cnt)
###Output
The Calinski-Harabasz Score (find max score):
Max Value:
Cluster # Cluster Score
4 24.169438
Min Value:
Cluster # Cluster Score
2 8.192651
###Markdown
Running HACAgain but with the optimum cluster count
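A minimal sketch (illustrative numbers, not the notebook's actual scores) of how the optimum count could be read off the three metric lists computed above: Calinski-Harabasz and Silhouette are maximized, while Davies-Bouldin is minimized.

```python
import numpy as np

# Placeholder score lists; in the notebook these would be ch_scores, s_scores
# and db_scores computed for cluster_cnt
cluster_cnt = [2, 3, 4]
ch_scores = [8.2, 15.7, 24.2]
s_scores = [0.31, 0.38, 0.42]
db_scores = [1.9, 1.4, 1.1]

best_by_ch = cluster_cnt[int(np.argmax(ch_scores))]        # maximize
best_by_silhouette = cluster_cnt[int(np.argmax(s_scores))] # maximize
best_by_db = cluster_cnt[int(np.argmin(db_scores))]        # minimize
print(best_by_ch, best_by_silhouette, best_by_db)
```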
###Code
# Instantiating HAC based on the optimum number of clusters found
hac = AgglomerativeClustering(n_clusters=3, linkage='complete')
# Fitting
hac.fit(df_pca)
# Getting cluster assignments
cluster_assignments = hac.labels_
# Assigning the clusters to each profile
df['Cluster #'] = cluster_assignments
# vect_df['Cluster #'] = cluster_assignments
###Output
_____no_output_____
###Markdown
Exporting the Clustered DF and Vectorized DF
###Code
with open("refined_cluster.pkl",'wb') as fp:
pickle.dump(df, fp)
# with open("vectorized_refined1.pkl", 'wb') as fp:
# pickle.dump(vect_df, fp)
###Output
_____no_output_____
###Markdown
Classification of the New Profile Importing the Different Classification Models
###Code
# Importing 3 models
from sklearn.dummy import DummyClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import ComplementNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import AdaBoostClassifier
###Output
_____no_output_____
###Markdown
Train, test, splitting
###Code
# Assigning the split variables
X = df.drop(["Cluster #"], 1)
y = df['Cluster #']
# Train, test, split
X_train, X_test, y_train, y_test = train_test_split(X, y)
###Output
_____no_output_____
###Markdown
Finding the Best Model- Dummy (Baseline Model)- KNN- SVM- NaiveBayes- Logistic Regression- Adaboost
###Code
# Dummy
dummy = DummyClassifier(strategy='stratified')
# KNN
knn = KNeighborsClassifier()
# SVM
svm = SVC(gamma='scale')
# NaiveBayes
nb = ComplementNB()
# Logistic Regression
lr = LogisticRegression()
# Adaboost
adab = AdaBoostClassifier()
# List of models
models = [dummy, knn, svm, nb, lr, adab]
# List of model names
names = ['Dummy', 'KNN', 'SVM', 'NaiveBayes', 'Logistic Regression', 'Adaboost']
# Zipping the lists
classifiers = dict(zip(names, models))
# Visualization of the different cluster counts
df['Cluster #'].value_counts().plot(kind='pie', title='Count of Class Distribution');#Series.plot(kind='pie')
###Output
_____no_output_____
###Markdown
Since we are dealing with an imbalanced dataset _(because each cluster is not guaranteed to have the same amount of profiles)_, we will resort to using the __Macro Avg__ and __F1 Score__ for evaluating the performances of each model.
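For intuition, a small sketch with made-up labels: the macro average computes F1 per class and then takes their unweighted mean, so small clusters count as much as large ones.

```python
from sklearn.metrics import f1_score

# Toy, imbalanced 3-class labels (not the notebook's data)
y_true = [0, 0, 0, 0, 1, 1, 2]
y_pred = [0, 0, 1, 0, 1, 1, 2]

per_class_f1 = f1_score(y_true, y_pred, average=None)   # one F1 per class
macro_f1 = f1_score(y_true, y_pred, average='macro')    # unweighted mean of the per-class F1s
print(per_class_f1, macro_f1)
```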
###Code
# Dictionary containing the model names and their scores
models_f1 = {}
# Looping through each model's predictions and getting their classification reports
for name, model in tqdm(classifiers.items()):
# Fitting the model
model.fit(X_train, y_train)
print('\n'+ name + ' (Macro Avg - F1 Score):')
# Classification Report
report = classification_report(y_test, model.predict(X_test), output_dict=True)
f1 = report['macro avg']['f1-score']
# Assigning to the Dictionary
models_f1[name] = f1
print(f1)
###Output
/home/dark-flash/.local/lib/python3.6/site-packages/ipykernel_launcher.py:5: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0
Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`
"""
###Markdown
Model with the Best Performance
###Code
print(max(models_f1, key=models_f1.get), 'Score:', max(models_f1.values()))
###Output
Logistic Regression Score: 1.0
###Markdown
Fitting the Best Model to our Dataset_(Optional: Tune the model with GridSearch)_
###Code
# Fitting the model
nb.fit(X, y)
###Output
_____no_output_____
###Markdown
Saving the Classification ModelFor future use
###Code
from joblib import dump
dump(nb, "refined_model1.joblib")
###Output
_____no_output_____ |
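For completeness, a minimal sketch (not part of the original notebook) of reusing the persisted classifier later; `new_profile` is a hypothetical row encoded with exactly the same columns as the training features `X`.

```python
from joblib import load

clf = load("refined_model1.joblib")
# new_cluster = clf.predict(new_profile)  # new_profile: same columns/encoding as X
```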
examples/VegaPlots.ipynb | ###Markdown
Plot correlation as 1 wedge
###Code
vega.plots.plot_1wedge(models=[model]);
###Output
_____no_output_____
###Markdown
Plot correlations in 2 wedges
###Code
vega.plots.plot_2wedges(models=[model]);
###Output
_____no_output_____
###Markdown
Plot correlations in 4 wedges
###Code
vega.plots.plot_4wedges(models=[model]);
###Output
_____no_output_____ |
docs/cereja_example.ipynb | ###Markdown
Install cereja
###Code
!pip install cereja --upgrade
###Output
Collecting cereja
Downloading cereja-1.4.9-py3-none-any.whl (103 kB)
[K |████████████████████████████████| 103 kB 5.2 MB/s
[?25hInstalling collected packages: cereja
Successfully installed cereja-1.4.9
###Markdown
Import cereja
###Code
import cereja as cj
###Output
[31m🍒[30m Using Cereja v.1.2.4
###Markdown
Commons is_iterablereturn whether an object is iterable or not.
###Code
my_var = [1,2,3] # change value and execute the cell
cj.is_iterable(my_var)
cj.is_iterable('hi')
###Output
_____no_output_____
###Markdown
is_sequenceReturn whether an object a Sequence or not, exclude strings.
###Code
cj.is_sequence([1,2,3])
cj.is_sequence('hi')
###Output
_____no_output_____
###Markdown
theta_angleCalculates and returns theta angle between two vectors
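Presumably this is the usual arccos of the normalized dot product; whether cereja returns degrees or radians is not shown here, so the hand-rolled check below (in degrees) is only an assumption for comparison.

```python
import math

def theta_angle_check(u, v):
    # arccos( u.v / (|u| |v|) ), converted to degrees
    dot = sum(a * b for a, b in zip(u, v))
    norm_u = math.sqrt(sum(a * a for a in u))
    norm_v = math.sqrt(sum(b * b for b in v))
    return math.degrees(math.acos(dot / (norm_u * norm_v)))

print(theta_angle_check((2, 2), (0, -2)))  # 135.0 for the vectors used below
```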
###Code
u = (2,2)
v = (0, -2)
cj.theta_angle(u, v)
###Output
_____no_output_____
###Markdown
group_items_in_batchesresponsible for grouping items in batch taking into account the quantity of items per batch
###Code
my_var = [1,2,3,4]
num_items_per_batch = 4
result = cj.group_items_in_batches(items=my_var, items_per_batch=num_items_per_batch)
print(f"items per batch with leftovers: {result}")
"""
Note that the last batch does not contain the same number as the previous batch,
this is due to the split split. You can choose to fill this void as in the example below.
"""
my_var = [1,2,3,4]
num_items_per_batch = 3
result = cj.group_items_in_batches(items=my_var, items_per_batch=num_items_per_batch, fill="cereja")
print(f"fill values: {result}")
# Other examples
result = cj.group_items_in_batches(items=['a','b','c','d'], items_per_batch=2)
print(f"Other examples: {result}")
###Output
Other examples: [['a', 'b'], ['c', 'd']]
###Markdown
remove_duplicate_itemsremove duplicates items in an item list or duplicate items list of list
###Code
#simple list
my_var = [1,2,3,4,4]
result = cj.remove_duplicate_items(my_var)
print(f"simple list: {my_var} --> {result}")
#list of list
my_var = [[1,2,3,4,4], [1,2,3,4,4]]
result = cj.remove_duplicate_items(my_var)
print(f"list of list: {my_var} --> {result}")
# other example
my_var = [['hi'], ['hi'], ['ih']]
result = cj.remove_duplicate_items(my_var)
print(f"other example: {my_var} --> {result}")
###Output
other example: [['hi'], ['hi'], ['ih']] --> [['hi'], ['ih']]
###Markdown
flattenReceives values, whether arrays of values, regardless of their shape and flatness
###Code
sequence = [[1, 2, 3], [], [[2, [3], 4], 6]]
cj.flatten(sequence)
sequence = [[1, 2, 3], [], [[2, [3], 4], 6]]
cj.flatten(sequence, max_recursion=2)
###Output
_____no_output_____
###Markdown
Freq ClassEnumerates the number of identical items, generating a dictionary of frequencies. Where KEY is the item - the original list's item - and VALUE is the total count. instance of Freq
###Code
# Instance
freq = cj.Freq([1,2,3,3,4,5,6,7,6,7,12,31,123,5,3])
print(freq)
###Output
Freq({1: 1, 2: 1, 3: 3, 4: 1, 5: 2, 6: 2, 7: 2, 12: 1, 31: 1, 123: 1})
###Markdown
most_freq returns the most frequent items from the list, the maximum number of items must be entered.
###Code
freq = cj.Freq([1,2,3,3,4,5,6,7,6,7,12,31,123,5,3])
freq.most_common(4)
###Output
_____no_output_____
###Markdown
least_freqWhen returning the least frequent items from the list, the maximum number of items must be entered.
###Code
freq = cj.Freq([1,2,3,3,4,5,6,7,6,7,12,31,123,5,3])
freq.least_freq(4)
###Output
_____no_output_____
###Markdown
ProgressA simple way to display progress to the user Example-1
###Code
import cereja as cj
import time
def process_data(i: int):
# simulates some processing
time.sleep(cj.rand_n()/max(abs(i), 1))
my_iterable = range(1, 500)
my_progress = cj.Progress("My Progress")
for i in my_progress(my_iterable):
process_data(i)
###Output
[31m🍒[0;0m Using Cereja v.1.4.9
[31m🍒[34m My Progress [36m»[0;0m [0;0m499/499 - [[38;5;2m▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰[0;0m] - 100.00% - 🕜 00:00:03 total - [38;5;2mDone! ✅[38;5;2m[0;0m [0;0m
###Markdown
Custom Display Update the percentage by the end of the task
###Code
import cereja as cj
import time
progress = cj.Progress("My Progress")
print(progress)
print(progress[0])
print(progress[1])
print(progress[2])
class MyCustomState(cj.State):
def display(self, current_value, max_value, *args, **kwargs):
return f'{current_value} -> {max_value}'
def done(self, *args, **kwargs):
return f'FINISHED'
progress[0] = MyCustomState
for i in progress(range(1, 500)):
time.sleep(1/i)
###Output
Progress('_StateValue', '_StateBar', '_StatePercent', '_StateTime')
[31m🍒[34m Example States View [36m»[0;0m [0;0m100/100 - [[38;5;2m▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰[0;0m] - 100.00% - 🕜 -00:00:00 total - [38;5;2mDone! ✅[38;5;2m[0;0m [0;0m
_StateValue field 100/100
_StateBar field [[38;5;2m▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰[0;0m]
_StatePercent field 100.00%
[31m🍒[34m My Progress [36m»[0;0m [0;0mFINISHED - [[38;5;2m▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰[0;0m] - 100.00% - 🕜 00:00:06 total - [38;5;2mDone! ✅[38;5;2m[0;0m [0;0m
###Markdown
Many tasks
###Code
import time
with cj.Progress("Progress Test") as bar:
time.sleep(5) # Awaiting data!
for i in bar(range(1, 500), 'task-1'):
time.sleep(1 / i)
# in an abstract way we identified that the previous task ended.
for i in bar(range(1, 400), 'task-2'):
time.sleep(1 / i)
###Output
[31m🍒[34m Progress Test(task-2) [36m»[0;0m [0;0m399/399 - [[38;5;2m▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰[0;0m] - 100.00% - 🕜 00:00:06 total - [38;5;2mDone! ✅[38;5;2m[0;0m [0;0m
###Markdown
FileToolsBaseline of this Basics Usage
###Code
import cereja as cj
data = ['how', 'are', 'you!']
# Create new file
file_ = cj.FileIO.create("/content/new_file.txt", data)
doc = f"""
>>> file_ = cj.FileIO("/content/new_file.txt", data)
>>> file_
{file_}
>>> file_.size(unit='KB')
{repr(file_.size(unit='KB'))}
>>> file_.add(2, ['CerejaFile'])
{file_.add(['CerejaFile'], 2)}
>>> file_.data
{repr(file_.data)}
>>> file_.size(unit='KB')
{repr(file_.size(unit='KB'))}
>>> file_.dir_name
{repr(file_.dir_name)}
>>> file_.dir_path
{repr(file_.dir_path)}
>>> file_.ext
{repr(file_.ext)}
>>> file_.name
{repr(file_.name)}
>>> file_.name_without_ext
{repr(file_.name_without_ext)}
>>> file_.is_empty
{repr(file_.is_empty)}
>>> file_.line_sep
{repr(file_.line_sep)}
>>> file_.length
{repr(file_.length)}
>>> file_.path_
{repr(file_.path)}
"""
print(doc)
with cj.Progress('Saving File') as prog:
file_.save()
###Output
>>> file_ = cj.FileIO("/content/new_file.txt", data)
>>> file_
TXT<new_file.txt>
>>> file_.size(unit='KB')
0.056
>>> file_.add(2, ['CerejaFile'])
None
>>> file_.data
['how', 'are', 'CerejaFile', 'you!']
>>> file_.size(unit='KB')
0.088
>>> file_.dir_name
'content'
>>> file_.dir_path
/content
>>> file_.ext
'.txt'
>>> file_.name
'new_file.txt'
>>> file_.name_without_ext
'new_file'
>>> file_.is_empty
False
>>> file_.line_sep
'\n'
>>> file_.length
4
>>> file_.path_
/content/new_file.txt
[31m🍒[34m Saving File [36m»[0;0m [0;0m100/100 - [[38;5;2m▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰[0;0m] - 100.00% - 🕜 00:00:00 total - [38;5;2mDone! ✅[38;5;2m[0;0m [0;0m
###Markdown
Decorators time_exec is used to measure and log the execution time of the decorated function.
###Code
cj.set_log_level('INFO') # needed to see message
@cj.decorators.time_exec
def my_function(my_param): # Change-me
print(my_param)
my_function("Hi Cereja")
###Output
INFO:cereja.utils._utils:Update log level to INFO
Hi Cereja
INFO:cereja.utils.decorators:[my_function] performed 0.00013065338134765625
###Markdown
deprecationused info deprecated func
###Code
@cj.decorators.depreciation("path.dotted.to.alternative")
def foo(bar: str):
return f'Result of Foo -> {bar}'
# simule user use func
foo("Test")
###Output
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:6: DeprecationWarning: This function will be deprecated in future versions. You can use path.dotted.to.alternative
###Markdown
Path group_path_from_dir returns data tuples based on the number of items entered for each tuple,follows the default order if no sort function is sent
###Code
"""
group_path_from_dir function ...
"""
my_dir = '/content/sample_data'
my_ext_file = '.csv'
cj.group_path_from_dir(dir_path=my_dir, num_items_on_tuple=2, ext_file=my_ext_file)
###Output
_____no_output_____
###Markdown
file_name
###Code
my_file_path = '/content/sample_data/california_housing_test.csv'
result = cj.file_name(file_path=my_file_path)
print(f"file_path: {my_file_path} --> {result}")
# or with ext
result = cj.file_name(file_path=my_file_path, with_ext=True)
print(f"file_path: {my_file_path} --> {result}")
###Output
file_path: /content/sample_data/california_housing_test.csv --> california_housing_test
file_path: /content/sample_data/california_housing_test.csv --> california_housing_test.csv
###Markdown
Install cereja
###Code
!pip install cereja --upgrade
###Output
Requirement already up-to-date: cereja in /usr/local/lib/python3.6/dist-packages (1.1.2.post1)
###Markdown
Import cereja
###Code
import cereja as cj
###Output
[31m🍒[30m Using Cereja v.1.1.2-1
###Markdown
Commons is_iterablereturn whether an object is iterable or not.
###Code
my_var = [1,2,3] # change value and execute the cell
cj.is_iterable(my_var)
cj.is_iterable('hi')
###Output
_____no_output_____
###Markdown
is_sequenceReturn whether an object a Sequence or not, exclude strings.
###Code
cj.is_sequence([1,2,3])
cj.is_sequence('hi')
###Output
_____no_output_____
###Markdown
theta_angleCalculates and returns theta angle between two vectors
###Code
u = (2,2)
v = (0, -2)
cj.theta_angle(u, v)
###Output
_____no_output_____
###Markdown
group_items_in_batchesresponsible for grouping items in batch taking into account the quantity of items per batch
###Code
my_var = [1,2,3,4]
num_items_per_batch = 4
result = cj.group_items_in_batches(items=my_var, items_per_batch=num_items_per_batch)
print(f"items per batch with leftovers: {result}")
"""
Note that the last batch does not contain the same number as the previous batch,
this is due to the split split. You can choose to fill this void as in the example below.
"""
my_var = [1,2,3,4]
num_items_per_batch = 3
result = cj.group_items_in_batches(items=my_var, items_per_batch=num_items_per_batch, fill="cereja")
print(f"fill values: {result}")
# Other examples
result = cj.group_items_in_batches(items=['a','b','c','d'], items_per_batch=2)
print(f"Other examples: {result}")
###Output
Other examples: [['a', 'b'], ['c', 'd']]
###Markdown
remove_duplicate_itemsremove duplicates items in an item list or duplicate items list of list
###Code
#simple list
my_var = [1,2,3,4,4]
result = cj.remove_duplicate_items(my_var)
print(f"simple list: {my_var} --> {result}")
#list of list
my_var = [[1,2,3,4,4], [1,2,3,4,4]]
result = cj.remove_duplicate_items(my_var)
print(f"list of list: {my_var} --> {result}")
# other example
my_var = [['hi'], ['hi'], ['ih']]
result = cj.remove_duplicate_items(my_var)
print(f"other example: {my_var} --> {result}")
###Output
other example: [['hi'], ['hi'], ['ih']] --> [['hi'], ['ih']]
###Markdown
flattenReceives values, whether arrays of values, regardless of their shape and flatness
###Code
sequence = [[1, 2, 3], [], [[2, [3], 4], 6]]
cj.flatten(sequence)
sequence = [[1, 2, 3], [], [[2, [3], 4], 6]]
cj.flatten(sequence, max_recursion=2)
###Output
_____no_output_____
###Markdown
Freq ClassEnumerates the number of identical items, generating a dictionary of frequencies. Where KEY is the item - the original list's item - and VALUE is the total count. instance of Freq
###Code
# Instance
freq = cj.Freq([1,2,3,3,4,5,6,7,6,7,12,31,123,5,3])
print(freq)
###Output
WARNING:cereja.arraytools:[!] class still under development
{3: 3, 5: 2, 6: 2, 7: 2, 1: 1, 2: 1, 4: 1, 12: 1, 31: 1, 123: 1}
###Markdown
most_freq returns the most frequent items from the list, the maximum number of items must be entered.
###Code
freq = cj.Freq([1,2,3,3,4,5,6,7,6,7,12,31,123,5,3])
freq.most_freq(4)
###Output
WARNING:cereja.arraytools:[!] class still under development
###Markdown
least_freqWhen returning the least frequent items from the list, the maximum number of items must be entered.
###Code
freq = cj.Freq([1,2,3,3,4,5,6,7,6,7,12,31,123,5,3])
freq.least_freq(4)
###Output
WARNING:cereja.arraytools:[!] class still under development
###Markdown
ProgressA simple way to display progress to the user Example-1
###Code
import cereja as cj
import time
def process_data(i: int):
# simulates some processing
time.sleep(cj.rand_n()/max(abs(i), 1))
my_iterable = range(1, 500)
my_progress = cj.Progress("My Progress")
for i in my_progress(my_iterable):
process_data(i)
###Output
[31m🍒[34m My Progress [36m»[37m [37m[==============================] - 100.00% - 🕜 00:00:03 total - [32mDone! ✅[32m[37m [37m
[31m🍒[34m My Progress [36m»[37m [37mCereja's console [31mout![37m[37m[37m [37m
###Markdown
Custom Display Update the percentage by the end of the task
###Code
import cereja as cj
import time
progress = cj.Progress("My Progress")
print(progress)
print(progress[0])
print(progress[1])
print(progress[2])
class MyCustomState(cj.StateBase):
def display(self, current_value, max_value, *args, **kwargs):
return f'{current_value} -> {max_value}'
def done(self, *args, **kwargs):
return f'FINISHED'
progress[0] = MyCustomState
for i in progress(range(1, 500)):
time.sleep(1/i)
###Output
Progress('__StateBar', '__StatePercent', '__StateTime')
[31m🍒[34m Example States View [36m»[37m [37m[==============================] - 100.00% - 🕜 -00:00:00 total - [32mDone! ✅[32m[37m [37m
Bar field [==============================]
Percent field 100.00%
Time field 🕜 00:00:00 total
[31m🍒[34m My Progress [36m»[37m [37mFINISHED - 100.00% - 🕜 00:00:06 total - [32mDone! ✅[32m[37m [37m
###Markdown
Many tasks
###Code
import time
with cj.Progress("Progress Test") as bar:
time.sleep(5) # Awaiting data!
for i in bar(range(1, 500), 'task-1'):
time.sleep(1 / i)
# in an abstract way we identified that the previous task ended.
for i in bar(range(1, 400), 'task-2'):
time.sleep(1 / i)
###Output
[31m🍒[34m Progress Bar Test(task-1) [36m»[37m [37m[==============================] - 100.00% - 🕜 00:00:06 total - [32mDone! ✅[32m[37m [37m
[31m🍒[34m Progress Bar Test(task-2) [36m»[37m [37m[==============================] - 100.00% - 🕜 00:00:06 total - [32mDone! ✅[32m[37m [37m
###Markdown
FileToolsBaseline of this Basics Usage
###Code
import cereja as cj
data = ['how', 'are', 'you!']
# Create new file
file_ = cj.File("/content/new_file.txt", data)
doc = f"""
>>> file_ = cj.File("/content/new_file.txt", data)
>>> file_
{file_}
>>> file_.size(unit='KB')
{repr(file_.size(unit='KB'))}
>>> file_.insert(2, ['CerejaFile'])
{file_.insert(2, ['CerejaFile'])}
>>> file_.content_file
{repr(file_.content_file)}
>>> file_.content_str
{repr(file_.content_str)}
>>> file_.size(unit='KB')
{repr(file_.size(unit='KB'))}
>>> file_.dir_name
{repr(file_.dir_name)}
>>> file_.dir_path
{repr(file_.dir_path)}
>>> file_.ext
{repr(file_.ext)}
>>> file_.file_name
{repr(file_.file_name)}
>>> file_.file_name_without_ext
{repr(file_.file_name_without_ext)}
>>> file_.is_empty
{repr(file_.is_empty)}
>>> file_.is_link
{repr(file_.is_link)}
>>> file_.line_sep
{repr(file_.line_sep)}
>>> file_.n_lines
{repr(file_.n_lines)}
>>> file_.path_
{repr(file_.path_)}
"""
print(doc)
with cj.Progress('Saving File') as prog:
file_.save()
###Output
_____no_output_____
###Markdown
Decorators time_exec is used to measure and log the execution time of the decorated function.
###Code
cj.set_log_level('INFO') # needed to see message
@cj.time_exec
def my_function(my_param): # Change-me
print(my_param)
my_function("Hi Cereja")
###Output
_____no_output_____
###Markdown
deprecation is used to mark a function as deprecated and emit a DeprecationWarning pointing to an alternative.
###Code
@cj.depreciation("path.dotted.to.alternative")
def foo(bar: str):
return f'Result of Foo -> {bar}'
# simule user use func
foo("Test")
###Output
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:6: DeprecationWarning: This function will be deprecated in future versions. You can use path.dotted.to.alternative
###Markdown
Path group_path_from_dir returns data tuples based on the number of items entered for each tuple,follows the default order if no sort function is sent
###Code
"""
group_path_from_dir function ...
"""
my_dir = '/content/sample_data'
my_ext_file = '.csv'
cj.group_path_from_dir(dir_path=my_dir, num_items_on_tuple=2, ext_file=my_ext_file)
###Output
_____no_output_____
###Markdown
file_name
###Code
my_file_path = '/content/sample_data/california_housing_test.csv'
result = cj.file_name(file_path=my_file_path)
print(f"file_path: {my_file_path} --> {result}")
# or with ext
result = cj.file_name(file_path=my_file_path, with_ext=True)
print(f"file_path: {my_file_path} --> {result}")
###Output
file_path: /content/sample_data/california_housing_test.csv --> california_housing_test
file_path: /content/sample_data/california_housing_test.csv --> california_housing_test.csv
###Markdown
Install cereja
###Code
!pip install cereja --upgrade
###Output
Collecting cereja
[?25l Downloading https://files.pythonhosted.org/packages/b2/45/1f021fbfc25169fbd3b12b3b8e5d60afc121485860faeb101b7c03bc06b6/cereja-1.2.4-py3-none-any.whl (66kB)
[K |█████ | 10kB 17.2MB/s eta 0:00:01
[K |██████████ | 20kB 2.8MB/s eta 0:00:01
[K |██████████████▉ | 30kB 3.8MB/s eta 0:00:01
[K |███████████████████▉ | 40kB 4.2MB/s eta 0:00:01
[K |████████████████████████▉ | 51kB 3.2MB/s eta 0:00:01
[K |█████████████████████████████▊ | 61kB 3.7MB/s eta 0:00:01
[K |████████████████████████████████| 71kB 2.8MB/s
[?25hInstalling collected packages: cereja
Successfully installed cereja-1.2.4
###Markdown
Import cereja
###Code
import cereja as cj
###Output
[31m🍒[30m Using Cereja v.1.2.4
###Markdown
Commons is_iterablereturn whether an object is iterable or not.
###Code
my_var = [1,2,3] # change value and execute the cell
cj.is_iterable(my_var)
cj.is_iterable('hi')
###Output
_____no_output_____
###Markdown
is_sequenceReturn whether an object a Sequence or not, exclude strings.
###Code
cj.is_sequence([1,2,3])
cj.is_sequence('hi')
###Output
_____no_output_____
###Markdown
theta_angleCalculates and returns theta angle between two vectors
###Code
u = (2,2)
v = (0, -2)
cj.theta_angle(u, v)
###Output
_____no_output_____
###Markdown
group_items_in_batchesresponsible for grouping items in batch taking into account the quantity of items per batch
###Code
my_var = [1,2,3,4]
num_items_per_batch = 4
result = cj.group_items_in_batches(items=my_var, items_per_batch=num_items_per_batch)
print(f"items per batch with leftovers: {result}")
"""
Note that the last batch does not contain the same number as the previous batch,
this is due to the split split. You can choose to fill this void as in the example below.
"""
my_var = [1,2,3,4]
num_items_per_batch = 3
result = cj.group_items_in_batches(items=my_var, items_per_batch=num_items_per_batch, fill="cereja")
print(f"fill values: {result}")
# Other examples
result = cj.group_items_in_batches(items=['a','b','c','d'], items_per_batch=2)
print(f"Other examples: {result}")
###Output
Other examples: [['a', 'b'], ['c', 'd']]
###Markdown
remove_duplicate_itemsremove duplicates items in an item list or duplicate items list of list
###Code
#simple list
my_var = [1,2,3,4,4]
result = cj.remove_duplicate_items(my_var)
print(f"simple list: {my_var} --> {result}")
#list of list
my_var = [[1,2,3,4,4], [1,2,3,4,4]]
result = cj.remove_duplicate_items(my_var)
print(f"list of list: {my_var} --> {result}")
# other example
my_var = [['hi'], ['hi'], ['ih']]
result = cj.remove_duplicate_items(my_var)
print(f"other example: {my_var} --> {result}")
###Output
other example: [['hi'], ['hi'], ['ih']] --> [['hi'], ['ih']]
###Markdown
flattenReceives values, whether arrays of values, regardless of their shape and flatness
###Code
sequence = [[1, 2, 3], [], [[2, [3], 4], 6]]
cj.flatten(sequence)
sequence = [[1, 2, 3], [], [[2, [3], 4], 6]]
cj.flatten(sequence, max_recursion=2)
###Output
_____no_output_____
###Markdown
Freq ClassEnumerates the number of identical items, generating a dictionary of frequencies. Where KEY is the item - the original list's item - and VALUE is the total count. instance of Freq
###Code
# Instance
freq = cj.Freq([1,2,3,3,4,5,6,7,6,7,12,31,123,5,3])
print(freq)
###Output
Freq({1: 1, 2: 1, 3: 3, 4: 1, 5: 2, 6: 2, 7: 2, 12: 1, 31: 1, 123: 1})
###Markdown
most_freq returns the most frequent items from the list, the maximum number of items must be entered.
###Code
freq = cj.Freq([1,2,3,3,4,5,6,7,6,7,12,31,123,5,3])
freq.most_common(4)
###Output
_____no_output_____
###Markdown
least_freqWhen returning the least frequent items from the list, the maximum number of items must be entered.
###Code
freq = cj.Freq([1,2,3,3,4,5,6,7,6,7,12,31,123,5,3])
freq.least_freq(4)
###Output
_____no_output_____
###Markdown
ProgressA simple way to display progress to the user Example-1
###Code
import cereja as cj
import time
def process_data(i: int):
# simulates some processing
time.sleep(cj.rand_n()/max(abs(i), 1))
my_iterable = range(1, 500)
my_progress = cj.Progress("My Progress")
for i in my_progress(my_iterable):
process_data(i)
###Output
[31m🍒[34m My Progress [36m»[37m [37m[==============================] - 100.00% - 🕜 00:00:03 total - [32mDone! ✅[32m[37m [37m
[31m🍒[34m My Progress [36m»[37m [37mCereja's console [31mout![37m[37m[37m [37m
###Markdown
Custom Display Update the percentage by the end of the task
###Code
import cereja as cj
import time
progress = cj.Progress("My Progress")
print(progress)
print(progress[0])
print(progress[1])
print(progress[2])
class MyCustomState(cj.StateBase):
def display(self, current_value, max_value, *args, **kwargs):
return f'{current_value} -> {max_value}'
def done(self, *args, **kwargs):
return f'FINISHED'
progress[0] = MyCustomState
for i in progress(range(1, 500)):
time.sleep(1/i)
###Output
Progress('__StateBar', '__StatePercent', '__StateTime')
[31m🍒[34m Example States View [36m»[37m [37m[==============================] - 100.00% - 🕜 -00:00:00 total - [32mDone! ✅[32m[37m [37m
Bar field [==============================]
Percent field 100.00%
Time field 🕜 00:00:00 total
[31m🍒[34m My Progress [36m»[37m [37mFINISHED - 100.00% - 🕜 00:00:06 total - [32mDone! ✅[32m[37m [37m
###Markdown
Many tasks
###Code
import time
with cj.Progress("Progress Test") as bar:
time.sleep(5) # Awaiting data!
for i in bar(range(1, 500), 'task-1'):
time.sleep(1 / i)
# in an abstract way we identified that the previous task ended.
for i in bar(range(1, 400), 'task-2'):
time.sleep(1 / i)
###Output
[31m🍒[34m Progress Test(task-1) [36m»[37m [37m[==============================] - 100.00% - 🕜 00:00:06 total - [32mDone! ✅[32m[37m [37m
[31m🍒[34m Progress Test(task-2) [36m»[37m [37m[==============================] - 100.00% - 🕜 00:00:06 total - [32mDone! ✅[32m[37m [37m
###Markdown
FileToolsBaseline of this Basics Usage
###Code
import cereja as cj
data = ['how', 'are', 'you!']
# Create new file
file_ = cj.File("/content/new_file.txt", data)
doc = f"""
>>> file_ = cj.File("/content/new_file.txt", data)
>>> file_
{file_}
>>> file_.size(unit='KB')
{repr(file_.size(unit='KB'))}
>>> file_.insert(2, ['CerejaFile'])
{file_.insert(2, ['CerejaFile'])}
>>> file_.content_file
{repr(file_.content_file)}
>>> file_.content_str
{repr(file_.content_str)}
>>> file_.size(unit='KB')
{repr(file_.size(unit='KB'))}
>>> file_.dir_name
{repr(file_.dir_name)}
>>> file_.dir_path
{repr(file_.dir_path)}
>>> file_.ext
{repr(file_.ext)}
>>> file_.file_name
{repr(file_.file_name)}
>>> file_.file_name_without_ext
{repr(file_.file_name_without_ext)}
>>> file_.is_empty
{repr(file_.is_empty)}
>>> file_.is_link
{repr(file_.is_link)}
>>> file_.line_sep
{repr(file_.new_line_sep)}
>>> file_.n_lines
{repr(file_.n_lines)}
>>> file_.path_
{repr(file_.path)}
"""
print(doc)
with cj.Progress('Saving File') as prog:
file_.save()
###Output
>>> file_ = cj.File("/content/new_file.txt", data)
>>> file_
FileBase<new_file.txt>
>>> file_.size(unit='KB')
0.008
>>> file_.insert(2, ['CerejaFile'])
None
>>> file_.content_file
['how', 'are', 'CerejaFile', 'you!']
>>> file_.content_str
'how\nare\nCerejaFile\nyou!'
>>> file_.size(unit='KB')
0.019
>>> file_.dir_name
'content'
>>> file_.dir_path
'/content'
>>> file_.ext
'.txt'
>>> file_.file_name
'new_file.txt'
>>> file_.file_name_without_ext
'new_file'
>>> file_.is_empty
False
>>> file_.is_link
False
>>> file_.line_sep
'\n'
>>> file_.n_lines
4
>>> file_.path_
<Path object /content/new_file.txt>
[31m🍒[34m Saving File [36m»[37m [37mAwaiting.. [37m [37m
###Markdown
Decorators time_exec is used to measure and log the execution time of the decorated function.
###Code
cj.set_log_level('INFO') # needed to see message
@cj.time_exec
def my_function(my_param): # Change-me
print(my_param)
my_function("Hi Cereja")
###Output
INFO:cereja.utils:Update log level to INFO
Hi Cereja
INFO:cereja.decorators:[my_function] performed 7.510185241699219e-05
###Markdown
deprecation is used to mark a function as deprecated and emit a DeprecationWarning pointing to an alternative.
###Code
@cj.depreciation("path.dotted.to.alternative")
def foo(bar: str):
return f'Result of Foo -> {bar}'
# simule user use func
foo("Test")
###Output
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:6: DeprecationWarning: This function will be deprecated in future versions. You can use path.dotted.to.alternative
###Markdown
Path group_path_from_dir returns data tuples based on the number of items entered for each tuple,follows the default order if no sort function is sent
###Code
"""
group_path_from_dir function ...
"""
my_dir = '/content/sample_data'
my_ext_file = '.csv'
cj.group_path_from_dir(dir_path=my_dir, num_items_on_tuple=2, ext_file=my_ext_file)
###Output
_____no_output_____
###Markdown
file_name
###Code
my_file_path = '/content/sample_data/california_housing_test.csv'
result = cj.file_name(file_path=my_file_path)
print(f"file_path: {my_file_path} --> {result}")
# or with ext
result = cj.file_name(file_path=my_file_path, with_ext=True)
print(f"file_path: {my_file_path} --> {result}")
###Output
file_path: /content/sample_data/california_housing_test.csv --> california_housing_test
file_path: /content/sample_data/california_housing_test.csv --> california_housing_test.csv
|
Thermodynamics_an Eng Approach_5Ed_Cengel_IDN/Chapter_1_1.ipynb | ###Markdown
**Thermodynamics: an Engineering Approach, 7th Ed**Cengel & Boles Chapter 1: Introduction and Basic Concepts Example 1-1, Page.8
###Code
#Diketahui:
El_USD = 0.09 # Harga Listrik adalah 0.09 $/kWh
P_wt = 30 # Wind Turbine power rate, kW
t_wt = 2200 # Durasi kerja Wind Turbine dalam satu tahun, hours
#Dicari: pengiritan per tahun (?)
#Jawab:
E_tot = P_wt * t_wt
print ('Total energi adalah %f kWh' %round(E_tot,0))
#Uang yang diirit:
Saved_USD = E_tot * El_USD
print ('Total pengiritan adalah %f USD' %round(Saved_USD,0))
###Output
Total energi adalah 66000.000000 kWh
Total pengiritan adalah 5940.000000 USD
###Markdown
Example 1-2, Page No.9
###Code
#Diketahui:
p=850;# densitas [kg/m^3]
V=2; # volume tangki [m^3]
#Dicari: Massa (?)
m=p*V;# rumus massa
#Hasil:
print ('Massa minyak di dalam tangki adalah %i kg' %round(m,0))
###Output
Massa minyak di dalam tangki adalah 1700 kg
###Markdown
Example 1-3, Page No.10
###Code
#Diketahui
m=1; # massa seberat 1 lbm
#Konstanta yang dipakai
g=32.174;# konstanta gravitasi, ft/s^2
#Dicari: Berat (w)
lbf = 32.174 # Konversi 1 lbf = 32.174 lbm.ft/s^2
w=(m*g)*(1/lbf); # berat = massa * gravitasi
# konversi lbf ke lbm diperlukan
#Result
print ('Berat benda tersebut di bumi adalah %i lbf' %w)
###Output
Berat benda tersebut di bumi adalah 1 lbf
###Markdown
Example 1-4, Page No.21
###Code
# Diketahui
Tc=10; #deltaT karena proses pemberian kalor, C
# Calculations
Tk=Tc;
Tr=1.8*Tk;#Konversi dari R ke K
Tf=Tr;
# Dihitung dengan rumus di atas
#Hasil
print ('perubahan suhu tersebut adalah %i K' %Tk)
print ('perubahan suhu tersebut adalah %i R' %Tr)
print ('perubahan suhu tersebut adalah %i F' %Tf)
###Output
perubahan suhu tersebut adalah 10 K
perubahan suhu tersebut adalah 18 R
perubahan suhu tersebut adalah 18 F
###Markdown
Example 1-5, Page No.23
###Code
#Diketahui
Patm=14.5; #tekanan atmosfir, psi
Pvac=5.8; #pembacaan vacuum gage, psi
#Proses perhitungan
Pabs=Patm-Pvac;#vacuum selalu dihitung sbg tekanan negatif
#Hasil
print('P_absolut dari ruangan tersebut adalah %f psi'%round(Pabs,1))
###Output
P_absolut dari ruangan tersebut adalah 8.700000 psi
###Markdown
Example 1-6, Page No.26
###Code
#Diketahui:
pw=1000; # Densitas air, kg/m^3;
g=9.81; # Gravitasi, m/s^2;
SG=0.85;# Specific Gravity/Dens. Relatif fluida di manometer
meter = 100 # 1 m = 100 cm, cm
h=55/meter;# tinggi dalam satuan, cm
Patm=96;# Tekanan Atmosfir, kPa
# Jawab
# Menghitung P menggunakan likuid pada ketinggian yang sama
p=SG*pw;
Ptank_abs=Patm+(p*g*h/1000);
#Results
print ('absolute pressure in tank %f kPa' %round(Ptank_abs,1))
###Output
absolute pressure in tank 100.600000 kPa
###Markdown
Example 1-7, Page No.28
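A hedged restatement of the manometer relation evaluated in the cell below (adding rho*g*h while moving down a fluid column and subtracting it while moving up, from the tank air to the free surface):

$$P_1 + \rho_w g h_1 + \rho_{oil} g h_2 - \rho_{Hg} g h_3 = P_{atm}$$

so that $P_1 = P_{atm} - (\rho_w g h_1 + \rho_{oil} g h_2 - \rho_{Hg} g h_3)$, with the code dividing by 1000 to convert Pa to kPa.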
###Code
#Constants used
g=9.81;#acceleration due to gravity in m/s^2;
#Given values
h1=0.1;# distance b/w point 1 at air-water interface and point 2 at mercury-air interface in m
h2=0.2;# distance b/w oil-water interface and mercury-oil interface in m
h3=0.35;# distance b/w air-mercury interface and mercury-oil interface in m
pw=1000;# density of water in kg/m^3
pHg=13600;# density of mercury in kg/m^3
poil=800;# density of oil in kg/m^3
Patm=85.6;# atmospheric pressure in kPa
#Calculation
P1=Patm-(pw*g*h1+poil*g*h2-pHg*g*h3)/1000;#calculating pressure using liquid at same height have same pressure
#Results
print ('the air pressure in tank %i kPa' %round(P1))
###Output
the air pressure in tank 130 kPa
###Markdown
Example 1-8, Page No.31
###Code
#Constants used
g=9.81;# acceleration due to gravity in m/s^2;
#Given values
pHg=13570;# density of mercury at 10 C in kg/m^3
h=0.74;# converting barometric reading into m from mm
#Calculationa
Patm=pHg*g*h/1000;# standard pressure formula
#Results
print ('the atmospheric pressure %f kPa' %round(Patm,1))
###Output
the atmospheric pressure 98.500000 kPa
###Markdown
Example 1-9, Page No.31
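A hedged restatement of the force balance behind the formula in the cell below: the gas pressure acting on the piston area must carry both the atmospheric pressure and the piston weight,

$$P A = P_{atm} A + m g \quad\Rightarrow\quad P = P_{atm} + \frac{m g}{A}$$

with the code dividing the $mg/A$ term by 100000 to express it in bar.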
###Code
#constants used
g=9.81;#acceleration due to gravity in m/s^2;
#given values
m=60;# mass of piston in kg
Patm=0.97;# atmospheric pressure in bar
A=0.04;# cross-sectional area in m^2
#calculation
P=Patm+(m*g/A)/100000;# standard pressure formula
print ('The pressure inside the cylinder %f bar' %round(P,2))
#The volume change will have no effect on the free-body diagram drawn in part (a), and therefore the pressure inside the cylinder will remain the same
print('If some heat is transferred to the gas and its volume is doubled, there is no change in pressure');
###Output
The pressure inside the cylinder 1.120000 bar
If some heat is transferred to the gas and its volume is doubled, there is no change in pressure
###Markdown
Example 1-10, Page No.32
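A hedged restatement of what the cell below computes (rho is the surface-zone density, h1 the surface-zone thickness, H the gradient-zone thickness, and the 1/1000 factors convert Pa to kPa):

$$P = \rho g h_1 + \int_0^{H} \rho g \sqrt{1 + \tan^{2}\!\left(\frac{\pi z}{4H}\right)}\, dz$$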
###Code
import math
from scipy.integrate import quad
from pylab import *
#Constants used
g=9.81;#acceleration due to gravity in m/s^2;
#Given values
p=1040;# density on the water surface in kg/m^3
h1=0.8;# thickness of surface zone
H=4;# thickness of gradient zone
x0=0.0;# lower limit of integration
x1=4.0;# upper limit of integration
#Calculations
P1=p*g*h1/1000;#standard pressure determination formula
#P2 = integration of the exp. p*g*(math.sqrt(1+(math.tan(math.pi*z/4/H)^2))) b/w 0-4
def intgrnd1(z):
return (p*g*(math.sqrt(1+(math.tan(math.pi*(z)/4/H)**2))) )#integrant
P2, err = quad(intgrnd1, x0, x1)
P2=P2/1000;#converting into kPa
P=P1+P2;
#Results
print ('the gage pressure at the bottom of gradient zone %f kPa' %round(P,0))
###Output
the gage pressure at the bottom of gradient zone 54.000000 kPa
|
example/bimodal/README.ipynb | ###Markdown
A bimodal exampleThis is a sample to infer the parameters of a bimodal model, which is a mixture of two Normal distribution components.The data is read from data6.2.1.dat.R, which is from  First of course, import necessary packages.
###Code
%matplotlib inline
from mcupy.graph import *
from mcupy.utils import *
from mcupy.nodes import *
from mcupy.jagsparser import *
import scipy
import seaborn
import pylab
###Output
_____no_output_____
###Markdown
Then read the data from a jags data file
###Code
data=parseJagsDataFile('data6.2.1.dat.R')
obsval=data['obsval']
err=data['err']
###Output
_____no_output_____
###Markdown
Then Let's plot the histogram of the data.
###Code
dummy=pylab.hist(obsval,bins=10)
###Output
_____no_output_____
###Markdown
Then compose the Bayesian network
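Reading the node definitions below, the generative model being assembled is roughly the following (interpreting the second argument of NormalNode as a standard deviation; that reading is an assumption, not stated in the notebook):

$$b_i \sim \mathrm{Bernoulli}(p),\qquad \mathrm{val}_i \sim \mathcal{N}(\mathrm{cent}_{b_i},\, \sigma_{b_i}),\qquad \mathrm{obsval}_i \sim \mathcal{N}(\mathrm{val}_i,\, \mathrm{err}_i),$$

with uniform priors on $p$, $\sigma_1$, $\sigma_2$, $\mathrm{cent}_1$ and $\mathrm{cent}_2$, and the ordering of the uniform bounds keeping $\mathrm{cent}_2 \ge \mathrm{cent}_1$.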
###Code
g=Graph()
p=FixedUniformNode(1e-5,1-1e-5).withTag("p")
sig1=FixedUniformNode(1e-10,10).withTag("sig1")
sig2=FixedUniformNode(1e-10,10).withTag("sig2")
cent1=FixedUniformNode(4,10).withTag("cent1")
cent2Upper=ConstNode(10+1e-6).withTag("cent2Upper")
cent2=UniformNode(cent1,cent2Upper).withTag("cent2")
for i in range(0,len(obsval)):
b=BernNode(p).inGroup("b")
cent=CondNode(b,cent1,cent2).inGroup("cent")
sig=CondNode(b,sig1,sig2).inGroup("sig")
val=NormalNode(cent,sig).inGroup("val")
obsvalNode=NormalNode(val,ConstNode(err[i])).withObservedValue(obsval[i]).inGroup("obsval")
g.addNode(obsvalNode)
###Output
_____no_output_____
###Markdown
Show the structure of the graph to check it.
###Code
display_graph(g)
###Output
_____no_output_____
###Markdown
Declare some monitors to record the results.
###Code
monP=g.getMonitor(p)
monCent1=g.getMonitor(cent1)
monCent2=g.getMonitor(cent2)
monSig1=g.getMonitor(sig1)
monSig2=g.getMonitor(sig2)
###Output
_____no_output_____
###Markdown
Burn 10000 times and sample 10000 times.
###Code
results=[]
for i in log_progress(range(0,10000)):
g.sample()
for i in log_progress(range(0,10000)):
g.sample()
results.append([monP.get(),monCent1.get(),monCent2.get(),monSig1.get(),monSig2.get()])
results=scipy.array(results)
###Output
_____no_output_____
###Markdown
Plot the results.
###Code
dummy=pylab.hist(results[:,0],bins=100)
dummy=pylab.hist(results[:,1],bins=100)
dummy=pylab.hist(results[:,2],bins=100)
dummy=pylab.hist(results[:,3],bins=100)
dummy=pylab.hist(results[:,4],bins=100)
seaborn.jointplot(results[:,1],results[:,2],kind='hex')
seaborn.jointplot(results[:,0],results[:,1],kind='hex')
###Output
_____no_output_____ |
PythonIntroCh7_FR.ipynb | ###Markdown
7. Classes 7.1 IntroductionUne chose que vous apprendrez à connaître sur la programmation, est que les programmeurs aiment être paresseux. Si quelque chose a déjà été fait, pourquoi le refaire?C'est ce à quoi servent les fonctions en Python. Vous avez déjà fait faire quelque chose de spécial à votre code. Maintenant, vous voulez le refaire. Vous placez ce code spécial dans une fonction et vous le réutilisez pour ce qu'il vaut. Vous pouvez faire référence à une fonction n'importe où dans votre code, et l'ordinateur saura toujours de quoi vous parlez. Pratique, non?Bien sûr, les fonctions ont leurs limites. Les fonctions ne stockent pas d'informations comme le font les variables - chaque fois qu'une fonction est exécutée, elle repart à zéro. Cependant, certaines fonctions et variables sont étroitement liées les unes aux autres et doivent souvent interagir entre elles. Par exemple, imaginez que vous ayez un club de golf. Il contient des informations (c'est-à-dire des variables) comme la longueur du manche, le matériau de la poignée et le matériau de la tête. Des fonctions lui sont également associées, comme celle de "swinguer" votre club de golf ou celle de le casser par pure frustration. Pour ces fonctions, vous devez connaître les variables de la longueur du manche, du matériau de la tête, etc.Cela peut être facilement géré avec des fonctions normales. Les paramètres affectent l'effet d'une fonction. Mais qu'en est-il si une fonction doit affecter des variables ? Que se passe-t-il si, à chaque fois que vous utilisez votre club de golf, le manche s'affaiblit, la poignée s'use un peu, vous êtes un peu plus frustré et une nouvelle rayure se forme sur la tête du club ? Une fonction ne peut pas faire cela. Une fonction ne produit qu'une seule sortie, pas quatre, cinq ou cinq cents. Ce qu'il faut, c'est un moyen de regrouper les fonctions et les variables qui sont étroitement liées en un seul endroit afin qu'elles puissent interagir entre elles.Il y a de fortes chances que vous possédiez également plus d'un club de golf. Sans classes, vous devez écrire tout un tas de code pour chaque club de golf différent. C'est un problème, car tous les clubs ont des caractéristiques communes, mais certains ont des propriétés différentes, comme la composition du manche et son poids. L'idéal serait d'avoir un design de votre club de golf de base. Chaque fois que vous créez un nouveau club, il vous suffit de spécifier ses attributs - la longueur de son manche, son poids, etc.Et si vous voulez un club de golf doté de caractéristiques supplémentaires ? Peut-être décidez-vous de fixer une horloge à votre club de golf (pourquoi, je ne sais pas - c'était votre idée). Cela signifie-t-il que nous devons créer ce club de golf à partir de zéro ? Devrions-noud d'abord écrire le code de notre club de golf de base, tout cela à nouveau, ainsi que le code de l'horloge, pour notre nouveau design. Ne serait-il pas préférable de prendre notre club de golf existant et d'y ajouter le code de l'horloge?Ce sont des problèmes qu'une chose appelée programmation orientée objet résout. Elle met les fonctions et les variables ensemble de manière à ce qu'elles puissent se voir et travailler ensemble, être répliquées et modifiées selon les besoins, et non pas quand c'est inutile. Et nous utilisons une chose appelée `class` pour faire cela. 7.2 Création d'une `Class`Qu'est-ce qu'une class ? Pensez à une class comme à un plan (blueprint). Elle n'est pas quelque chose en soi, elle décrit simplement comment faire quelque chose. 
Vous pouvez créer un grand nombre d'objets à partir de ce plan - ce que l'on appelle techniquement une *instance*.Comment créer ces soi-disant 'classes'? Très facilement, avec l'opérateur `class`:```Python Définition d'une classclass nom_class: [instruction 1] [instruction 2] [instruction 3] [etc]```Cela n'a pas beaucoup de sens ? Ce n'est pas grave, voici un exemple, qui crée la définition d'une `Forme`(Shape):```PythonUn exemple de classclass Forme: def __init__(self,x,y): self.x = x self.y = y description = "Cette forme n'a pas encore été décrite" auteur = "Personne n'a encore prétendu réaliser cette forme" def aire(self): return self.x * self.y def périmètre(self): return 2 * self.x + 2 * self.y def décrire(self,text): self.description = text def nom_Auteur(self,text): self.auteur = text def échelletaille(self,scale): self.x = self.x * scale self.y = self.y * scale```Ce que vous avez créé est une description d'une forme (c'est-à-dire les variables) et les opérations que vous pouvez faire avec cette forme (c'est-à-dire les fonctions). Ceci est très important - vous n'avez pas créé une forme réelle, simplement la description de ce qu'est une forme. La forme a une largeur (`x`), une hauteur (`y`), une aire et un périmètre (`aire(self)` et `périmètre(self)`). Aucun code n'est exécuté lorsque vous définissez une classe - vous créez simplement des fonctions et des variables.La fonction appelée `__init__` est exécutée lorsque nous créons une instance de `Forme` - c'est-à-dire, lorsque nous créons une forme réelle, par opposition au 'plan ou blueprint' que nous avons ici, `__init__` est exécutée. Vous comprendrez plus tard comment cela fonctionne.`self` est la façon dont nous nous référons aux choses dans la class à partir d'elle-même. `self` est le premier paramètre de toute fonction définie à l'intérieur d'une class. Toute fonction ou variable créée au premier niveau d'indentation (c'est-à-dire les lignes de code qui commencent une TAB à droite de l'endroit où nous avons placé la classe `Forme`) est automatiquement placée dans self. Pour accéder à ces fonctions et variables ailleurs dans la class, leur nom doit être précédé de `self` et d'un point (par exemple `self.nom_variable`). Sans `self`, vous ne pouvez utiliser les variables qu'à l'intérieur de la fonction où elles sont définies, pas dans d'autres fonctions de la même `class`. 7.3 Utilisation d'une `class`C'est bien beau de pouvoir créer une classe, mais comment l'utiliser ? Voici un exemple de ce que nous appelons la création d'une instance d'une class. Supposons que le code ci-dessus ait déjà été exécuté:```Pythonrectangle = Forme(100,45)```Qu'est-ce qui a été fait ? Cela demande un peu d'explications...C'est maintenant que la fonction `__init__` entre vraiment en jeu. On crée une instance d'une class en lui donnant d'abord son nom (dans ce cas, `Forme`) puis, entre parenthèses, les valeurs à passer à la fonction `__init__`. La fonction init s'exécute (en utilisant les paramètres que vous lui avez donnés entre parenthèses) et génère une instance de cette class, qui dans ce cas porte le nom de `rectangle`.Pensez à notre instance de class, `rectangle`, comme une collection autonome de variables et de fonctions. De la même manière que nous avons utilisé `self` pour accéder aux fonctions et aux variables de l'instance de class depuis l'intérieur de celle-ci, nous utilisons le nom que nous lui avons attribué (rectangle) pour accéder aux fonctions et aux variables de l'instance de classe depuis l'extérieur de celle-ci. 
En ajoutant tout le code ci-dessus, nous ferions ceci :
###Code
class Forme:
def __init__(self,x,y):
self.x = x
self.y = y
description = "Cette forme n'a pas encore été décrite"
auteur = "Personne n'a encore prétendu réaliser cette forme"
def aire(self):
return self.x * self.y
def périmètre(self):
return 2 * self.x + 2 * self.y
def décrire(self,text):
self.description = text
def nom_Auteur(self,text):
self.auteur = text
def échelletaille(self,scale):
self.x = self.x * scale
self.y = self.y * scale
rectangle = Forme(100,45)
#trouver l'aire de votre rectangle:
print(rectangle.aire())
#trouver le périmètre de votre rectangle:
print(rectangle.périmètre())
#décrire le rectangle
rectangle.décrire("Un large rectangle, avec une longueur plus\
de fois sa largeur")
#réduction de 50 % de la taille du rectangle
rectangle.échelletaille(0.5)
#réaffichage de la nouvelle surface du rectangle
print(rectangle.aire())
###Output
_____no_output_____
###Markdown
Comme vous le voyez, là où `self` serait utilisé à l'intérieur de l'instance de la class, son nom assigné est utilisé à l'extérieur de la class. Nous faisons cela pour voir et modifier les variables à l'intérieur de la class, et pour accéder aux fonctions qui s'y trouvent.Nous ne sommes pas limités à une seule instance d'une class - nous pouvons avoir autant d'instances que nous le souhaitons. Je pourrais faire ceci:```Pythonlongrectangle = Forme(120,10)largerectangle = Forme(130,120)```et `longrectangle` et `largerectangle` ont tous deux leurs propres fonctions et variables - ils sont totalement indépendants les uns des autres. Il n'y a pas de limite au nombre d'instances que je peux créer.Essayez avec quelques instances différentes dans le champ ci-dessus. 7.4 TermesLa programmation orientée objet est associée à un certain nombre de termes (lingo). Il est temps de mettre tout cela au clair:* Lorsque nous décrivons pour la première fois une `class`, nous la *définissons* (comme pour les fonctions)* La possibilité de regrouper des fonctions et des variables similaires est appelée *encapsulation** Le mot `class` peut être utilisé pour décrire le code dans lequel la classe est définie (comme pour la définition d'une fonction), et il peut aussi faire référence à une instance de cette `class` - cela peut prêter à confusion, alors assurez-vous de savoir sous quelle forme nous parlons des classes.* Une variable dans une classe est appelée un *Attribut* (Attribute)* Une fonction dans une classe est appelée une *méthode* (method)* Une class fait partie de la même catégorie de choses que les variables, les listes, les dictionnaires, etc. C'est-à-dire que ce sont des *objets** Une class est connue comme une "structure de données" (data structure)- elle contient des données et les méthodes pour traiter ces données. 7.5 HéritageRevenons sur l'introduction. Nous savons que les class regroupent des variables et des fonctions, appelées attributs et méthodes, de sorte que les données et le code pour les traiter se trouvent au même endroit. Nous pouvons créer un nombre quelconque d'instances de cette classe, de sorte que nous n'avons pas à écrire un nouveau code pour chaque nouvel objet que nous créons. Mais qu'en est-il de l'ajout de fonctionnalités supplémentaires à la conception de notre club de golf ? C'est là que l'*héritage* (inheritance) entre en jeu.Python rend l'héritage très facile. Nous définissons une nouvelle class, basée sur une autre class 'parent'. Notre nouvelle classe reprend tout ce qui se trouve dans la class parent, et nous pouvons également y ajouter d'autres éléments. Si un nouvel attribut ou une nouvelle méthode porte le même nom qu'un attribut ou une méthode de notre class parent, il est utilisé à la place de la class parent. 
Remember the `Forme` class?```Pythonclass Forme: def __init__(self,x,y): self.x = x self.y = y description = "Cette forme n'a pas encore été décrite" auteur = "Personne n'a encore prétendu réaliser cette forme" def aire(self): return self.x * self.y def périmètre(self): return 2 * self.x + 2 * self.y def décrire(self,text): self.description = text def nom_Auteur(self,text): self.auteur = text def échelletaille(self,scale): self.x = self.x * scale self.y = self.y * scale```If we wanted to define a new class, say a square, based on our previous Forme class, we would do this:```Pythonclass Carré(Forme): def __init__(self,x): self.x = x self.y = x```This is exactly like a normal class definition, except that this time we put, in parentheses after the name, the parent class we inherit from. As you can see, we described a square very *quickly* thanks to this. That is because we inherited from the Forme class and only changed what needed to change. In this case, we redefined Forme's `__init__` function so that the X and Y values are the same. Let's build on what we have learned and create another new class, this time inheriting from `Carré`. It will be two squares, one immediately to the left of the other:```Python# The shape looks like this: _________| | || | ||____|____|class DoubleCarré(Carré): def __init__(self,y): self.x = 2 * y self.y = y def périmètre(self): return 2 * self.x + 3 * self.y```This time we also had to redefine the `périmètre` function, because there is a line running through the middle of the shape. Try creating an instance of this class in the field below and play with different values. Since the `Forme` class has already been run, you can simply add just the new classes here and define the instances.
###Code
class Carré(Forme):
def __init__(self,x):
self.x = x
self.y = x
# The shape looks like this:
# _________
#| | |
#| | |
#|____|____|
class DoubleCarré(Carré):
def __init__(self,y):
self.x = 2 * y
self.y = y
def périmètre(self):
return 2 * self.x + 3 * self.y
testcarré = Carré(5)
testdouble = DoubleCarré(6)
###Output
_____no_output_____
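A quick check of the inherited and overridden behaviour (an addition, assuming the cell above has been run):
```Python
# Carré inherits aire() and périmètre() unchanged from Forme
print(testcarré.aire())        # 25  (5 * 5)
print(testcarré.périmètre())   # 20  (2*5 + 2*5)

# DoubleCarré inherits aire() but overrides périmètre()
print(testdouble.aire())       # 72  (12 * 6)
print(testdouble.périmètre())  # 42  (2*12 + 3*6)
```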
###Markdown
7.6 Pointers and class dictionaries Thinking back, when you say that one variable equals another, for example `variable2 = variable1`, the variable on the left of the equals sign takes the value of the variable on the right. With class instances this works a little differently - the name on the left becomes the class instance on the right. So in `instance2 = instance1`, `instance2` "points" to `instance1` - there are two names given to the class instance, and you can access the class instance through either name. In other languages you do this kind of thing using *pointers*, but in Python it all happens behind the scenes. The last thing we will cover is class dictionaries. Keeping in mind what we have just learned about pointers, we can assign an instance of a class to an entry in a list or a dictionary. This allows any number of class instances to exist while our program runs. Let's look at the example below and see how it illustrates what I'm talking about:
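Before the dictionary example in the next cell, here is a tiny sketch of the "two names, one instance" idea (an addition, assuming `Forme` is defined):
```Python
instance1 = Forme(10, 4)
instance2 = instance1          # no copy is made: both names refer to the same object

instance2.échelletaille(2)     # scale it through one name...
print(instance1.aire())        # ...and the other name sees the change: 160 instead of 40
print(instance1 is instance2)  # True: they are the same object
```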
###Code
# Once again, assume that the definitions of Forme,
# Carré and DoubleCarré have been run.
# First, create a dictionary:
dictionnaire = {}
# Next, create a few class instances in the dictionary:
dictionnaire["DoubleCarré 1"] = DoubleCarré(5)
dictionnaire["long rectangle"] = Forme(600,45)
# You can now use them like a normal class:
print(dictionnaire["long rectangle"].aire())
dictionnaire["DoubleCarré 1"].nom_Auteur("The Gingerbread Man")
print(dictionnaire["DoubleCarré 1"].auteur)
###Output
_____no_output_____ |
_notebooks/2020-12-19-SGD-3.ipynb | ###Markdown
SGD Experiment 3
> Using an existing model and some fastbook APIs (based on fast.ai lesson 4)
- toc:true
- branch: master
- badges: true
- comments: true
- author: jsqihui
- categories: [fast.ai]
###Code
#hide
!pip install -Uqq fastbook
import fastbook
fastbook.setup_book()
from fastai.vision.all import *
from fastbook import *
matplotlib.rc('image', cmap='Greys')
###Output
|████████████████████████████████| 727kB 12.0MB/s
|████████████████████████████████| 1.1MB 7.6MB/s
|████████████████████████████████| 194kB 10.1MB/s
|████████████████████████████████| 51kB 4.6MB/s
|████████████████████████████████| 61kB 5.5MB/s
Mounted at /content/gdrive
###Markdown
MNIST example
###Code
path = untar_data(URLs.MNIST_SAMPLE)
Path.BASE_PATH = path
dls = ImageDataLoaders.from_folder(path)
learn = cnn_learner(dls, resnet18, pretrained=False,
loss_func=F.cross_entropy, metrics=accuracy)
learn.fit_one_cycle(1)
###Output
_____no_output_____ |
course/5 Gradient Descent.ipynb | ###Markdown
Gradient Descent
###Code
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Linear Algebra with Numpy
###Code
a = np.array([1, 3, 2, 4])
a
type(a)
A = np.array([[3, 1, 2],
[2, 3, 4]])
B = np.array([[0, 1],
[2, 3],
[4, 5]])
C = np.array([[0, 1],
[2, 3],
[4, 5],
[0, 1],
[2, 3],
[4, 5]])
print("A is a {} matrix".format(A.shape))
print("B is a {} matrix".format(B.shape))
print("C is a {} matrix".format(C.shape))
A[0]
C[2, 0]
B[:, 0]
###Output
_____no_output_____
###Markdown
Elementwise operations
###Code
3 * A
A + A
A * A
A / A
A - A
A + B
A * B
###Output
_____no_output_____
###Markdown
Dot product
###Code
A.shape
B.shape
A.dot(B)
np.dot(A, B)
B.dot(A)
C.shape
A.shape
C.dot(A)
A.dot(C)
###Output
_____no_output_____
###Markdown
Gradient descent 
###Code
df = pd.read_csv('../data/banknotes.csv')
df.head()
df['class'].value_counts()
import seaborn as sns
sns.pairplot(df, hue="class")
###Output
_____no_output_____
###Markdown
Baseline model
###Code
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import scale
X = scale(df.drop('class', axis=1).values)
y = df['class'].values
model = RandomForestClassifier()
cross_val_score(model, X, y)
###Output
_____no_output_____
###Markdown
Logistic Regression Model
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3,
random_state=42)
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
history = model.fit(X_train, y_train)
result = model.evaluate(X_test, y_test)
historydf = pd.DataFrame(history.history, index=history.epoch)
historydf.plot(ylim=(0,1))
plt.title("Test accuracy: {:3.1f} %".format(result[0]*100), fontsize=15)
###Output
_____no_output_____
###Markdown
Learning Rates
###Code
dflist = []
learning_rates = [0.01, 0.05, 0.1, 0.5]
for lr in learning_rates:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=SGD(lr=lr),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
historydf
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([learning_rates, metrics_reported],
names=['learning_rate', 'metric'])
historydf.columns = idx
historydf
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Batch Sizes
###Code
dflist = []
batch_sizes = [16, 32, 64, 128]
for batch_size in batch_sizes:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=batch_size, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([batch_sizes, metrics_reported],
names=['batch_size', 'metric'])
historydf.columns = idx
historydf
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Optimizers
###Code
from keras.optimizers import SGD, Adam, Adagrad, RMSprop
dflist = []
optimizers = ['SGD(lr=0.01)',
'SGD(lr=0.01, momentum=0.3)',
'SGD(lr=0.01, momentum=0.3, nesterov=True)',
'Adam(lr=0.01)',
'Adagrad(lr=0.01)',
'RMSprop(lr=0.01)']
for opt_name in optimizers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=eval(opt_name),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([optimizers, metrics_reported],
names=['optimizers', 'metric'])
historydf.columns = idx
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Initialization https://keras.io/initializers/
###Code
dflist = []
initializers = ['zeros', 'uniform', 'normal',
'he_normal', 'lecun_uniform']
for init in initializers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,),
kernel_initializer=init,
activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([initializers, metrics_reported],
names=['initializers', 'metric'])
historydf.columns = idx
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Inner layer representation
###Code
K.clear_session()
model = Sequential()
model.add(Dense(2, input_shape=(4,), activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.01),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=20,
verbose=1, validation_split=0.3)
result = model.evaluate(X_test, y_test)
result
model.summary()
model.layers
inp = model.layers[0].input
out = model.layers[0].output
inp
out
features_function = K.function([inp], [out])
features_function
features_function([X_test])[0].shape
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
K.clear_session()
model = Sequential()
model.add(Dense(3, input_shape=(4,), activation='relu'))
model.add(Dense(2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.01),
metrics=['accuracy'])
inp = model.layers[0].input
out = model.layers[1].output
features_function = K.function([inp], [out])
plt.figure(figsize=(15,10))
for i in range(1, 26):
plt.subplot(5, 5, i)
h = model.fit(X_train, y_train, batch_size=16, epochs=1, verbose=0)
test_accuracy = model.evaluate(X_test, y_test)[1]
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
plt.xlim(-0.5, 3.5)
plt.ylim(-0.5, 4.0)
plt.title('Epoch: {}, Test Acc: {:3.1f} %'.format(i, test_accuracy * 100.0))
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Gradient Descent
###Code
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Linear Algebra with Numpy
###Code
a = np.array([1, 3, 2, 4])
a
type(a)
A = np.array([[3, 1, 2],
[2, 3, 4]])
B = np.array([[0, 1],
[2, 3],
[4, 5]])
C = np.array([[0, 1],
[2, 3],
[4, 5],
[0, 1],
[2, 3],
[4, 5]])
print("A is a {} matrix".format(A.shape))
print("B is a {} matrix".format(B.shape))
print("C is a {} matrix".format(C.shape))
A[0]
C[2, 0]
B[:, 0]
###Output
_____no_output_____
###Markdown
Elementwise operations
###Code
3 * A
A + A
A * A
A / A
A - A
###Output
_____no_output_____
###Markdown
Uncomment the code in the next cells. You will see that tensors of different shape cannot be added or multiplied:
###Code
# A + B
# A * B
###Output
_____no_output_____
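The commented cell above fails because elementwise operations need matching shapes: A is (2, 3) while B is (3, 2). A small sketch of what does work (an addition to the original notebook):
```Python
# B.T has shape (2, 3), the same as A, so elementwise ops are defined
print((A + B.T).shape)   # (2, 3)
print(A * B.T)           # elementwise product, not a matrix product
```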
###Markdown
Dot product
###Code
A.shape
B.shape
A.dot(B)
np.dot(A, B)
B.dot(A)
C.shape
A.shape
C.dot(A)
###Output
_____no_output_____
###Markdown
Uncomment the code in the next cell to visualize the error:
###Code
# A.dot(C)
###Output
_____no_output_____
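The commented cell above fails because the inner dimensions do not match: `A.dot(C)` would need the number of columns of A (3) to equal the number of rows of C (6). A short reminder of the (m, n) dot (n, p) gives (m, p) rule (an addition to the original notebook):
```Python
print(C.dot(A).shape)   # C is (6, 2), A is (2, 3) -> result is (6, 3)
print(A.dot(B).shape)   # A is (2, 3), B is (3, 2) -> result is (2, 2)
```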
###Markdown
Gradient descent 
###Code
df = pd.read_csv('../data/banknotes.csv')
df.head()
df['class'].value_counts()
import seaborn as sns
sns.pairplot(df, hue="class");
###Output
_____no_output_____
###Markdown
Baseline model
###Code
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import scale
X = scale(df.drop('class', axis=1).values)
y = df['class'].values
model = RandomForestClassifier()
cross_val_score(model, X, y)
###Output
_____no_output_____
###Markdown
Logistic Regression Model
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3,
random_state=42)
import tensorflow.keras.backend as K
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.optimizers import SGD
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=10)
result = model.evaluate(X_test, y_test, verbose=0)
historydf = pd.DataFrame(history.history, index=history.epoch)
historydf.plot(ylim=(0,1))
plt.title("Test accuracy: {:3.1f} %".format(result[1]*100), fontsize=15);
###Output
_____no_output_____
###Markdown
Learning Rates
###Code
dflist = []
learning_rates = [0.01, 0.05, 0.1, 0.5]
for lr in learning_rates:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=SGD(learning_rate=lr),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=10, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
historydf
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([learning_rates, metrics_reported],
names=['learning_rate', 'metric'])
historydf.columns = idx
historydf
plt.figure(figsize=(12,8))
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('accuracy', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Batch Sizes
###Code
dflist = []
batch_sizes = [16, 32, 64, 128]
for batch_size in batch_sizes:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=batch_size, epochs=10, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([batch_sizes, metrics_reported],
names=['batch_size', 'metric'])
historydf.columns = idx
historydf
plt.figure(figsize=(12,8))
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('accuracy', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Optimizers
###Code
from tensorflow.keras.optimizers import SGD, Adam, Adagrad, RMSprop
dflist = []
optimizers = ['SGD(learning_rate=0.01)',
'SGD(learning_rate=0.01, momentum=0.3)',
'SGD(learning_rate=0.01, momentum=0.3, nesterov=True)',
'Adam(learning_rate=0.01)',
'Adagrad(learning_rate=0.01)',
'RMSprop(learning_rate=0.01)']
for opt_name in optimizers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=eval(opt_name),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([optimizers, metrics_reported],
names=['optimizers', 'metric'])
historydf.columns = idx
plt.figure(figsize=(12,8))
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('accuracy', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Initialization https://keras.io/initializers/
###Code
dflist = []
initializers = ['zeros', 'uniform', 'normal',
'he_normal', 'lecun_uniform']
for init in initializers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,),
kernel_initializer=init,
activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([initializers, metrics_reported],
names=['initializers', 'metric'])
historydf.columns = idx
plt.figure(figsize=(12,8))
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('accuracy', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Inner layer representation
###Code
K.clear_session()
model = Sequential()
model.add(Dense(2, input_shape=(4,), activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(learning_rate=0.01),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=20,
verbose=1, validation_split=0.3)
result = model.evaluate(X_test, y_test)
result
model.summary()
model.layers
inp = model.layers[0].input
out = model.layers[0].output
inp
out
features_function = K.function([inp], [out])
features_function
features_function([X_test])[0].shape
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
K.clear_session()
model = Sequential()
model.add(Dense(3, input_shape=(4,), activation='relu'))
model.add(Dense(2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(learning_rate=0.01),
metrics=['accuracy'])
inp = model.layers[0].input
out = model.layers[1].output
features_function = K.function([inp], [out])
plt.figure(figsize=(15,10))
for i in range(1, 26):
plt.subplot(5, 5, i)
h = model.fit(X_train, y_train, batch_size=16, epochs=1, verbose=0)
test_accuracy = model.evaluate(X_test, y_test, verbose=0)[1]
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
plt.xlim(-0.5, 3.5)
plt.ylim(-0.5, 4.0)
plt.title('Epoch: {}, Test Acc: {:3.1f} %'.format(i, test_accuracy * 100.0))
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Gradient Descent
###Code
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Linear Algebra with Numpy
###Code
a = np.array([1, 3, 2, 4])
a
type(a)
A = np.array([[3, 1, 2],
[2, 3, 4]])
B = np.array([[0, 1],
[2, 3],
[4, 5]])
C = np.array([[0, 1],
[2, 3],
[4, 5],
[0, 1],
[2, 3],
[4, 5]])
print("A is a {} matrix".format(A.shape))
print("B is a {} matrix".format(B.shape))
print("C is a {} matrix".format(C.shape))
A[0]
C[2, 0]
B[:, 0]
###Output
_____no_output_____
###Markdown
Elementwise operations
###Code
3 * A
A + A
A * A
A / A
A - A
A + B
A * B
###Output
_____no_output_____
###Markdown
Dot product
###Code
A.shape
B.shape
A.dot(B)
np.dot(A, B)
B.dot(A)
C.shape
A.shape
C.dot(A)
A.dot(C)
###Output
_____no_output_____
###Markdown
Gradient descent 
###Code
df = pd.read_csv('../data/banknotes.csv')
df.head()
df['class'].value_counts()
import seaborn as sns
sns.pairplot(df, hue="class")
###Output
_____no_output_____
###Markdown
Baseline model
###Code
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import scale
X = scale(df.drop('class', axis=1).values)
y = df['class'].values
model = RandomForestClassifier()
cross_val_score(model, X, y)
###Output
_____no_output_____
###Markdown
Logistic Regression Model
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3,
random_state=42)
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
history = model.fit(X_train, y_train)
result = model.evaluate(X_test, y_test)
historydf = pd.DataFrame(history.history, index=history.epoch)
historydf.plot()
plt.title("Test accuracy: {:3.1f} %".format(result[1]*100), fontsize=15)
###Output
_____no_output_____
###Markdown
Learning Rates
###Code
dflist = []
learning_rates = [0.01, 0.05, 0.1, 0.5]
for lr in learning_rates:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=SGD(lr=lr),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
historydf
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([learning_rates, metrics_reported],
names=['learning_rate', 'metric'])
historydf.columns = idx
historydf
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Batch Sizes
###Code
dflist = []
batch_sizes = [16, 32, 64, 128]
for batch_size in batch_sizes:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=batch_size, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([batch_sizes, metrics_reported],
names=['batch_size', 'metric'])
historydf.columns = idx
historydf
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Optimizers
###Code
from keras.optimizers import SGD, Adam, Adagrad, RMSprop
dflist = []
optimizers = ['SGD(lr=0.01)',
'SGD(lr=0.01, momentum=0.3)',
'SGD(lr=0.01, momentum=0.3, nesterov=True)',
'Adam(lr=0.01)',
'Adagrad(lr=0.01)',
'RMSprop(lr=0.01)']
for opt_name in optimizers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=eval(opt_name),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([optimizers, metrics_reported],
names=['optimizers', 'metric'])
historydf.columns = idx
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Initialization https://keras.io/initializers/
###Code
dflist = []
initializers = ['zeros', 'uniform', 'normal',
'he_normal', 'lecun_uniform']
for init in initializers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,),
kernel_initializer=init,
activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([initializers, metrics_reported],
names=['initializers', 'metric'])
historydf.columns = idx
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Inner layer representation
###Code
K.clear_session()
model = Sequential()
model.add(Dense(2, input_shape=(4,), activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.01),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=20,
verbose=1, validation_split=0.3)
result = model.evaluate(X_test, y_test)
result
model.summary()
model.layers
inp = model.layers[0].input
out = model.layers[0].output
inp
out
features_function = K.function([inp], [out])
features_function
features_function([X_test])[0].shape
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
K.clear_session()
model = Sequential()
model.add(Dense(3, input_shape=(4,), activation='relu'))
model.add(Dense(2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.01),
metrics=['accuracy'])
inp = model.layers[0].input
out = model.layers[1].output
features_function = K.function([inp], [out])
plt.figure(figsize=(15,10))
for i in range(1, 26):
plt.subplot(5, 5, i)
h = model.fit(X_train, y_train, batch_size=16, epochs=1, verbose=0)
test_accuracy = model.evaluate(X_test, y_test)[1]
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
plt.xlim(-0.5, 3.5)
plt.ylim(-0.5, 4.0)
plt.title('Epoch: {}, Test Acc: {:3.1f} %'.format(i, test_accuracy * 100.0))
plt.tight_layout()
###Output
412/412 [==============================] - 0s 101us/step
412/412 [==============================] - 0s 33us/step
412/412 [==============================] - 0s 29us/step
412/412 [==============================] - 0s 29us/step
412/412 [==============================] - 0s 25us/step
412/412 [==============================] - 0s 34us/step
412/412 [==============================] - 0s 29us/step
412/412 [==============================] - 0s 35us/step
412/412 [==============================] - 0s 29us/step
412/412 [==============================] - 0s 26us/step
412/412 [==============================] - 0s 26us/step
412/412 [==============================] - 0s 29us/step
412/412 [==============================] - 0s 27us/step
412/412 [==============================] - 0s 30us/step
412/412 [==============================] - 0s 36us/step
412/412 [==============================] - 0s 26us/step
412/412 [==============================] - 0s 25us/step
412/412 [==============================] - 0s 25us/step
412/412 [==============================] - 0s 29us/step
412/412 [==============================] - 0s 26us/step
412/412 [==============================] - 0s 28us/step
412/412 [==============================] - 0s 26us/step
412/412 [==============================] - 0s 30us/step
412/412 [==============================] - 0s 28us/step
412/412 [==============================] - 0s 27us/step
###Markdown
Exercise 1You've just been hired at a wine company and they would like you to help them build a model that predicts the quality of their wine based on several measurements. They give you a dataset with wine measurements.- Load the ../data/wines.csv into Pandas- Use the column called "Class" as target- Check how many classes there are in the target, and if necessary use dummy columns for a multi-class classification- Use all the other columns as features, check their range and distribution (using seaborn pairplot)- Rescale all the features using either MinMaxScaler or StandardScaler- Build a deep model with at least 1 hidden layer to classify the data- Choose the cost function, what will you use? Mean Squared Error? Binary Cross-Entropy? Categorical Cross-Entropy?- Choose an optimizer- Choose a value for the learning rate, you may want to try with several values- Choose a batch size- Train your model on all the data using a `validation_split=0.2`. Can you converge to 100% validation accuracy?- What's the minimum number of epochs to converge?- Repeat the training several times to verify how stable your results are
###Code
df = pd.read_csv('../data/wines.csv')
sns.pairplot(df, hue='Class')
from sklearn.preprocessing import MinMaxScaler
mms = MinMaxScaler()
X = df.loc[:,df.columns!='Class']
X = pd.DataFrame(mms.fit_transform(X))
y = df[['Class']]
from keras.utils import np_utils
uniques, ids = np.unique(y, return_inverse=True)
y_code = np_utils.to_categorical(ids, len(uniques))
from sklearn.model_selection import train_test_split
from keras.layers import Dropout
X_train, X_test, y_train, y_test = train_test_split(X, y_code, test_size=0.2)
K.clear_session()
model = Sequential()
model.add(Dense(16, input_shape=(13,), activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))
model.compile(Adam(0.1),loss='categorical_crossentropy',metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=128,epochs=100, validation_split=0.2)
result = model.evaluate(X_test, y_test)
print(result)
y_test_pred = np.argmax(model.predict(X_test), axis=1)
y_train_pred = np.argmax(model.predict(X_train), axis=1)
y_test_true = np.argmax(y_test,axis=1)
y_train_true = np.argmax(y_train,axis=1)
from sklearn.metrics import accuracy_score,classification_report
print("The accuracy train score is {:0.3f}".format(accuracy_score(y_test_pred, y_test_true)))
print(classification_report(y_test_pred, y_test_true))
print("The accuracy test score is {:0.3f}".format(accuracy_score(y_train_pred, y_train_true)))
print(classification_report(y_train_pred, y_train_true))
###Output
The accuracy test score is 0.972
precision recall f1-score support
0 1.00 1.00 1.00 12
1 1.00 0.93 0.97 15
2 0.90 1.00 0.95 9
avg / total 0.98 0.97 0.97 36
The accuracy train score is 0.993
precision recall f1-score support
0 1.00 0.98 0.99 48
1 0.98 1.00 0.99 56
2 1.00 1.00 1.00 38
avg / total 0.99 0.99 0.99 142
###Markdown
Exercise 2Since this dataset has 13 features we can only visualize pairs of features like we did in the Paired plot. We could however exploit the fact that a neural network is a function to extract 2 high level features to represent our data.- Build a deep fully connected network with the following structure: - Layer 1: 8 nodes - Layer 2: 5 nodes - Layer 3: 2 nodes - Output : 3 nodes- Choose activation functions, initializations, optimizer and learning rate so that it converges to 100% accuracy within 20 epochs (not easy)- Remember to train the model on the scaled data- Define a Feature Function like we did above between the input of the 1st layer and the output of the 3rd layer- Calculate the features and plot them on a 2-dimensional scatter plot- Can we distinguish the 3 classes well?
###Code
from sklearn.model_selection import train_test_split
from keras.layers import Dropout
X_train, X_test, y_train, y_test = train_test_split(X, y_code, test_size=0.2)
optimizers = ['Adam(lr=0.01)',
'Adam(lr=0.04)',
'Adam(lr=0.08)']
initializers = ['zeros', 'uniform', 'normal',
'he_normal', 'lecun_uniform']
activations = ['relu','tanh','sigmoid']
max_result = 0
max_str = ''
for act in activations:
for opt in optimizers:
for init in initializers:
K.clear_session()
model = Sequential()
model.add(Dense(8, input_shape=(13,), activation=act, kernel_initializer=init))
model.add(Dense(5, activation=act, kernel_initializer=init))
model.add(Dense(2, activation=act,kernel_initializer=init))
model.add(Dense(3, activation='softmax',kernel_initializer=init))
model.compile(optimizer=eval(opt),loss='categorical_crossentropy',metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=128, epochs=20, validation_split=0.2, verbose=0)
result = model.evaluate(X_test, y_test)
str_s = act + ":" + str(opt) + ":" + str(init) + ":" + str(result)
if result[1] > max_result:
max_result = result[1]
max_str = str_s
print(str_s)
print("Best result: ", max_str)
model = Sequential()
model.add(Dense(8, input_shape=(13,), activation='tanh', kernel_initializer='lecun_uniform'))
model.add(Dense(5, activation='tanh', kernel_initializer='lecun_uniform'))
model.add(Dense(2, activation='tanh',kernel_initializer='lecun_uniform'))
model.add(Dense(3, activation='softmax',kernel_initializer='lecun_uniform'))
model.compile(optimizer=eval('Adam(lr=0.04)'),loss='categorical_crossentropy',metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=128, epochs=20, validation_split=0.2, verbose=0)
result = model.evaluate(X_test, y_test)
inp = model.layers[0].input
out = model.layers[2].output
features_function = K.function([inp], [out])
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
###Output
_____no_output_____
###Markdown
Exercise 3Keras functional API. So far we've always used the Sequential model API in Keras. However, Keras also offers a Functional API, which is much more powerful. You can find its [documentation here](https://keras.io/getting-started/functional-api-guide/). Let's see how we can leverage it.- define an input layer called `inputs`- define two hidden layers as before, one with 8 nodes, one with 5 nodes- define a `second_to_last` layer with 2 nodes- define an output layer with 3 nodes- create a model that connects input and output- train it and make sure that it converges- define a function between inputs and second_to_last layer- recalculate the features and plot them
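For reference, a minimal sketch of the exact layer sizes the exercise describes, written with the functional API (an addition; the solution in the next cell takes a slightly different route, using a Dropout layer instead of the 2-node `second_to_last` layer):
```Python
import keras.backend as K
from keras.layers import Input, Dense
from keras.models import Model

inputs = Input(shape=(13,))
x = Dense(8, activation='relu')(inputs)
x = Dense(5, activation='relu')(x)
second_to_last = Dense(2, activation='relu')(x)
outputs = Dense(3, activation='softmax')(second_to_last)

model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# feature function between the input and the 2-node second_to_last layer
features_function = K.function([inputs], [second_to_last])
```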
###Code
from keras.layers import Input, Dense
from keras.models import Model
K.clear_session()
# This returns a tensor
inputs = Input(shape=(13,))
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(8, activation='relu')(inputs)
x = Dense(5, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(3, activation='softmax')(x)
# This creates a model that includes
# the Input layer and three Dense layers
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer=Adam(0.01),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(X_train, y_train, epochs=100, validation_split=0.2) # starts training
result = model.evaluate(X_test, y_test)
print(result)
inp = model.layers[0].input
out = model.layers[1].output
features_function = K.function([inp], [out])
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
###Output
_____no_output_____
###Markdown
Exercise 4 Keras offers the possibility to call a function at each epoch. These are Callbacks, and their [documentation is here](https://keras.io/callbacks/). Callbacks allow us to add some neat functionality. In this exercise we'll explore a few of them.- Split the data into train and test sets with a test_size = 0.3 and random_state=42- Reset and recompile your model- train the model on the train data using `validation_data=(X_test, y_test)`- Use the `EarlyStopping` callback to stop your training if the `val_loss` doesn't improve- Use the `ModelCheckpoint` callback to save the trained model to disk once training is finished- Use the `TensorBoard` callback to output your training information to a `/tmp/` subdirectory- Watch the next video for an overview of tensorboard
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y_code, test_size=0.3, random_state=42)
from keras.layers import Input, Dense
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
K.clear_session()
a = EarlyStopping(monitor='val_loss', min_delta=0, patience=1, verbose=1, mode='auto')
b = ModelCheckpoint(filepath='../models/course5.hdf5', verbose=1, save_best_only=True)
c = TensorBoard(log_dir='./logs/2',write_graph=True, write_images=True)
callbacks = [a, b, c]
# This returns a tensor
model = Sequential()
model.add(Dense(8, input_shape=(13,), activation='tanh', kernel_initializer='lecun_uniform'))
model.add(Dense(5, activation='tanh', kernel_initializer='lecun_uniform'))
model.add(Dense(2, activation='tanh',kernel_initializer='lecun_uniform'))
model.add(Dense(3, activation='softmax',kernel_initializer='lecun_uniform'))
model.compile(optimizer=eval('Adam(lr=0.04)'),loss='categorical_crossentropy',metrics=['accuracy'])
model.fit(X_train, y_train, epochs=100, validation_data=(X_test, y_test),callbacks=callbacks) # starts training
result = model.evaluate(X_test, y_test)
print(result)
###Output
Train on 124 samples, validate on 54 samples
Epoch 1/100
124/124 [==============================] - 0s 1ms/step - loss: 1.0628 - acc: 0.2742 - val_loss: 0.8688 - val_acc: 0.6111
Epoch 00001: val_loss improved from inf to 0.86877, saving model to ../models/course5.hdf5
Epoch 2/100
124/124 [==============================] - 0s 96us/step - loss: 0.8036 - acc: 0.6532 - val_loss: 0.6814 - val_acc: 0.6296
Epoch 00002: val_loss improved from 0.86877 to 0.68137, saving model to ../models/course5.hdf5
Epoch 3/100
124/124 [==============================] - 0s 114us/step - loss: 0.6484 - acc: 0.6613 - val_loss: 0.6024 - val_acc: 0.6296
Epoch 00003: val_loss improved from 0.68137 to 0.60244, saving model to ../models/course5.hdf5
Epoch 4/100
124/124 [==============================] - 0s 105us/step - loss: 0.5810 - acc: 0.6694 - val_loss: 0.5855 - val_acc: 0.6296
Epoch 00004: val_loss improved from 0.60244 to 0.58549, saving model to ../models/course5.hdf5
Epoch 5/100
124/124 [==============================] - 0s 104us/step - loss: 0.5569 - acc: 0.6532 - val_loss: 0.5542 - val_acc: 0.6296
Epoch 00005: val_loss improved from 0.58549 to 0.55420, saving model to ../models/course5.hdf5
Epoch 6/100
124/124 [==============================] - 0s 118us/step - loss: 0.5251 - acc: 0.7661 - val_loss: 0.5163 - val_acc: 0.7593
Epoch 00006: val_loss improved from 0.55420 to 0.51630, saving model to ../models/course5.hdf5
Epoch 7/100
124/124 [==============================] - 0s 114us/step - loss: 0.5012 - acc: 0.7984 - val_loss: 0.4712 - val_acc: 0.8148
Epoch 00007: val_loss improved from 0.51630 to 0.47121, saving model to ../models/course5.hdf5
Epoch 8/100
124/124 [==============================] - 0s 125us/step - loss: 0.4598 - acc: 0.7581 - val_loss: 0.4146 - val_acc: 0.8889
Epoch 00008: val_loss improved from 0.47121 to 0.41461, saving model to ../models/course5.hdf5
Epoch 9/100
124/124 [==============================] - 0s 119us/step - loss: 0.4176 - acc: 0.8387 - val_loss: 0.3634 - val_acc: 0.9074
Epoch 00009: val_loss improved from 0.41461 to 0.36342, saving model to ../models/course5.hdf5
Epoch 10/100
124/124 [==============================] - 0s 131us/step - loss: 0.3720 - acc: 0.8790 - val_loss: 0.2837 - val_acc: 0.9444
Epoch 00010: val_loss improved from 0.36342 to 0.28370, saving model to ../models/course5.hdf5
Epoch 11/100
124/124 [==============================] - 0s 161us/step - loss: 0.2923 - acc: 0.9516 - val_loss: 0.2492 - val_acc: 0.9444
Epoch 00011: val_loss improved from 0.28370 to 0.24919, saving model to ../models/course5.hdf5
Epoch 12/100
124/124 [==============================] - 0s 127us/step - loss: 0.2349 - acc: 0.9435 - val_loss: 0.1905 - val_acc: 0.9630
Epoch 00012: val_loss improved from 0.24919 to 0.19051, saving model to ../models/course5.hdf5
Epoch 13/100
124/124 [==============================] - 0s 124us/step - loss: 0.1964 - acc: 0.9597 - val_loss: 0.1633 - val_acc: 0.9630
Epoch 00013: val_loss improved from 0.19051 to 0.16332, saving model to ../models/course5.hdf5
Epoch 14/100
124/124 [==============================] - 0s 116us/step - loss: 0.1663 - acc: 0.9597 - val_loss: 0.1044 - val_acc: 0.9815
Epoch 00014: val_loss improved from 0.16332 to 0.10441, saving model to ../models/course5.hdf5
Epoch 15/100
124/124 [==============================] - 0s 123us/step - loss: 0.1590 - acc: 0.9597 - val_loss: 0.0985 - val_acc: 0.9815
Epoch 00015: val_loss improved from 0.10441 to 0.09850, saving model to ../models/course5.hdf5
Epoch 16/100
124/124 [==============================] - 0s 111us/step - loss: 0.1563 - acc: 0.9516 - val_loss: 0.0718 - val_acc: 1.0000
Epoch 00016: val_loss improved from 0.09850 to 0.07185, saving model to ../models/course5.hdf5
Epoch 17/100
124/124 [==============================] - 0s 123us/step - loss: 0.1619 - acc: 0.9435 - val_loss: 0.0731 - val_acc: 0.9815
Epoch 00017: val_loss did not improve from 0.07185
Epoch 00017: early stopping
54/54 [==============================] - 0s 55us/step
[0.07309737760159704, 0.9814814814814815]
###Markdown
Gradient Descent
###Code
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Linear Algebra with Numpy
###Code
a = np.array([1, 3, 2, 4])
a
type(a)
A = np.array([[3, 1, 2],
[2, 3, 4]])
B = np.array([[0, 1],
[2, 3],
[4, 5]])
C = np.array([[0, 1],
[2, 3],
[4, 5],
[0, 1],
[2, 3],
[4, 5]])
print("A is a {} matrix".format(A.shape))
print("B is a {} matrix".format(B.shape))
print("C is a {} matrix".format(C.shape))
A[0]
C[2, 0]
B[:, 0]
###Output
_____no_output_____
###Markdown
Elementwise operations
###Code
3 * A
A + A
A * A
A / A
A - A
###Output
_____no_output_____
###Markdown
Uncomment the code in the next cells. You will see that tensors of different shape cannot be added or multiplied:
###Code
# A + B
# A * B
###Output
_____no_output_____
###Markdown
Dot product
###Code
A.shape
B.shape
A.dot(B)
np.dot(A, B)
B.dot(A)
C.shape
A.shape
C.dot(A)
###Output
_____no_output_____
###Markdown
Uncomment the code in the next cell to visualize the error:
###Code
# A.dot(C)
###Output
_____no_output_____
###Markdown
Gradient descent 
###Code
df = pd.read_csv('../data/banknotes.csv')
df.head()
df['class'].value_counts()
import seaborn as sns
sns.pairplot(df, hue="class")
###Output
_____no_output_____
###Markdown
Baseline model
###Code
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import scale
X = scale(df.drop('class', axis=1).values)
y = df['class'].values
model = RandomForestClassifier()
cross_val_score(model, X, y)
###Output
_____no_output_____
###Markdown
Logistic Regression Model
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3,
random_state=42)
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
history = model.fit(X_train, y_train)
result = model.evaluate(X_test, y_test)
historydf = pd.DataFrame(history.history, index=history.epoch)
historydf.plot(ylim=(0,1))
plt.title("Test accuracy: {:3.1f} %".format(result[1]*100), fontsize=15)
###Output
_____no_output_____
###Markdown
Learning Rates
###Code
dflist = []
learning_rates = [0.01, 0.05, 0.1, 0.5]
for lr in learning_rates:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=SGD(lr=lr),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
historydf
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([learning_rates, metrics_reported],
names=['learning_rate', 'metric'])
historydf.columns = idx
historydf
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Batch Sizes
###Code
dflist = []
batch_sizes = [16, 32, 64, 128]
for batch_size in batch_sizes:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=batch_size, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([batch_sizes, metrics_reported],
names=['batch_size', 'metric'])
historydf.columns = idx
historydf
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Optimizers
###Code
from keras.optimizers import SGD, Adam, Adagrad, RMSprop
dflist = []
optimizers = ['SGD(lr=0.01)',
'SGD(lr=0.01, momentum=0.3)',
'SGD(lr=0.01, momentum=0.3, nesterov=True)',
'Adam(lr=0.01)',
'Adagrad(lr=0.01)',
'RMSprop(lr=0.01)']
for opt_name in optimizers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=eval(opt_name),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([optimizers, metrics_reported],
names=['optimizers', 'metric'])
historydf.columns = idx
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Initialization https://keras.io/initializers/
###Code
dflist = []
initializers = ['zeros', 'uniform', 'normal',
'he_normal', 'lecun_uniform']
for init in initializers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,),
kernel_initializer=init,
activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([initializers, metrics_reported],
names=['initializers', 'metric'])
historydf.columns = idx
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Inner layer representation
###Code
K.clear_session()
model = Sequential()
model.add(Dense(2, input_shape=(4,), activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.01),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=20,
verbose=1, validation_split=0.3)
result = model.evaluate(X_test, y_test)
result
model.summary()
model.layers
inp = model.layers[0].input
out = model.layers[0].output
inp
out
features_function = K.function([inp], [out])
features_function
features_function([X_test])[0].shape
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
K.clear_session()
model = Sequential()
model.add(Dense(3, input_shape=(4,), activation='relu'))
model.add(Dense(2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.01),
metrics=['accuracy'])
inp = model.layers[0].input
out = model.layers[1].output
features_function = K.function([inp], [out])
plt.figure(figsize=(15,10))
for i in range(1, 26):
plt.subplot(5, 5, i)
h = model.fit(X_train, y_train, batch_size=16, epochs=1, verbose=0)
test_accuracy = model.evaluate(X_test, y_test)[1]
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
plt.xlim(-0.5, 3.5)
plt.ylim(-0.5, 4.0)
plt.title('Epoch: {}, Test Acc: {:3.1f} %'.format(i, test_accuracy * 100.0))
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Gradient Descent
###Code
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Linear Algebra with Numpy
###Code
a = np.array([1, 3, 2, 4])
a
type(a)
A = np.array([[3, 1, 2],
[2, 3, 4]])
B = np.array([[0, 1],
[2, 3],
[4, 5]])
C = np.array([[0, 1],
[2, 3],
[4, 5],
[0, 1],
[2, 3],
[4, 5]])
print("A is a {} matrix".format(A.shape))
print("B is a {} matrix".format(B.shape))
print("C is a {} matrix".format(C.shape))
A[0]
C[2, 0]
B[:, 0]
###Output
_____no_output_____
###Markdown
Elementwise operations
###Code
3 * A
A + A
A * A
A / A
A - A
A + B
A * B
###Output
_____no_output_____
###Markdown
Dot product
###Code
A.shape
B.shape
A.dot(B)
np.dot(A, B)
B.dot(A)
C.shape
A.shape
C.dot(A)
A.dot(C)
###Output
_____no_output_____
###Markdown
Gradient descent 
###Code
df = pd.read_csv('../data/banknotes.csv')
df.head()
df['class'].value_counts()
import seaborn as sns
sns.pairplot(df, hue="class")
###Output
_____no_output_____
###Markdown
Baseline model
###Code
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import scale
X = scale(df.drop('class', axis=1).values)
y = df['class'].values
model = RandomForestClassifier()
cross_val_score(model, X, y)
###Output
_____no_output_____
###Markdown
Logistic Regression Model
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3,
random_state=42)
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
history = model.fit(X_train, y_train)
result = model.evaluate(X_test, y_test)
historydf = pd.DataFrame(history.history, index=history.epoch)
historydf.plot(ylim=(0,1))
plt.title("Test accuracy: {:3.1f} %".format(result[1]*100), fontsize=15)
###Output
_____no_output_____
###Markdown
Learning Rates
###Code
dflist = []
learning_rates = [0.01, 0.05, 0.1, 0.5]
for lr in learning_rates:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=SGD(lr=lr),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
historydf
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([learning_rates, metrics_reported],
names=['learning_rate', 'metric'])
historydf.columns = idx
historydf
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Batch Sizes
###Code
dflist = []
batch_sizes = [16, 32, 64, 128]
for batch_size in batch_sizes:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=batch_size, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([batch_sizes, metrics_reported],
names=['batch_size', 'metric'])
historydf.columns = idx
historydf
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Optimizers
###Code
from keras.optimizers import SGD, Adam, Adagrad, RMSprop
dflist = []
optimizers = ['SGD(lr=0.01)',
'SGD(lr=0.01, momentum=0.3)',
'SGD(lr=0.01, momentum=0.3, nesterov=True)',
'Adam(lr=0.01)',
'Adagrad(lr=0.01)',
'RMSprop(lr=0.01)']
for opt_name in optimizers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=eval(opt_name),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([optimizers, metrics_reported],
names=['optimizers', 'metric'])
historydf.columns = idx
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Initializationhttps://keras.io/initializers/
###Code
dflist = []
initializers = ['zeros', 'uniform', 'normal',
'he_normal', 'lecun_uniform']
for init in initializers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,),
kernel_initializer=init,
activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([initializers, metrics_reported],
names=['initializers', 'metric'])
historydf.columns = idx
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Inner layer representation
###Code
K.clear_session()
model = Sequential()
model.add(Dense(2, input_shape=(4,), activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.01),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=20,
verbose=1, validation_split=0.3)
result = model.evaluate(X_test, y_test)
result
model.summary()
model.layers
inp = model.layers[0].input
out = model.layers[0].output
inp
out
features_function = K.function([inp], [out])
features_function
features_function([X_test])[0].shape
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
K.clear_session()
model = Sequential()
model.add(Dense(3, input_shape=(4,), activation='relu'))
model.add(Dense(2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.01),
metrics=['accuracy'])
inp = model.layers[0].input
out = model.layers[1].output
features_function = K.function([inp], [out])
plt.figure(figsize=(15,10))
print(X_test)
print(y_test)
for i in range(1, 26):
plt.subplot(5, 5, i)
h = model.fit(X_train, y_train, batch_size=16, epochs=1, verbose=0)
test_accuracy = model.evaluate(X_test, y_test)[1]
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
plt.xlim(-0.5, 3.5)
plt.ylim(-0.5, 4.0)
plt.title('Epoch: {}, Test Acc: {:3.1f} %'.format(i, test_accuracy * 100.0))
plt.tight_layout()
###Output
[[ 0.39953345 0.75408499 -0.36681957 -0.58004763]
[-0.25046579 1.0679812 -1.05175122 -0.63608865]
[-0.13433955 0.87435278 -0.80115119 -0.93281561]
...,
[-0.0950673 -0.90009142 -0.005275 1.21345543]
[ 0.6923484 -0.05114332 0.37915479 0.90867704]
[ 0.90338209 0.66308304 -0.84211711 0.25050694]]
[0 0 0 0 0 0 0 0 0 0 1 1 0 1 0 1 1 1 1 1 0 0 1 0 1 0 0 1 0 0 1 0 0 1 1 0 1
1 1 0 0 1 1 0 1 1 1 0 0 1 0 0 0 0 0 1 0 0 0 0 1 0 1 0 0 0 0 0 0 1 1 0 1 0
1 0 0 1 1 1 1 0 1 0 0 0 0 1 1 0 0 0 1 1 0 1 1 0 0 0 1 0 0 0 1 0 0 1 1 1 1
1 0 1 1 1 0 1 1 0 1 0 1 0 1 0 1 1 0 1 1 0 0 0 0 0 1 0 0 0 0 0 1 0 1 1 1 1
1 0 1 1 1 0 1 0 1 0 0 0 1 1 1 1 1 0 1 0 0 0 0 0 0 1 0 0 1 1 0 0 0 0 1 0 1
0 1 1 0 0 1 0 0 1 1 1 1 0 0 1 1 1 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 1 1 1 1
1 0 1 0 0 1 1 1 1 0 1 0 1 1 1 1 0 0 0 1 0 1 1 1 0 0 0 0 0 0 1 0 1 0 0 0 1
1 0 0 0 1 1 0 1 0 1 1 1 1 0 0 0 0 0 1 0 0 1 0 1 0 0 1 0 0 0 0 0 1 1 0 1 0
1 1 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 0 0 0 0 0 1 0 1 1 1 0 0 0 0 1 0 0 0 0 0
0 0 1 0 1 1 1 1 0 1 1 0 1 1 0 0 1 0 0 1 0 1 0 1 1 0 0 1 0 0 0 0 0 0 1 0 0
1 0 0 0 0 1 1 0 1 0 1 1 1 0 1 1 0 1 1 0 1 0 0 0 1 1 1 0 0 0 0 1 0 0 1 1 1
1 0 1 0 0]
412/412 [==============================] - 0s 59us/step
412/412 [==============================] - 0s 30us/step
412/412 [==============================] - 0s 24us/step
412/412 [==============================] - 0s 23us/step
412/412 [==============================] - 0s 23us/step
412/412 [==============================] - 0s 26us/step
412/412 [==============================] - 0s 27us/step
412/412 [==============================] - 0s 25us/step
412/412 [==============================] - 0s 23us/step
412/412 [==============================] - 0s 21us/step
412/412 [==============================] - 0s 23us/step
412/412 [==============================] - 0s 22us/step
412/412 [==============================] - 0s 28us/step
412/412 [==============================] - 0s 27us/step
412/412 [==============================] - 0s 23us/step
412/412 [==============================] - 0s 27us/step
412/412 [==============================] - 0s 20us/step
412/412 [==============================] - 0s 23us/step
412/412 [==============================] - 0s 22us/step
412/412 [==============================] - 0s 35us/step
412/412 [==============================] - 0s 26us/step
412/412 [==============================] - 0s 28us/step
412/412 [==============================] - 0s 28us/step
412/412 [==============================] - 0s 24us/step
412/412 [==============================] - 0s 22us/step
###Markdown
 Exercise 1You've just been hired at a wine company and they would like you to help them build a model that predicts the quality of their wine based on several measurements. They give you a dataset of wine measurements:- Load the ../data/wines.csv into Pandas- Use the column called "Class" as target- Check how many classes there are in the target, and if necessary use dummy columns for a multi-class classification- Use all the other columns as features, check their range and distribution (using seaborn pairplot)- Rescale all the features using either MinMaxScaler or StandardScaler- Build a deep model with at least 1 hidden layer to classify the data- Choose the cost function, what will you use? Mean Squared Error? Binary Cross-Entropy? Categorical Cross-Entropy?- Choose an optimizer- Choose a value for the learning rate, you may want to try with several values- Choose a batch size- Train your model on all the data using a `validation_split=0.2`. Can you converge to 100% validation accuracy?- What's the minimum number of epochs to converge?- Repeat the training several times to verify how stable your results are
###Code
import pandas as pd
import numpy as np
from sklearn import preprocessing
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
matplotlib.style.use('ggplot')
np.random.seed(1)
df = pd.DataFrame({
'x1': np.random.normal(0, 2, 10000),
'x2': np.random.normal(5, 3, 10000),
'x3': np.random.normal(-5, 5, 10000)
})
df.head()
scaler = preprocessing.MinMaxScaler()
scaled_df = scaler.fit_transform(df)
scaled_df = pd.DataFrame(scaled_df, columns=['x1', 'x2', 'x3'])
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(6, 5))
ax1.set_title('Before Scaling')
sns.kdeplot(df['x1'], ax=ax1)
sns.kdeplot(df['x2'], ax=ax1)
sns.kdeplot(df['x3'], ax=ax1)
ax2.set_title('After Standard Scaler')
sns.kdeplot(scaled_df['x1'], ax=ax2)
sns.kdeplot(scaled_df['x2'], ax=ax2)
sns.kdeplot(scaled_df['x3'], ax=ax2)
plt.show()
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
df = pd.read_csv('../data/wines.csv')
df.describe()
df['Class'].value_counts()
y = df['Class']
y_cat = pd.get_dummies(y)
y_cat.head()
X = df.drop('Class', axis=1)
X.shape
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
Xsc = sc.fit_transform(X)
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD, RMSprop
K.clear_session()
model = Sequential()
model.add(Dense(5, input_shape=(13,),
kernel_initializer='he_normal',
activation='relu'
))
model.add(Dense(y_cat.shape[1], activation='softmax'))  # one output unit per class in y_cat
model.compile(RMSprop(lr=0.1),
'categorical_crossentropy',
metrics=['accuracy'])
model.fit(Xsc, y_cat.values, batch_size=8, epochs=10, verbose=1, validation_split=0.2)
###Output
_____no_output_____
###Markdown
Gradient Descent
###Code
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Linear Algebra with Numpy
###Code
a = np.array([1, 3, 2, 4])
a
type(a)
A = np.array([[3, 1, 2],
[2, 3, 4]])
B = np.array([[0, 1],
[2, 3],
[4, 5]])
C = np.array([[0, 1],
[2, 3],
[4, 5],
[0, 1],
[2, 3],
[4, 5]])
print("A is a {} matrix".format(A.shape))
print("B is a {} matrix".format(B.shape))
print("C is a {} matrix".format(C.shape))
A[0]
C[2, 0]
B[:, 0]
###Output
_____no_output_____
###Markdown
Elementwise operations
###Code
3 * A
A + A
A * A
A / A
A - A
A + B
A * B
###Output
_____no_output_____
###Markdown
Dot product
###Code
A.shape
B.shape
A.dot(B)
np.dot(A, B)
B.dot(A)
C.shape
A.shape
C.dot(A)
A.dot(C)
###Output
_____no_output_____
###Markdown
Gradient descent 
###Code
df = pd.read_csv('../data/banknotes.csv')
df.head()
df['class'].value_counts()
import seaborn as sns
sns.pairplot(df, hue="class")
###Output
_____no_output_____
###Markdown
Baseline model
###Code
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import scale
X = scale(df.drop('class', axis=1).values)
y = df['class'].values
model = RandomForestClassifier()
cross_val_score(model, X, y)
###Output
_____no_output_____
###Markdown
Logistic Regression Model
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3,
random_state=42)
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
history = model.fit(X_train, y_train)
result = model.evaluate(X_test, y_test)
historydf = pd.DataFrame(history.history, index=history.epoch)
historydf.plot(ylim=(0,1))
plt.title("Test accuracy: {:3.1f} %".format(result[1]*100), fontsize=15)
###Output
_____no_output_____
###Markdown
Learning Rates
###Code
dflist = []
learning_rates = [0.01, 0.05, 0.1, 0.5]
for lr in learning_rates:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=SGD(lr=lr),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
historydf
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([learning_rates, metrics_reported],
names=['learning_rate', 'metric'])
historydf.columns = idx
historydf
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Batch Sizes
###Code
dflist = []
batch_sizes = [16, 32, 64, 128]
for batch_size in batch_sizes:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=batch_size, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([batch_sizes, metrics_reported],
names=['batch_size', 'metric'])
historydf.columns = idx
historydf
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Optimizers
###Code
from keras.optimizers import SGD, Adam, Adagrad, RMSprop
dflist = []
optimizers = ['SGD(lr=0.01)',
'SGD(lr=0.01, momentum=0.3)',
'SGD(lr=0.01, momentum=0.3, nesterov=True)',
'Adam(lr=0.01)',
'Adagrad(lr=0.01)',
'RMSprop(lr=0.01)']
for opt_name in optimizers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=eval(opt_name),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([optimizers, metrics_reported],
names=['optimizers', 'metric'])
historydf.columns = idx
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Initializationhttps://keras.io/initializers/
###Code
dflist = []
initializers = ['zeros', 'uniform', 'normal',
'he_normal', 'lecun_uniform']
for init in initializers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,),
kernel_initializer=init,
activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([initializers, metrics_reported],
names=['initializers', 'metric'])
historydf.columns = idx
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Inner layer representation
###Code
K.clear_session()
model = Sequential()
model.add(Dense(2, input_shape=(4,), activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.01),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=20,
verbose=1, validation_split=0.3)
result = model.evaluate(X_test, y_test)
result
model.summary()
model.layers
inp = model.layers[0].input
out = model.layers[0].output
inp
out
features_function = K.function([inp], [out])
features_function
features_function([X_test])[0].shape
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
K.clear_session()
model = Sequential()
model.add(Dense(3, input_shape=(4,), activation='relu'))
model.add(Dense(2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.01),
metrics=['accuracy'])
inp = model.layers[0].input
out = model.layers[1].output
features_function = K.function([inp], [out])
plt.figure(figsize=(15,10))
for i in range(1, 26):
plt.subplot(5, 5, i)
h = model.fit(X_train, y_train, batch_size=16, epochs=1, verbose=0)
test_accuracy = model.evaluate(X_test, y_test)[1]
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
plt.xlim(-0.5, 3.5)
plt.ylim(-0.5, 4.0)
plt.title('Epoch: {}, Test Acc: {:3.1f} %'.format(i, test_accuracy * 100.0))
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Gradient Descent
###Code
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Linear Algebra with Numpy
###Code
a = np.array([1, 3, 2, 4])
a
type(a)
A = np.array([[3, 1, 2],
[2, 3, 4]])
B = np.array([[0, 1],
[2, 3],
[4, 5]])
C = np.array([[0, 1],
[2, 3],
[4, 5],
[0, 1],
[2, 3],
[4, 5]])
print("A is a {} matrix".format(A.shape))
print("B is a {} matrix".format(B.shape))
print("C is a {} matrix".format(C.shape))
A[0]
C[2, 0]
B[:, 0]
###Output
_____no_output_____
###Markdown
Elementwise operations
###Code
3 * A
A + A
A * A
A / A
A - A
###Output
_____no_output_____
###Markdown
Uncomment the code in the next cells. You will see that tensors of different shape cannot be added or multiplied:
###Code
# A + B
# A * B
###Output
_____no_output_____
###Markdown
Dot product
###Code
A.shape
B.shape
A.dot(B)
np.dot(A, B)
B.dot(A)
C.shape
A.shape
C.dot(A)
###Output
_____no_output_____
###Markdown
Uncomment the code in the next cell to visualize the error:
###Code
# A.dot(C)
###Output
_____no_output_____
###Markdown
Gradient descent 
###Code
df = pd.read_csv('../data/banknotes.csv')
df.head()
df['class'].value_counts()
import seaborn as sns
sns.pairplot(df, hue="class")
###Output
_____no_output_____
###Markdown
Baseline model
###Code
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import scale
X = scale(df.drop('class', axis=1).values)
y = df['class'].values
model = RandomForestClassifier()
cross_val_score(model, X, y)
###Output
_____no_output_____
###Markdown
Logistic Regression Model
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3,
random_state=42)
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
history = model.fit(X_train, y_train)
result = model.evaluate(X_test, y_test)
historydf = pd.DataFrame(history.history, index=history.epoch)
historydf.plot(ylim=(0,1))
plt.title("Test accuracy: {:3.1f} %".format(result[1]*100), fontsize=15)
###Output
_____no_output_____
###Markdown
Learning Rates
###Code
dflist = []
learning_rates = [0.01, 0.05, 0.1, 0.5]
for lr in learning_rates:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=SGD(lr=lr),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
historydf
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([learning_rates, metrics_reported],
names=['learning_rate', 'metric'])
historydf.columns = idx
historydf
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Batch Sizes
###Code
dflist = []
batch_sizes = [16, 32, 64, 128]
for batch_size in batch_sizes:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=batch_size, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([batch_sizes, metrics_reported],
names=['batch_size', 'metric'])
historydf.columns = idx
historydf
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Optimizers
###Code
from keras.optimizers import SGD, Adam, Adagrad, RMSprop
dflist = []
optimizers = ['SGD(lr=0.01)',
'SGD(lr=0.01, momentum=0.3)',
'SGD(lr=0.01, momentum=0.3, nesterov=True)',
'Adam(lr=0.01)',
'Adagrad(lr=0.01)',
'RMSprop(lr=0.01)']
for opt_name in optimizers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=eval(opt_name),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([optimizers, metrics_reported],
names=['optimizers', 'metric'])
historydf.columns = idx
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Initializationhttps://keras.io/initializers/
###Code
dflist = []
initializers = ['zeros', 'uniform', 'normal',
'he_normal', 'lecun_uniform']
for init in initializers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,),
kernel_initializer=init,
activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([initializers, metrics_reported],
names=['initializers', 'metric'])
historydf.columns = idx
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Inner layer representation
###Code
K.clear_session()
model = Sequential()
model.add(Dense(2, input_shape=(4,), activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.01),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=20,
verbose=1, validation_split=0.3)
result = model.evaluate(X_test, y_test)
result
model.summary()
model.layers
inp = model.layers[0].input
out = model.layers[0].output
inp
out
features_function = K.function([inp], [out])
features_function
features_function([X_test])[0].shape
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
K.clear_session()
model = Sequential()
model.add(Dense(3, input_shape=(4,), activation='relu'))
model.add(Dense(2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.01),
metrics=['accuracy'])
inp = model.layers[0].input
out = model.layers[1].output
features_function = K.function([inp], [out])
plt.figure(figsize=(15,10))
for i in range(1, 26):
plt.subplot(5, 5, i)
h = model.fit(X_train, y_train, batch_size=16, epochs=1, verbose=0)
test_accuracy = model.evaluate(X_test, y_test)[1]
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
plt.xlim(-0.5, 3.5)
plt.ylim(-0.5, 4.0)
plt.title('Epoch: {}, Test Acc: {:3.1f} %'.format(i, test_accuracy * 100.0))
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Gradient Descent
###Code
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Linear Algebra with Numpy
###Code
a = np.array([1, 3, 2, 4])
a
type(a)
A = np.array([[3, 1, 2],
[2, 3, 4]])
B = np.array([[0, 1],
[2, 3],
[4, 5]])
C = np.array([[0, 1],
[2, 3],
[4, 5],
[0, 1],
[2, 3],
[4, 5]])
print("A is a {} matrix".format(A.shape))
print("B is a {} matrix".format(B.shape))
print("C is a {} matrix".format(C.shape))
A[0]
C[2, 0]
B[:, 0]
###Output
_____no_output_____
###Markdown
Elementwise operations
###Code
3 * A
A + A
A * A
A / A
A - A
A + B
A * B
###Output
_____no_output_____
###Markdown
Dot product
###Code
A.shape
B.shape
A.dot(B)
np.dot(A, B)
B.dot(A)
C.shape
A.shape
C.dot(A)
A.dot(C)
###Output
_____no_output_____
###Markdown
Gradient descent 
###Code
df = pd.read_csv('../data/banknotes.csv')
df.head()
df['class'].value_counts()
import seaborn as sns
sns.pairplot(df, hue="class")
###Output
_____no_output_____
###Markdown
Baseline model
###Code
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import scale
X = scale(df.drop('class', axis=1).values)
y = df['class'].values
model = RandomForestClassifier()
cross_val_score(model, X, y)
###Output
_____no_output_____
###Markdown
Logistic Regression Model
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3,
random_state=42)
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
history = model.fit(X_train, y_train)
result = model.evaluate(X_test, y_test)
historydf = pd.DataFrame(history.history, index=history.epoch)
historydf.plot(ylim=(0,1))
plt.title("Test accuracy: {:3.1f} %".format(result[1]*100), fontsize=15)
###Output
_____no_output_____
###Markdown
Learning Rates
###Code
dflist = []
learning_rates = [0.01, 0.05, 0.1, 0.5]
for lr in learning_rates:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=SGD(lr=lr),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
historydf
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([learning_rates, metrics_reported],
names=['learning_rate', 'metric'])
historydf.columns = idx
historydf
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Batch Sizes
###Code
dflist = []
batch_sizes = [16, 32, 64, 128]
for batch_size in batch_sizes:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=batch_size, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([batch_sizes, metrics_reported],
names=['batch_size', 'metric'])
historydf.columns = idx
historydf
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Optimizers
###Code
from keras.optimizers import SGD, Adam, Adagrad, RMSprop
dflist = []
optimizers = ['SGD(lr=0.01)',
'SGD(lr=0.01, momentum=0.3)',
'SGD(lr=0.01, momentum=0.3, nesterov=True)',
'Adam(lr=0.01)',
'Adagrad(lr=0.01)',
'RMSprop(lr=0.01)']
for opt_name in optimizers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=eval(opt_name),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([optimizers, metrics_reported],
names=['optimizers', 'metric'])
historydf.columns = idx
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Initializationhttps://keras.io/initializers/
###Code
dflist = []
initializers = ['zeros', 'uniform', 'normal',
'he_normal', 'lecun_uniform']
for init in initializers:
K.clear_session()
model = Sequential()
model.add(Dense(1, input_shape=(4,),
kernel_initializer=init,
activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=0)
dflist.append(pd.DataFrame(h.history, index=h.epoch))
historydf = pd.concat(dflist, axis=1)
metrics_reported = dflist[0].columns
idx = pd.MultiIndex.from_product([initializers, metrics_reported],
names=['initializers', 'metric'])
historydf.columns = idx
ax = plt.subplot(211)
historydf.xs('loss', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Loss")
ax = plt.subplot(212)
historydf.xs('acc', axis=1, level='metric').plot(ylim=(0,1), ax=ax)
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Inner layer representation
###Code
K.clear_session()
model = Sequential()
model.add(Dense(2, input_shape=(4,), activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.01),
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=16, epochs=20,
verbose=1, validation_split=0.3)
result = model.evaluate(X_test, y_test)
result
model.summary()
model.layers
inp = model.layers[0].input
out = model.layers[0].output
inp
out
features_function = K.function([inp], [out])
features_function
features_function([X_test])[0].shape
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
K.clear_session()
model = Sequential()
model.add(Dense(3, input_shape=(4,), activation='relu'))
model.add(Dense(2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.01),
metrics=['accuracy'])
inp = model.layers[0].input
out = model.layers[1].output
features_function = K.function([inp], [out])
plt.figure(figsize=(15,10))
for i in range(1, 26):
plt.subplot(5, 5, i)
h = model.fit(X_train, y_train, batch_size=16, epochs=1, verbose=0)
test_accuracy = model.evaluate(X_test, y_test)[1]
features = features_function([X_test])[0]
plt.scatter(features[:, 0], features[:, 1], c=y_test, cmap='coolwarm')
plt.xlim(-0.5, 3.5)
plt.ylim(-0.5, 4.0)
plt.title('Epoch: {}, Test Acc: {:3.1f} %'.format(i, test_accuracy * 100.0))
plt.tight_layout()
###Output
32/412 [=>............................] - ETA: 0s |
lab02.ipynb | ###Markdown
Module for computations
###Code
import FEM_utilities as FEM
###Output
_____no_output_____
###Markdown
Parameters:
###Code
a = 1000. # lenght [mm]
b = 200. # width [mm]
t = 1.5 # thickness [mm]
###Output
_____no_output_____
###Markdown
Compute node coordinates, elements, constrained and loaded nodes
###Code
nx = 40
ny = 8
nodes, elements, fixed, loaded = FEM.Nodes(nx, ny, a, b)
#nodes
#elements
#fixed
#loaded
###Output
_____no_output_____
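The `FEM_utilities` module is not included in this notebook, so the layout of the arrays returned by `FEM.Nodes` is not visible here. Below is a minimal, purely illustrative sketch of what a structured-grid mesher with the same call signature might look like; the node numbering, element connectivity and return layout are assumptions, not the actual implementation.

```python
# Hypothetical sketch of a structured-grid mesher with the same call signature
# as FEM.Nodes(nx, ny, a, b). Numbering and return layout are assumptions.
import numpy as np

def nodes_sketch(nx, ny, a, b):
    dx, dy = a / nx, b / ny
    nodes = []
    nid = 1
    for j in range(ny + 1):              # rows along the width b
        for i in range(nx + 1):          # columns along the length a
            nodes.append([nid, i * dx, j * dy, 0.0])
            nid += 1
    nodes = np.array(nodes)

    elements = []
    eid = 1
    for j in range(ny):
        for i in range(nx):
            n1 = j * (nx + 1) + i + 1            # lower-left node of the quad
            n2, n3, n4 = n1 + 1, n1 + nx + 2, n1 + nx + 1
            elements.append([eid, n1, n2, n3, n4])
            eid += 1
    elements = np.array(elements, dtype=int)

    fixed = nodes[np.isclose(nodes[:, 1], 0.0), 0].astype(int)   # clamped edge at x = 0
    loaded = nodes[np.isclose(nodes[:, 1], a), 0].astype(int)    # free tip edge at x = a
    return nodes, elements, fixed, loaded
```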
###Markdown
Write input file Set parameters for simulation:- *filename* : name of input file- *eltype* : type of shell element (see conventions [here](http://media.3ds.com/support/documentation/product/V6R2013/en/English/MpeUsbHtml/pt06ch29s06alm17.htm))- *matname*: name of material- **E**, $\nu$: elastic properties (Young modulus and Poisson ratio, as *TYPE* is by default **isotropic**) - *elname*: **\*Elset** assigned name- *bcname*, *loadextname*, *loadintname*: names for boundary condition, external and internal nodes loaded - *pa*, *pb*,*ax*, $\alpha$: coordinate points, axis and angle used to define local coordinate system (see conventions [here](http://media.3ds.com/support/documentation/product/V6R2013/en/English/MpeKeyHtml/ch15abk01.htmusb-kws-morientation)) - *intpoints*: shell integration points- *F*: force at tip of plate [N/mm]
###Code
basename = 'test_'+str(nx)+'x'+str(ny)+'y'
filename = basename+'.inp'
eltype = 'S4R' #'SC8R'
matname = 'material_1'
E = 72000. # modulus [MPa]
ν = 0.33 # Poisson's coefficient
elname = 'plate_1'
bcname = 'fixed_nodes'
loadextnodes = 'loaded_nodes_external'
loadintnodes = 'loaded_nodes_internal'
pa = [1.0,0.0,0.0]
pb = [0.0,1.0,0.0]
# pc = [0.0,0.0,0.0]
ax = 3
α = 0.0
intpoints = 5
F = 0.02 # load [N/mm]
outfile = open(filename, "wt")
outfile.write("** Lab 02 input file test\n")
# NODES section
outfile.write("**\n")
outfile.write("** NODES\n")
outfile.write("**\n")
outfile.write("*Node, nset = nglobal\n")
for i in range(nodes.shape[0]):
nodestring = "{0:4d}".format(int(nodes[i,0]))
for j in range(1,nodes.shape[1]):
nodestring+=",{0:8}".format(nodes[i,j])
nodestring+="\n"
outfile.write(nodestring)
# ELEMENTS section
outfile.write("**\n")
outfile.write("** ELEMENTS\n")
outfile.write("**\n")
outfile.write("*Element, type = {0}\n".format(eltype))
for i in range(elements.shape[0]):
elstring = "{0:4d}".format(int(elements[i,0]))
for j in range(1,elements.shape[1]):
elstring+=",{0:4d}".format(int(elements[i,j]))
elstring+="\n"
outfile.write(elstring)
# MATERIAL section
outfile.write("**\n")
outfile.write("** MATERIALS\n")
outfile.write("**\n")
outfile.write("*Material, name = {0}\n".format(matname))
outfile.write("*Elastic\n")
outfile.write("{0},{1:6}\n".format(E,ν))
# SETS section
# NODES
outfile.write("**\n")
outfile.write("** SETS\n")
outfile.write("**\n")
outfile.write("*Nset, nset = {0}\n".format(bcname))
fix_str = "{0:4d}".format(int(fixed[0]))
for i in range(1,len(fixed)):
fix_str+=",{0:4d}".format(int(fixed[i]))
fix_str+="\n"
outfile.write(fix_str)
if len(loaded) > 2:
outfile.write("*Nset, nset = {0}\n".format(loadintnodes))
il_str = "{0:4d}".format(int(loaded[1]))
for i in range(2,len(loaded)-1):
il_str+=",{0:4d}".format(int(loaded[i]))
il_str+="\n"
outfile.write(il_str)
outfile.write("*Nset, nset = {0}\n".format(loadextnodes))
el_str = "{0:4d},{1:4d}\n".format(int(loaded[0]),int(loaded[-1]))
outfile.write(el_str)
# ELEMENTS
outfile.write("*Elset, elset = {0}, generate\n".format(elname))
outfile.write("{0:4d},{1:4d},{2:4d}\n".format(1,elements.shape[0],1))
# ORIENTATION
outfile.write("**\n")
outfile.write("** LOCAL ORIENTATION\n")
outfile.write("**\n")
outfile.write("*orientation, name = local_orientation\n")
outfile.write("".join(str(pa+pb))[1:-1])
outfile.write("\n")
outfile.write("{0},{1:4}\n".format(ax,α))
# SHELL PROPERTIES
outfile.write("**\n")
outfile.write("** SHELL PROPERTIES\n")
outfile.write("**\n")
outfile.write("*Shell Section, elset = {0}, material = {1}, orientation = local_orientation\n" \
.format(elname,matname))
outfile.write("{0:4},{1:4d}\n".format(t,intpoints))
# calculation steps
outfile.write("**\n")
outfile.write("** STEP\n")
outfile.write("**\n")
outfile.write("*Step, name = step_1\n")
outfile.write("*Static\n")
# BOUNDARY CONDITIONS
outfile.write("**\n")
outfile.write("** BOUNDARY CONDITIONS\n")
outfile.write("**\n")
outfile.write("*Boundary\n")
outfile.write("{0}, ENCASTRE\n".format(bcname))
# LOADS
outfile.write("**\n")
outfile.write("** LOADS\n")
outfile.write("**\n")
outfile.write("*Cload\n")
outfile.write("{0}, {1:2d}, {2}\n".format(loadextnodes, 3, F*b/ny/2 ))
outfile.write("{0}, {1:2d}, {2}\n".format(loadintnodes, 3, F*b/ny ))
# FIELD OUTPUT
outfile.write("**\n")
outfile.write("** FIELD OUTPUT\n")
outfile.write("**\n")
outfile.write("*Output, field, variable=PRESELECT\n")
# HISTORY OUTPUT
outfile.write("**\n")
outfile.write("** HISTORY OUTPUT\n")
outfile.write("**\n")
outfile.write("*Output, history, variable=PRESELECT\n")
# PRINT DISPLACEMENT AND STRESS DATA
outfile.write("**\n")
outfile.write("** PRINT RESULTS TO FILE\n")
outfile.write("**\n")
outfile.write("*NODE FILE\n")
outfile.write("U,\n")
outfile.write("RF,\n")
outfile.write("*EL FILE\n")
outfile.write("S,\n")
outfile.write("*End Step\n\n")
outfile.close()
###Output
_____no_output_____
###Markdown
copy file to analysis directory
###Code
if not os.path.exists('../Lab02_abaqus/'+basename):
os.makedirs('../Lab02_abaqus/'+basename)
shutil.copy(filename,'../Lab02_abaqus/'+basename)
###Output
_____no_output_____
###Markdown
 Analytical solutions: Maximum stress at $x = 350\ mm$ (here $F$ is the line load per unit width in N/mm, so the tip load resultant is $F \cdot b$):$\sigma_{xx} = \frac{F \cdot b \cdot (a-x)}{\frac{1}{12}\cdot b t^3} \cdot \frac{t}{2} = \frac{6 \cdot F \cdot (a-x)}{t^2}$
###Code
σ = 6*F*(a-350)/t**2
print("σ max = %6.3f MPa" % σ)
###Output
σ max = 34.667 MPa
###Markdown
 Tip displacement:$$\delta = \frac{F \cdot b \cdot a^3}{3 \cdot E I} = \frac{4 \cdot F \cdot a^3}{E \cdot t^3}$$
###Code
δ = 4*F*a**3/(E*t**3)
print("δ = %7.2f mm" % δ)
###Output
δ = 329.22 mm
###Markdown
$\sigma_{11} @\ 350mm$
###Code
Image(filename='../Lab02_abaqus/'+basename+'/S11.png')
S11_file = open('../Lab02_abaqus/'+basename+'/'+basename+'_S11.rpt')
S11_array = np.zeros(ny)
nodes = [int(y*nx+7./20.*nx) for y in range(ny)]
#print(nodes)
rows = S11_file.readlines()
i_array = 0
for line in rows:
data = line.split()
if len(data)==4 and data[0].isdigit():
if int(data[0]) in nodes:
S11_array[i_array] = float(data[2])
i_array += 1
#print(S11_array)
S11_array
###Output
_____no_output_____
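As a quick cross-check (not part of the original notebook), the recovered S11 values can be compared with the analytical bending stress computed earlier; the snippet below simply reuses the variables already defined above.

```python
# Illustrative cross-check: mean FEM stress across the width at x = 350 mm
# versus the analytical estimate 6*F*(a-x)/t**2 computed earlier.
if S11_array.any():
    print("mean FEM S11 at x = 350 mm : {:7.3f} MPa".format(S11_array.mean()))
    print("analytical estimate        : {:7.3f} MPa".format(6*F*(a-350)/t**2))
```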
###Markdown
Tip displacement
###Code
Image(filename='../Lab02_abaqus/'+basename+'/Um.png')
###Output
_____no_output_____
###Markdown
 The Origin of MNIST Handwritten Digit DatasetThe MNIST Handwritten Digit Dataset is contributed by Yann LeCun, Corinna Cortes and Christopher Burges to train a machine learning model to solve the issue of handwritten digit classification. Each image is a 28 by 28 pixel square (784 pixels total). This dataset contains 60,000 training images and 10,000 testing images in total for 10 different digits (0 to 10). What is Pytorch?PyTorch is an open-source deep learning library for Python, based on Torch, used for applications such as natural language processing, image recognition, image classification, text processing, etc. It is primarily developed by Facebook’s artificial-intelligence research group. PyTorch provides two high-level features:- Tensor computation (like NumPy) with strong GPU acceleration- Deep neural networks built on a tape-based autodiff system What is Azure Notebook?Azure Notebooks is a free service for anyone to develop and run code in their browser using Jupyter. Jupyter is an open source project that enables combing markdown prose, executable code, and graphics onto a single canvas. Azure Notebooks currently supports Python 2, Python 3, R, F and their popular packages. All the developed code and used data will be stored on the cloud. Step 1: Log in to Microsoft Azure Notebook:Go to https://notebooks.azure.com/, login with your credential. After you successfully login, the screen will be automatically jumped to the figure as shown below. Click on “My Projects” on the task-bar. Then, create a new project by pressing the button “New Project”.Fill in the project name. After that, click the button “Create”.A new folder for this project is created.  Step 2: Create new notebook named “demo”Click on the button “+”, choose “Notebook”.Fill in the Notebook Name as “Demo”. Choose “Python 3.6” and click button “New”.A new notebook is created now for this project. Digit Recognition TaskIf this is your first time running a notebook - welcome!! Notebooks are awesome because they let us play around and experimentwith code with near-instant feedback. Some pointers:1. To execute a cell, click on it and hit SHIFT-Enter2. Once something is executed, the variables are in memory - inspect them! Getting StartedThis first cell imports the necessary libraries so we can get started:1. Numpy — A fundamental package for scientific computing with Python. It contains a powerful N-dimensional array object, sophisticated (broadcasting) functions, tools for integrating C/C++ and Fortran code and useful linear algebra, Fourier transform, and random number capabilities.2. Matplotlib — A Python 2D plotting library which produces publication quality figures in a variety of hard copy formats and interactive environments across platforms.3. Torch — An open-source machine learning library, a scientific computing framework, and a script language based on the Lua programming language.4. Torchvision — A package consists of popular datasets, model architectures, and common image transformations for computer vision.5. Time — A module provides various time-related functions.
###Code
import numpy as np
import torch
import torchvision
import torch.onnx as onnx
import torch.nn.functional as F
import matplotlib.pyplot as plt
from time import time
from torch import nn, optim
from collections import defaultdict
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
###Output
_____no_output_____
###Markdown
Let's also check to make sure torch thinks we can use the GPU.
###Code
torch.cuda.is_available()
no_cuda=False
use_cuda = not no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print('Using {} device'.format(device))
###Output
Using cuda device
###Markdown
This should be True. If you see False here, something is wrong and torch isn't going to be able to run on the GPU.Possible causes to seeing False at this point:You don't have a GPU (see above with regards to the nvidia-smi check)You have a CPU-only version of pytorch installed, in which case you need to install the gpu enabled versionYou don't have the CUDA libraryYou don't have the CUDA library path included on either the PATH or LD_LIBRARY_PATH variablesYou have the wrong version of CUDA for the version of pytorchProbably other possiblities Data! Without data we really can't do anything with machine learning. At this point we have our sharp question **can we predict a digit given a 28x28 vector of numbers?** Once that is all squared away we need to take a look at our data. The next cell is a helper function that visualizes the the digits (a sanity check). But what are tensors? And how do they relate to machine learning? It is nothing more than a simple mathematical concept. Tensors are mathematical objects that generalize scalars, vectors and matrices to higher dimensions. If you are familiar with basic linear algebra, you should have no trouble understanding what tensors are. In short, a single-dimensional tensor can be represented as a vector. A two-dimensional tensor, as you may have guessed, can be represented as a matrix.Even though it’s easy to generalize tensors as multi-dimensional matrices ranging from zero to N dimensions, it is important to remember that tensors are dynamic. That is, tensors will transform when interacting with other mathematical entities. Matrices, on the other hand, don’t always have this property.  Tensor operations are simple. Consider the following tensors:
###Code
a = np.array([[[4,1,2],
[3,5,2],
[1,6,7]],
[[2,1,0],
[5,4,3],
[6,8,9]],
[[1,2,3],
[2,3,4],
[5,5,5]]])
b = np.array([[[1,1,1],
[2,2,2],
[3,3,3]],
[[4,4,4],
[5,5,5],
[6,6,6]],
[[7,7,7],
[8,8,8],
[9,9,9]]])
a.ndim
print(a+b)
###Output
[[[ 5 2 3]
[ 5 7 4]
[ 4 9 10]]
[[ 6 5 4]
[10 9 8]
[12 14 15]]
[[ 8 9 10]
[10 11 12]
[14 14 14]]]
###Markdown
 Where will we use Tensor in this LAB? Remember, most machines cannot learn without having any data. And modern data is often multi-dimensional. Tensors can play an important role in ML by encoding multi-dimensional data. For example, in our case a picture is generally represented by three fields: width, height and depth (color).It makes total sense to encode it as a 3D tensor. However, more often than not we are dealing with tens of thousands of pictures. Hence this is where the fourth field, sample size, comes into play. A series of images, such as the famous MNIST dataset, can easily be stored in a 4D tensor, in PyTorch just as in TensorFlow. This representation allows problems involving big data to be solved easily. Define the pre-processing methods Image transformations are applied to the images so they can be fed into the model later for training or testing and to improve the model accuracy.- transforms.RandomRotation(10) — Rotate the image in the range of [-10, 10] degrees randomly.- transforms.ToTensor() — Convert the image to a tensor.- transforms.Normalize((0.5,), (0.5,)) — Normalize the tensor image with mean and standard deviation. In this case, mean=0.5 and std=0.5. This will normalize the image to the range [-1,1]. For example, the minimum value 0 will be converted to (0-0.5)/0.5=-1, and the maximum value of 1 will be converted to (1-0.5)/0.5=1.
###Code
transform = transforms.Compose([transforms.RandomRotation(10),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))
])
###Output
_____no_output_____
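As a small sanity check (not in the original notebook), the arithmetic described above can be verified by pushing a dummy grey-scale image through the composed transform; the synthetic image below is an assumption used only for illustration.

```python
# Verify that Normalize((0.5,), (0.5,)) maps pixel values from [0, 1] to [-1, 1].
from PIL import Image as PILImage
sample = PILImage.fromarray(np.uint8(np.linspace(0, 255, 28*28).reshape(28, 28)))
tensor_sample = transform(sample)  # same Compose pipeline as defined above
print(tensor_sample.shape, tensor_sample.min().item(), tensor_sample.max().item())
```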
###Markdown
 Preparation of training and testing dataset and dataloader The operations of this code are: download the MNIST handwritten digit recognition dataset into the “train” and “test” folders; generate the train and test datasets from the downloaded images, applying the pre-processing methods defined in the previous step; define the train and test loaders with a batch size of 64, so that 64 images are fed into the model in each iteration, shuffled for training and unshuffled for testing. The next cell downloads the standard digit dataset (called MNIST). The `transform` argument of this call adds the conversion steps that make the data more suitable for the models we will try.
###Code
trainset = datasets.MNIST('train', download=True, train=True, transform=transform)
testset = datasets.MNIST('test', download=True, train=False, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False)
###Output
_____no_output_____
###Markdown
Here is our sanity check!
###Code
trainset
###Output
_____no_output_____
###Markdown
Data Visualization The images can be reconstructed and plotted by loading one batch of images.
###Code
dataiter = iter(trainloader)
images, labels = dataiter.next()
print(images.shape)
print(labels.shape)
###Output
torch.Size([64, 1, 28, 28])
torch.Size([64])
###Markdown
 As you can see, the shape of the image batch is (64, 1, 28, 28). Each batch except the last contains 64 grey-scale images; each image has 1 channel (grey-scale), 28 pixels of width and 28 pixels of height. Print the unique labels in our dataset:
###Code
y_unq = np.unique(labels)
print(np.unique(labels))
labellist=[]
for images, labels in trainloader:
labellist.append(labels[0].item())
bin_edges = np.concatenate([y_unq, y_unq[[-1]] + 1])
plt.hist(labellist, bins=bin_edges)
plt.xlabel('Class label')
plt.ylabel('Count')
plt.title('Training label distribution')
plt.show()
###Output
_____no_output_____
###Markdown
 Plot the images with the Matplotlib library.
###Code
figure = plt.figure()
num_of_images = 30
for index in range(1, num_of_images + 1):
plt.subplot(6, 10, index)
plt.axis('off')
plt.imshow(images[index].numpy().squeeze(), cmap='gray_r')
###Output
_____no_output_____
###Markdown
 Choosing Models and define the network architectureNow that we have some data it's time to start picking models we think might work. This is where the science part of data-science comes in: we guess and then check if our assumptions were right. Imagine models like water pipes that have to distribute water to 10 different hoses depending on 784 knobs. These 784 knobs represent the individual pixels in the digit and the 10 hoses at the end represent the actual number (or at least the index of the one with the most water coming out of it). Our job now is to pick the plumbing in between.The next cell defines three different constructions in increasing order of complexity:1. The first is a simple linear model,2. The second is a 3-layer Neural Network,3. and the last is a full convolutional neural networkWhile it is beyond the scope of this tutorial to fully explain how they work, just imagine they are basically plumbing with internal knobs that have to be tuned to produce the right water pressure at the end to push the most water out of the right index. As you go down each definition the plumbing and corresponding internal knobs just get more complicated.
###Code
class SimpleLinear(nn.Module):
def __init__(self):
super(SimpleLinear, self).__init__()
self.layer1 = nn.Linear(28*28, 10)
def forward(self, x):
x = self.layer1(x)
return F.softmax(x, dim=1)
class NeuralNework(nn.Module):
def __init__(self):
super(NeuralNework, self).__init__()
self.layer1 = nn.Linear(28*28, 512)
self.layer2 = nn.Linear(512, 512)
self.output = nn.Linear(512, 10)
def forward(self, x):
x = F.relu(self.layer1(x))
x = F.relu(self.layer2(x))
x = self.output(x)
return F.softmax(x, dim=1)
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = x.view(-1, 1, 28, 28)
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.softmax(x, dim=1)
model = CNN().to(device)
print(model)
###Output
CNN(
(conv1): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1))
(conv2): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1))
(conv2_drop): Dropout2d(p=0.5, inplace=False)
(fc1): Linear(in_features=320, out_features=50, bias=True)
(fc2): Linear(in_features=50, out_features=10, bias=True)
)
###Markdown
Optimizing Model Parameters. Define the criterion or loss function and the optimizer.
Now that we have some models it's time to optimize the internal parameters to see if they can do a good job at recognizing digits! It turns out there are some parameters that we can give the optimization algorithm to tune how it trains - these are called hyper-parameters. That's what the variables represent below:
###Code
learning_rate = 1e-3
batch_size = 64
epochs = 5
###Output
_____no_output_____
###Markdown
The `learning_rate` basically specifies how fast the algorithm will learn the model parameters. Right now you're probably thinking "let's set it to fifty million amirite?" The best analogy for why this is a bad idea is golf. I'm a terrible golfist (is that right?) so I don't really know anything - but pretend you are trying to sink a shot (again sorry) but can only hit the ball the same distance every time. Easy right? Hit it the exact length from where you are to the hole! Done! Now pretend you don't know where the hole is but just know the general direction. Now the distance you choose actually matters. If it is too long a distance you'll miss the hole, and then when you hit it back you'll overshoot again. If the distance is too small then it will take forever to get there but for sure you'll eventually get it in. Basically you have to guess what the right distance per shot should be and then try it out. That is basically what the learning rate does for finding the "hole in one" for the right parameters (ok, I'm done with the golf stuff). Below there are three things that make this all work:
1. **The Model** - this is the function we're making that takes in the digit vector and should return the right number.
2. **The Cost Function** (sometimes called the loss function). I know I promised I was done with golf but I lied. Remember how I said in our screwy golf game you knew the general direction of the hole? The cost function tells us the distance to the hole - when it's zero we're there! In actual scientific terms, the cost function tells us how bad the model is at getting the right answer. As we take shots you should see the cost function decreasing. If this does not happen then something is wrong. At this point I would change the shot distance (or `learning_rate`) to something smaller and try again. If that doesn't work maybe change the model!
3. **The Optimizer** - this part is the bit that actually changes the model parameters. It has a sense for the direction we should be shooting and updates all of the internal numbers inside the model to find the best internal knobs to predict the right digits. In this case I am using the Cross Entropy cost function because, well, I know it works. There are a ton of different cost functions you can choose from that fit a variety of different scenarios.
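To make the "shot distance" idea concrete, here is a tiny, self-contained sketch of a single gradient-descent step on a one-parameter toy problem. It has nothing to do with MNIST; it only illustrates what the optimizer does with the learning rate behind the scenes:
```
import torch

w = torch.tensor(5.0, requires_grad=True)   # current guess for a single "knob"
lr = 0.1                                    # the shot distance (learning rate)

loss = (w - 2.0) ** 2                       # cost: squared distance to the "hole" at w = 2
loss.backward()                             # compute the gradient d(loss)/dw
with torch.no_grad():
    w -= lr * w.grad                        # take one step downhill
    w.grad.zero_()
print(w.item())                             # closer to 2 than the starting value of 5
```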
###Code
# criterion = nn.NLLLoss()
# optimizer = optim.SGD(model.parameters(), lr=0.003, momentum=0.5)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
###Output
_____no_output_____
###Markdown
I forgot to mention the whole `cuda` thing. GPUs speed this whole process up because this is basically all a big matrix multiplication problem in a `for` loop. PyTorch is great because you basically just need to tell the model where to run (either on the CPU or using CUDA - which is a platform for moving computations to the GPU). Now for the learning part! The `dataloader`'s job is to iterate through the entire dataset (in this case 60,000 examples of digits and their corresponding labels) but to take chunks of size `batch_size` of the data to process. This is another hyperparameter that needs to be chosen. `epochs` is the number of times we want to loop through the dataset in its entirety (again something we choose based upon how our experiment goes). A word on how long this takes - it takes a while.
Train the model
We'll:
1. Record the start time of the training.
2. Set the number of training epochs (5, as defined above).
3. For each epoch, reset the running loss to 0, train the model, then print the running loss.
4. Record the end time of the training and display the duration (end time minus start time).
(A couple of these pieces are sketched in isolation right below, before the full `train` function.)
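A minimal sketch of the device placement and dataloader chunking in isolation, assuming `trainloader`, `model` and `device` from the earlier cells (illustration only):
```
# How many samples and how many batches per epoch?
print(len(trainloader.dataset), 'samples split into', len(trainloader), 'batches')

# Take one batch, move it to the same device as the model and run a forward pass
data, target = next(iter(trainloader))
data, target = data.to(device), target.to(device)
with torch.no_grad():
    print(model(data).shape)   # [batch_size, 10] - one score per digit class
```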
###Code
log_interval=10
train_log = defaultdict(list)
val_log = defaultdict(list)
def train(model, device, train_loader, optimizer, epoch, log_interval):
model.train()
train_log = defaultdict(list)
t_log = time()
n_samples = 0
for batch_idx, (data, target) in enumerate(train_loader):
t0 = time()
data, target = data.to(device), target.to(device).long()
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
t1 = time()
loss.backward()
t2 = time()
optimizer.step()
t3 = time()
n_samples += data.shape[0]
if batch_idx % log_interval == 0:
pred = output.max(1, keepdim=True)[1]
correct = pred.eq(target.view_as(pred)).sum().item()
train_log['n_iter'].append(epoch * len(train_loader) + batch_idx + 1)
train_log['n_samples'].append(n_samples + (epoch - 1) * len(train_loader.dataset))
train_log['loss'].append(loss.detach())
train_log['accuracy'].append(100. * correct / data.shape[0])
train_log['time_batch'].append(t3 - t0)
train_log['time_batch_forward'].append(t1 - t0)
train_log['time_batch_backward'].append(t2 - t1)
train_log['time_batch_update'].append(t3 - t2)
t4 = time()
train_log['time_batch_avg'].append((t4 - t_log) / log_interval)
print(
'Train Epochs: {} [{:5d}/{:5d} ({:3.0f}%)]'
'\tLoss: {:.6f}'
'\tTime: {:.4f}ms/batch'.format(
epoch, n_samples, len(train_loader.dataset),
100. * (batch_idx + 1) / len(train_loader), loss.item(),
1000 * (t4 - t_log) / log_interval,
)
)
t_log = time()
return train_log
###Output
_____no_output_____
###Markdown
Test the model
This code does the following:
1. Initialize the variables "correct_count" and "all_count" for the accuracy calculation.
2. Load the test images, feed them into the trained model and get the outputs.
3. Compare each output label with the original label; a test image counts as a correct prediction if the output label is the same as the original label.
4. Display the accuracy score of the model. The accuracy of this model is 95%.
(A minimal sketch of this counting approach is shown below, before the fuller `test` function, which also accumulates the loss.)
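A minimal sketch of that counting version, assuming `model`, `testloader` and `device` from the earlier cells (illustration only - the notebook's own `test` function follows):
```
correct_count, all_count = 0, 0
model.eval()
with torch.no_grad():
    for images, labels in testloader:
        images, labels = images.to(device), labels.to(device)
        preds = model(images).argmax(dim=1)              # most likely digit per image
        correct_count += (preds == labels).sum().item()
        all_count += labels.size(0)
print('Accuracy: {:.2f}%'.format(100 * correct_count / all_count))
```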
###Code
def test(model, device, test_loader, log_interval):
model.eval()
test_loss = 0
correct = 0
preds = []
targets = []
num_batches = 0
with torch.no_grad():
for data, target in test_loader:
num_batches += 1
data, target = data.to(device), target.to(device).long()
output = model(data)
test_loss += F.cross_entropy(output, target, reduction='sum').item()
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
preds.append(pred.cpu().numpy())
targets.append(target.cpu().numpy())
# Remove list nesting
preds = np.concatenate(preds).squeeze()
targets = np.concatenate(targets).squeeze()
test_loss /= len(test_loader.dataset)
accuracy = 100. * correct / len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'
''.format(
test_loss,
correct, len(test_loader.dataset), accuracy,
)
)
return test_loss, accuracy
epoch_log = train(model, device, trainloader, optimizer, epochs, log_interval)
for key, value in epoch_log.items():
train_log[key] += value
val_loss, val_accuracy = test(model, device, testloader, log_interval)
val_log['loss'].append(val_loss)
val_log['accuracy'].append(val_accuracy)
for p in model.parameters():
print(p.shape)
print(p)
###Output
torch.Size([10, 1, 5, 5])
Parameter containing:
tensor([[[[-0.1699, 0.0414, 0.2918, 0.0517, -0.1569],
[-0.1959, 0.2539, 0.3796, 0.2204, -0.1364],
[ 0.2987, 0.4507, 0.0234, -0.1823, -0.3108],
[ 0.2888, 0.4483, -0.1856, -0.4108, -0.2163],
[ 0.3488, 0.2203, 0.0916, -0.0623, 0.0197]]],
[[[-0.0634, -0.2388, -0.1852, -0.1576, -0.1489],
[-0.1525, -0.1028, -0.1742, -0.1264, -0.3083],
[-0.2377, -0.2402, -0.1205, -0.2603, -0.2644],
[-0.0879, -0.1814, -0.2928, -0.1269, -0.1674],
[ 0.0223, -0.1056, -0.0646, -0.0459, 0.0759]]],
[[[-0.2624, -0.3196, -0.3305, -0.1615, -0.3198],
[-0.0221, -0.0673, -0.4137, -0.1216, -0.2547],
[ 0.2298, -0.1082, -0.1349, -0.1962, -0.1277],
[ 0.1290, 0.3818, 0.1026, 0.2193, 0.4721],
[ 0.2717, 0.1170, 0.4938, 0.4447, 0.1268]]],
[[[-0.1400, -0.0397, -0.2403, -0.0440, -0.2005],
[-0.2310, -0.1948, -0.2738, -0.3410, -0.3236],
[-0.0986, 0.0213, -0.2878, -0.0506, -0.3180],
[-0.1833, -0.1677, -0.0464, -0.1346, -0.0836],
[-0.0834, -0.0821, -0.1515, 0.0378, -0.0605]]],
[[[ 0.4042, 0.4214, 0.5299, 0.4928, 0.4902],
[ 0.2666, 0.1746, -0.0516, 0.3550, 0.0406],
[-0.3445, -0.4252, -0.3187, -0.2447, -0.1019],
[-0.4446, -0.1318, -0.1583, -0.1713, -0.1791],
[-0.2475, -0.1042, -0.0537, -0.0968, -0.2465]]],
[[[-0.1823, -0.0773, -0.0431, -0.1722, -0.1932],
[-0.2432, -0.1795, -0.1923, -0.1620, -0.0962],
[-0.2265, -0.2689, -0.3138, -0.2035, -0.2717],
[-0.2501, -0.2690, -0.3109, -0.3079, -0.1605],
[-0.0366, -0.2788, -0.1431, 0.0599, -0.0537]]],
[[[ 0.4036, 0.2043, 0.0995, -0.2412, -0.2820],
[ 0.3251, -0.1751, -0.2347, -0.2917, -0.2415],
[ 0.1056, -0.1301, -0.1166, -0.1552, -0.2374],
[ 0.1874, -0.1827, -0.1741, -0.3449, -0.0682],
[-0.1189, 0.0129, -0.1905, -0.1271, 0.0262]]],
[[[ 0.0158, 0.3041, 0.0155, 0.2039, 0.2540],
[-0.1149, -0.1036, -0.2498, -0.1783, -0.1711],
[-0.1732, -0.1424, -0.1022, -0.3184, -0.2849],
[-0.1237, -0.2391, -0.3123, -0.2747, -0.3631],
[ 0.0434, -0.0892, -0.2610, -0.3356, -0.3309]]],
[[[ 0.3536, 0.2552, -0.1590, -0.0060, 0.1009],
[ 0.0369, 0.2821, -0.2225, -0.2241, -0.1771],
[ 0.1331, 0.0252, -0.1681, 0.0008, -0.3141],
[-0.1327, 0.2343, -0.1259, -0.0055, 0.1324],
[-0.1595, 0.1567, 0.2317, 0.1732, -0.0249]]],
[[[ 0.2225, 0.0951, 0.0317, -0.2371, -0.0556],
[ 0.3450, 0.1651, 0.0455, -0.3576, -0.3206],
[ 0.4685, -0.0179, 0.0918, -0.3088, -0.1689],
[ 0.4748, 0.2242, -0.1778, -0.1592, -0.2691],
[ 0.2772, 0.2503, 0.1574, -0.0903, -0.0361]]]], device='cuda:0',
requires_grad=True)
torch.Size([10])
Parameter containing:
tensor([ 0.1196, -0.1891, 0.1356, -0.0968, 0.1036, 0.0387, 0.1158, -0.1050,
0.2486, 0.3082], device='cuda:0', requires_grad=True)
torch.Size([20, 10, 5, 5])
Parameter containing:
tensor([[[[-2.1296e-01, -1.1523e-01, -1.2847e-01, -7.8007e-02, -1.0911e-01],
[-1.2469e-01, -1.2566e-01, -2.2140e-02, -1.3921e-02, -1.2283e-02],
[ 1.4625e-02, -1.7767e-02, 9.1265e-02, 7.9326e-02, -8.2278e-02],
[ 8.8330e-02, 8.4632e-02, 3.7135e-03, -1.1134e-01, -1.5796e-01],
[-3.4488e-02, -8.1838e-02, -1.7516e-01, -1.2972e-01, -2.1309e-02]],
[[ 5.1410e-02, 9.8163e-02, 6.0050e-03, 1.1556e-02, 1.3308e-02],
[-1.9960e-02, -1.0147e-04, 1.1119e-02, -3.3151e-02, 3.5778e-02],
[-3.9499e-02, 2.8183e-02, -5.4101e-02, -7.2584e-02, -9.5949e-02],
[-5.9390e-02, 1.0211e-02, -6.2738e-02, 5.6964e-03, -4.6847e-02],
[-4.6460e-03, -3.0784e-02, 6.1841e-02, -7.6948e-03, -8.7815e-02]],
[[-2.4515e-02, -1.6974e-02, -6.7369e-03, 1.1265e-01, -3.9796e-03],
[ 8.8255e-02, 1.9537e-01, 2.1408e-01, 1.5457e-01, 9.6785e-02],
[ 2.1581e-01, 1.2392e-01, 8.0885e-02, -4.0607e-02, -1.7642e-02],
[ 1.0981e-01, 4.7165e-02, -2.5960e-02, 7.8967e-02, 1.0021e-01],
[-2.7037e-02, 7.3483e-03, 1.5881e-01, 8.4802e-02, 3.9226e-02]],
...,
[[ 8.6434e-02, -1.0875e-02, 5.1790e-02, 1.6173e-02, -3.6971e-02],
[ 5.9212e-02, -5.7928e-02, 1.5890e-02, -6.3956e-02, -9.5359e-02],
[-1.3764e-02, -1.1541e-01, 3.4598e-03, -2.7527e-02, -3.8706e-02],
[-4.4022e-02, 8.2171e-03, -3.4199e-02, 4.3279e-02, -6.8814e-02],
[ 1.5533e-03, 2.8323e-02, 2.9004e-02, 6.3757e-02, 2.0538e-02]],
[[ 2.8341e-02, -6.1305e-02, 9.4097e-02, 1.3383e-02, 5.0312e-02],
[ 3.4610e-02, 1.2413e-01, 2.0624e-03, 6.7201e-03, -3.7118e-02],
[-8.6491e-02, -1.2380e-01, -4.4594e-02, 1.8225e-02, 6.0449e-02],
[ 3.7533e-03, -3.1139e-02, 2.5549e-02, 7.8600e-02, -4.8689e-02],
[ 1.0392e-01, 2.4711e-02, 9.9582e-02, -8.3436e-02, -1.3151e-01]],
[[-1.3251e-01, -8.6357e-02, -7.2406e-02, 8.1586e-03, -9.6043e-03],
[-3.1289e-02, -6.9252e-02, -6.3863e-02, -6.0749e-02, 9.1689e-03],
[-7.3817e-02, -1.2978e-01, -1.1618e-01, -1.1102e-01, -4.6426e-02],
[ 1.1860e-01, 4.5150e-03, -2.4570e-03, -8.3322e-02, -5.3990e-02],
[ 7.6412e-02, 4.2363e-02, -7.7149e-02, -1.1724e-01, -1.8284e-01]]],
[[[-1.0189e-01, 1.2958e-01, 7.7824e-02, -9.1656e-02, -2.8396e-02],
[ 2.1874e-02, 1.1181e-01, 5.9555e-02, -5.7021e-02, 1.2420e-02],
[ 2.1004e-02, 1.4144e-01, 3.1678e-02, -1.1499e-01, 8.2012e-02],
[ 5.0154e-02, 5.2976e-02, -2.4374e-02, 1.5403e-02, 1.7469e-01],
[ 7.1716e-02, 7.9046e-02, 4.2020e-02, 1.1358e-01, 1.1908e-01]],
[[-8.6080e-03, 1.4849e-03, 1.0636e-01, 6.8308e-02, 8.6318e-05],
[-5.0649e-02, 6.9721e-03, 3.1201e-03, 3.4909e-02, 1.2321e-02],
[ 4.5133e-02, -4.1854e-02, 6.1750e-02, 2.3575e-02, 1.7379e-02],
[ 1.6414e-02, 4.0384e-02, 2.4420e-02, -5.0447e-03, 1.5532e-02],
[-4.7533e-02, -2.5265e-02, -3.7054e-02, -8.5387e-02, -1.0430e-01]],
[[-8.6676e-02, -1.4083e-01, -1.2383e-01, -9.1213e-02, 7.8369e-02],
[-8.6625e-02, -1.0875e-01, -1.6835e-01, -1.1220e-01, -2.3825e-02],
[-1.3437e-01, -1.2402e-01, -3.8279e-02, -4.6088e-02, -2.4027e-02],
[-1.3298e-01, -4.9103e-02, 4.2253e-03, 4.6078e-02, -4.8553e-02],
[-1.2260e-01, -1.3358e-02, 9.5319e-02, 5.8877e-02, -3.3021e-02]],
...,
[[-7.7227e-02, 6.1677e-02, 5.0900e-02, 6.0561e-02, -4.2065e-02],
[-6.7418e-02, 3.8567e-02, 9.2699e-04, -1.8025e-03, -2.6668e-02],
[-2.7204e-02, 5.1432e-02, 6.2408e-02, -4.2255e-02, -3.7823e-02],
[ 3.1592e-02, -5.5702e-02, 2.9705e-02, -1.0922e-02, 2.8033e-02],
[ 3.1777e-02, -4.3798e-02, -1.1774e-01, -7.1713e-02, -9.9403e-03]],
[[-5.8756e-02, -4.4487e-02, 3.0057e-02, -5.2362e-02, -4.1251e-02],
[-1.0212e-01, 7.5981e-03, 4.5858e-03, -1.5542e-01, -1.0669e-01],
[-9.1715e-02, 4.2399e-02, 3.5635e-02, -1.8729e-01, -5.9396e-02],
[-3.3127e-02, 1.1314e-01, 3.2940e-02, -1.8623e-01, -7.6140e-02],
[-7.5128e-02, 1.2131e-02, -6.3666e-02, -8.8693e-02, 8.4341e-02]],
[[ 3.8419e-02, 1.8818e-01, 1.7088e-01, 3.7121e-02, -5.3453e-02],
[ 1.7781e-03, 1.7457e-01, 1.3380e-01, -1.3745e-01, -6.9713e-02],
[ 7.9620e-02, 5.4137e-02, 1.1505e-02, -5.4899e-02, -4.0855e-02],
[ 9.2218e-02, 1.0585e-01, 6.9204e-03, -4.4921e-02, -3.7051e-04],
[ 8.3193e-02, 2.3681e-02, 8.9564e-02, 4.7790e-02, 8.0161e-02]]],
[[[-7.2490e-02, 2.5166e-03, 1.1736e-01, 1.9481e-02, 4.4097e-02],
[-5.8850e-02, -8.5865e-02, -4.2617e-02, -1.4397e-01, -6.8283e-02],
[-7.7176e-02, -1.6426e-01, -1.8192e-01, -2.5836e-01, -8.9263e-02],
[-7.9154e-02, -8.8746e-02, -1.3057e-01, -6.3847e-02, -4.9006e-02],
[-2.4666e-02, -1.3586e-02, 2.8112e-02, -2.5150e-02, 1.6595e-01]],
[[-2.5643e-02, -9.5959e-02, -3.3867e-02, -1.0074e-01, -7.8019e-02],
[-4.4891e-02, -6.9529e-03, 8.3205e-03, -3.4551e-02, -5.9687e-02],
[ 3.5769e-02, 9.5571e-02, 7.0777e-02, 6.4129e-02, 5.4315e-02],
[ 7.4632e-02, 1.2021e-01, 1.2538e-01, 9.9192e-02, 1.4363e-02],
[ 6.5183e-02, 1.1982e-02, 9.5162e-02, -7.5055e-03, 4.9218e-02]],
[[ 1.3845e-01, 8.8139e-02, 8.6925e-02, 5.5321e-03, -1.8545e-03],
[-9.5481e-02, -1.0023e-01, -5.0622e-02, -1.5304e-01, -2.1914e-01],
[ 2.7400e-02, 4.3446e-03, -7.8902e-02, -5.5210e-02, -1.5468e-01],
[ 2.1385e-01, 1.1280e-01, 3.7042e-02, 6.1626e-02, -5.4022e-02],
[ 1.0404e-01, 5.2197e-02, 1.5624e-01, 2.7451e-02, -1.0483e-01]],
...,
[[-1.0674e-01, -5.1361e-02, -1.7148e-02, -1.4283e-01, 3.0078e-03],
[ 1.5005e-02, 9.9656e-03, 4.8655e-02, 6.6447e-02, -1.3862e-02],
[ 3.7545e-02, 1.0759e-01, 1.1414e-01, 6.1564e-02, -1.1829e-02],
[ 2.1417e-02, 1.1332e-01, -1.9988e-02, 6.0025e-02, 1.4640e-02],
[ 3.2112e-02, 5.8585e-03, 3.4450e-02, -8.2708e-02, 6.7291e-02]],
[[-3.3365e-02, -1.7196e-01, -9.0175e-02, 2.7359e-02, 8.0332e-02],
[ 3.2467e-02, -9.0236e-02, 8.9203e-02, 6.4883e-02, 4.8301e-02],
[ 1.6782e-02, -3.3670e-02, -3.2817e-02, -7.5133e-02, -5.8088e-02],
[-5.9414e-02, -1.2396e-01, -1.5569e-01, -1.2082e-01, -1.2941e-01],
[ 8.9911e-03, -6.5031e-02, -4.9076e-02, -6.8226e-02, 1.5072e-02]],
[[ 1.8002e-02, 2.3675e-04, 8.8505e-02, 3.9969e-02, 6.8230e-02],
[-8.6967e-02, -7.3693e-02, -2.8913e-02, -6.5312e-02, -2.8940e-02],
[ 4.2227e-02, -9.0205e-02, -1.8919e-01, -1.8189e-01, -1.0945e-01],
[-4.2061e-02, -1.8190e-02, -6.5779e-02, -1.1629e-01, 3.5956e-02],
[-4.0292e-02, -1.6663e-02, -3.5642e-03, -3.9633e-02, 1.2654e-01]]],
...,
[[[-1.0084e-01, -1.5945e-02, 3.6910e-02, 6.1091e-02, -5.3894e-03],
[-1.2131e-01, 7.2952e-02, 8.7419e-02, 7.4902e-02, 2.7173e-02],
[ 5.3403e-02, 6.6040e-02, 5.9823e-02, -2.4686e-02, -6.0907e-02],
[-3.7324e-02, 1.1637e-01, 1.3581e-01, -1.5352e-02, 4.2514e-02],
[-1.1340e-01, 4.7608e-02, 1.1170e-01, -2.7014e-02, 4.7917e-02]],
[[ 5.1161e-02, -3.5524e-02, 6.6678e-02, 1.0319e-02, -2.3483e-02],
[ 3.0736e-02, -6.8465e-02, -2.5458e-02, 1.1318e-02, 8.5584e-02],
[-4.0230e-02, -7.8366e-02, 4.3533e-02, 1.0067e-02, -6.9352e-03],
[-8.0519e-02, -7.1628e-02, -5.5071e-02, 6.5712e-02, -1.4856e-03],
[ 2.0377e-02, -9.1244e-03, -7.1120e-02, -1.3397e-02, -7.4850e-02]],
[[-1.5943e-01, -8.4724e-02, -2.6729e-02, -1.3252e-01, -3.9481e-02],
[ 7.3731e-03, -3.6572e-02, -6.1569e-02, -8.1424e-02, 3.5019e-02],
[-3.9273e-02, 5.0927e-04, 9.5973e-03, -6.1763e-02, -1.1038e-02],
[-1.2197e-02, 6.4942e-03, 1.4183e-01, 8.8670e-02, 7.2549e-02],
[-1.7239e-01, -1.9898e-02, 6.9535e-02, 1.4837e-01, 1.8343e-01]],
...,
[[ 1.8077e-02, 1.8059e-02, -4.2669e-02, 6.8707e-02, 6.5873e-02],
[-2.8533e-02, -1.0615e-01, -1.2754e-03, 2.2458e-02, 1.9923e-02],
[-8.1609e-02, -2.9611e-02, 1.5606e-02, 3.9640e-02, 3.8940e-02],
[-5.1189e-02, -1.8874e-04, -2.7311e-02, -2.9771e-02, -9.0825e-02],
[ 5.9941e-02, -8.3184e-02, -1.1332e-01, -4.3066e-02, -8.7637e-02]],
[[-7.0443e-02, -2.6889e-02, -3.9078e-02, -9.8615e-02, 4.3984e-02],
[-8.5978e-02, -4.0636e-03, -1.4533e-02, -8.2468e-02, -5.7728e-02],
[-1.2829e-01, 4.8732e-05, 4.3791e-02, -6.9404e-03, 1.6964e-02],
[-7.2303e-02, 1.1409e-01, 5.1648e-02, 3.8576e-02, -6.3796e-02],
[-6.9330e-02, 6.4076e-02, 9.7506e-02, -2.8993e-02, -3.4117e-02]],
[[-7.2305e-02, 7.7761e-02, 6.5961e-02, 1.7877e-02, 5.2819e-02],
[-5.5494e-02, 7.4691e-02, 1.2320e-01, 1.5259e-02, -2.3773e-02],
[ 2.7994e-02, 2.3434e-02, 1.0831e-01, 6.5794e-02, -6.4870e-02],
[ 3.2931e-02, 8.9131e-02, 1.7760e-01, -1.0204e-02, -1.8802e-02],
[-5.0616e-02, 1.6601e-02, 9.0813e-02, 2.0051e-02, -5.4247e-02]]],
[[[-7.1863e-02, 5.2834e-02, 3.1034e-02, -5.5321e-02, 1.9641e-02],
[ 5.7032e-02, 6.1167e-03, 8.9154e-02, 4.8450e-02, 5.1239e-02],
[-1.5552e-02, -3.2068e-02, 8.1429e-02, 1.1577e-01, 8.9209e-02],
[-6.0246e-02, 2.4984e-02, 2.3812e-02, 1.0714e-01, 7.1284e-02],
[-1.2646e-01, -1.0988e-01, 5.4628e-02, 1.3875e-02, 8.1069e-02]],
[[-9.7546e-03, 3.7445e-02, 8.1617e-02, 3.8694e-04, 7.3808e-02],
[-6.1809e-02, -1.2418e-01, -3.7904e-02, 4.3552e-02, 6.8847e-02],
[-1.0343e-01, -1.1002e-01, -3.4341e-02, -2.3838e-03, 2.4779e-02],
[ 4.3917e-03, -1.5098e-03, -1.6965e-01, -1.1247e-02, -1.7607e-02],
[ 3.9384e-03, 2.3578e-02, -3.6514e-02, -8.6011e-02, -8.3123e-02]],
[[ 1.7681e-01, 1.6169e-01, 4.1400e-02, -3.0838e-02, -1.0361e-01],
[-4.4703e-02, 5.9171e-02, 2.9878e-02, 2.0734e-02, 4.4790e-02],
[-1.7329e-01, -1.9078e-01, 5.9982e-02, 8.3073e-02, 1.8670e-02],
[-1.2092e-01, -1.0664e-01, 1.2251e-01, 9.7842e-02, 1.1347e-01],
[ 1.9365e-01, 6.2976e-02, 4.3516e-02, 9.4356e-02, 5.0876e-02]],
...,
[[-5.3186e-02, 9.9371e-03, 2.4752e-02, 8.5834e-02, 1.2065e-01],
[-1.4361e-01, -1.5840e-01, -4.9717e-02, 1.5453e-02, 4.4933e-02],
[-1.3790e-02, -1.1067e-01, -7.8040e-02, 5.7644e-02, -5.3606e-02],
[ 1.0233e-02, -3.1050e-02, -1.0814e-01, -2.3084e-02, 2.6553e-02],
[ 8.0131e-02, 7.0191e-02, 1.9732e-02, -7.8407e-02, -6.3637e-02]],
[[ 5.7606e-02, 4.6288e-02, -1.4384e-02, 2.3973e-02, 6.8170e-02],
[-1.2618e-01, -2.0312e-02, 5.2567e-02, 6.5080e-02, 4.5798e-03],
[ 2.5685e-02, 7.3456e-02, 1.0126e-01, 7.6159e-02, 6.6861e-02],
[ 9.5826e-02, 3.7065e-02, 1.9106e-01, 1.6730e-01, 3.1743e-02],
[ 8.0429e-02, -6.4095e-02, -9.0497e-03, 5.0386e-02, -1.0656e-02]],
[[-1.2691e-02, 8.0021e-02, 1.0155e-01, 7.3718e-02, 6.0238e-02],
[-1.4572e-01, 6.2270e-02, 1.8046e-01, 1.0779e-01, 1.5684e-02],
[-2.8015e-02, -4.4381e-02, 1.3826e-01, 1.6763e-01, 1.4257e-01],
[-3.6274e-02, -1.6930e-01, 1.2669e-01, 7.2290e-02, 9.5468e-02],
[-8.9537e-02, -1.4866e-01, 2.6519e-02, 1.7416e-02, -3.0924e-02]]],
[[[-8.6800e-03, -4.1210e-02, -1.2591e-01, 4.8430e-02, 1.9298e-01],
[-5.6550e-02, -1.9172e-01, -2.1415e-02, 1.8159e-01, 1.8119e-01],
[-6.6875e-03, 6.5319e-02, 2.0624e-01, 6.0155e-02, -1.6494e-02],
[ 6.7419e-02, 9.1557e-02, 5.3276e-02, -1.8139e-01, -2.0887e-01],
[ 3.1320e-02, -3.5291e-02, -1.0179e-01, -5.4208e-02, -1.2752e-01]],
[[-3.3059e-02, 3.0617e-02, -6.3349e-02, -6.3914e-02, -7.5905e-02],
[-4.5954e-02, -6.5920e-03, -6.6184e-02, -1.0826e-01, 2.4739e-02],
[-6.4912e-02, -9.2454e-02, -5.7252e-02, -9.1471e-02, 2.3498e-02],
[-5.9081e-02, -5.2612e-02, -2.4521e-02, 2.0399e-02, 6.1022e-02],
[ 1.6731e-02, 2.0700e-02, 1.9251e-02, 4.5032e-02, 1.2583e-01]],
[[-1.7365e-01, -1.0492e-01, -4.2621e-02, -6.1872e-02, 4.2522e-02],
[ 4.1242e-02, 1.1564e-01, 6.2673e-02, -5.4768e-02, -1.7051e-02],
[ 1.3688e-01, 1.0965e-01, -2.7670e-02, -6.0165e-02, -3.6979e-03],
[ 4.3008e-03, -1.4044e-01, -1.2901e-01, -1.4571e-03, -7.8216e-02],
[-2.0710e-01, -7.0168e-02, 1.9734e-02, 2.3581e-02, 1.9403e-03]],
...,
[[-1.1551e-02, 5.3595e-02, -5.6397e-02, -8.4822e-02, -2.2760e-02],
[ 6.9170e-02, -4.0249e-02, -7.7863e-02, 1.4601e-03, 2.7776e-02],
[-1.2462e-01, -1.6127e-01, -6.2173e-03, 2.7737e-02, 3.7341e-02],
[-8.4521e-02, 3.7860e-03, 1.8753e-02, 1.3173e-01, 1.1375e-01],
[-1.8739e-03, 5.1742e-02, 8.3925e-02, 9.5313e-03, 7.6359e-02]],
[[-3.5156e-02, 2.2571e-02, -8.7477e-02, -7.4955e-02, 1.6611e-01],
[ 1.0585e-01, 1.0849e-01, -6.7651e-02, -1.3784e-02, 7.5738e-02],
[-3.8847e-02, -2.7655e-01, -4.5354e-02, 9.0042e-02, 1.1045e-01],
[-6.3922e-02, -3.7735e-02, 1.3408e-01, 1.3787e-01, 5.9036e-02],
[ 1.8951e-01, 1.6951e-01, 2.4616e-02, 4.8317e-02, 3.6760e-03]],
[[-3.6190e-02, -1.8240e-02, -1.2803e-01, -4.7897e-02, 2.0228e-01],
[-3.3954e-02, -1.6153e-01, -1.1322e-01, 2.0031e-02, 1.0126e-01],
[-8.0046e-02, -1.5129e-01, -1.6336e-02, 4.3053e-02, 1.5958e-02],
[ 1.1671e-02, 3.8694e-02, 8.9952e-02, -5.8921e-02, -6.3521e-02],
[ 2.8955e-02, 2.2342e-02, 3.7149e-02, 2.5152e-03, -6.8771e-02]]]],
###Markdown
Is it working??? The best way to figure out if it is working is to test the model on data the learning process hasn't used. Luckily we have such a dataset (it is basically a held-out section of the data we have already used). I'm loading it all up the same way as before and printing it out to show you that the examples are different.
###Code
def collapse_nested_list(l):
return [a for b in l for a in b]
def plot_training_log(train_log, val_log):
#xx_train = np.arange(0, len(train_log['loss']), 1 / len(train_log['loss'][0]))
xx_train = np.array(train_log['n_samples']) / 1000
n_epoch = len(val_log['loss'])
xx_val = np.arange(1, n_epoch + 1) * xx_train[-1] / n_epoch
plt.plot(xx_train, train_log['loss'], 'b')
# We prepend the first train loss score so there are enough datapoints to plot
# a line, even with a single epoch.
plt.plot(np.concatenate([[0], xx_val]), [train_log['loss'][0]] + val_log['loss'], 'ro-')
plt.title('Loss (lower is better)')
plt.xlabel('Number of samples presented (1000s)')
plt.ylabel('Cross-Entropy Loss')
plt.show()
plt.plot(xx_train, train_log['accuracy'], 'b')
plt.plot(np.concatenate([[0], xx_val]), [train_log['accuracy'][0]] + val_log['accuracy'], 'ro-')
plt.title('Accuracy (higher is better)')
plt.xlabel('Number of samples presented (1000s)')
plt.ylabel('Accuracy (%)')
plt.show()
if n_epoch > 1:
plt.plot(xx_train, train_log['accuracy'], 'b')
plt.plot(np.concatenate([[0], xx_val]), [10] + val_log['accuracy'], 'ro-')
plt.ylim([90, 100])
plt.title('Accuracy, zoomed in')
plt.xlabel('Number of samples presented (1000s)')
plt.ylabel('Accuracy (%)')
plt.show()
plt.plot(xx_train, 1000 * np.array(train_log['time_batch_avg']), 'b')
plt.title('Training speed (lower is better)')
plt.xlabel('Number of samples presented (1000s)')
plt.ylabel('Duration per batch (ms)')
plt.show()
plot_training_log(train_log, val_log)
###Output
_____no_output_____
###Markdown
Output Visualization Let's see how well we do over *all* of the test data!
###Code
images, labels = next(iter(testloader))
if not no_cuda:
images = images.to(device)
fig=plt.figure(figsize=(10, 20))
for i in range (0, 10, 2):
img1 = images[i].view(1, 784)
img2 = images[i+1].view(1, 784)
with torch.no_grad():
if not no_cuda:
input = Variable(img1)
input = input.to(device)
output = model(input)
index = output.data.cpu().numpy().argmax()
fig.add_subplot(5, 2, i+1)
plt.imshow(img1.resize_(1, 28, 28).data.cpu().numpy().squeeze())
plt.title("Predicted Digit = {}".format(index))
input = Variable(img2)
input = input.to(device)
output = model(input)
index = output.data.cpu().numpy().argmax()
fig.add_subplot(5, 2, i+2)
plt.imshow(img2.resize_(1, 28, 28).data.cpu().numpy().squeeze())
plt.title("Predicted Digit = {}".format(index))
else:
logps = model(img1)
ps = torch.exp(logps)
probab = list(ps.numpy()[0])
fig.add_subplot(5, 2, i+1)
plt.imshow(img1.resize_(1, 28, 28).numpy().squeeze())
plt.title("Predicted Digit = {}".format(probab.index(max(probab))))
logps = model(img2)
ps = torch.exp(logps)
probab = list(ps.numpy()[0])
fig.add_subplot(5, 2, i+2)
plt.imshow(img2.resize_(1, 28, 28).numpy().squeeze())
plt.title("Predicted Digit = {}".format(probab.index(max(probab))))
###Output
_____no_output_____
###Markdown
Saving the Model
Every framework is different - in this case PyTorch lets us save the model (which you remember is just a big matrix `W` and a vector `b`) to an internal format as well as to the ONNX format. These can then be loaded up as an asset to a program that is executed every time you need to recognize a digit!
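For completeness, a saved `model.pth` state dict can later be loaded back into a freshly constructed model. A minimal sketch (the file name matches the cell below; illustration only):
```
# Reload the trained weights into a new instance of the same architecture
restored = CNN().to(device)
restored.load_state_dict(torch.load('model.pth', map_location=device))
restored.eval()   # switch off dropout for inference
```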
###Code
if not no_cuda:
x = torch.randint(255, (1, 28*28), dtype=torch.float).to(device) / 255
else:
x = torch.randint(255, (1, 28*28), dtype=torch.float) / 255
onnx.export(model, x, 'model.onnx')
print('Saved onnx model to model.onnx')
torch.save(model.state_dict(), 'model.pth')
print('Saved PyTorch Model to model.pth')
###Output
Saved onnx model to model.onnx
Saved PyTorch Model to model.pth
###Markdown
Code for AICHI Skill Share (Visual Recognition edition). In this part we will try out "Watson Visual Recognition", an API for performing image recognition. We assume that you have already finished the Language Translator demo. First, create a Visual Recognition resource. Then fill in the following information from the service credentials.
###Code
API_KEY='YOUR_API_KEY'
API_URL='YOUR_URL'
###Output
_____no_output_____
###Markdown
Next, run the cell below to install the Watson API Python SDK.
###Code
!pip install --upgrade "ibm-watson>=3.0.3"
###Output
_____no_output_____
###Markdown
Run the cell below to actually call the API. Unless you have a specific reason to do otherwise, check https://cloud.ibm.com/apidocs/visual-recognition?code=python#versioning for the `version` field and specify the latest version. (As of June 2019 the latest version is ```2018-03-19```.)
###Code
from ibm_watson import VisualRecognitionV3
visual_recognition = VisualRecognitionV3(
version='2018-03-19',
iam_apikey=API_KEY
)
###Output
_____no_output_____
###Markdown
Now we are all set up, so let's move on to image recognition.
1. Image recognition (general model) - specifying a URL. We produce output using a pre-trained classifier. The general model identifies which of several thousand classes/keywords apply to the image - in other words, it tags the image. The classes/keywords (tags) are organised hierarchically; top-level categories include animals, people and their activities, food, plants, sports, nature, transportation, furniture, fruit, musical instruments, tools, colours, devices/equipment, weapons, buildings, structures/man-made objects, clothing, and so on. (Reference: https://cloud.ibm.com/apidocs/visual-recognition?code=python#classify-images )
- Input: the image file to recognise (JPEG or PNG) or a URL
- Output:
  - what appears in the image (the classification results)
  - a score (confidence for the image)
  - the hierarchy of the detected classes (tag hierarchies such as animal - livestock - dog - small dog)
First, let's run image recognition from a sample image URL (we use [the image at this link](https://watson-developer-cloud.github.io/doc-tutorial-downloads/visual-recognition/fruitbowl.jpg)). Run the cell below.
###Code
image_url = 'https://watson-developer-cloud.github.io/doc-tutorial-downloads/visual-recognition/fruitbowl.jpg'
import json
classes = visual_recognition.classify(
url=image_url,
threshold='0.6',
accept_language='ja').get_result()
print(json.dumps(classes, indent=2, ensure_ascii=False))
###Output
_____no_output_____
###Markdown
How did it go? The result should have been printed in JSON format, and you can see that the result is output in Japanese. This is controlled by specifying a language ID with ```accept_language```. Eleven languages can be specified, and the default is ```en```.
2. Image recognition (general model) - using your own image. Next we will load an image that you prepare yourself. Please have any image saved on your PC ready. (If you do not have one, download [this image](https://github.com/Miura55/20190625_SkillShare/blob/master/img/ramen.jpeg) to your PC.) Once it is ready, you need to import the image into Watson Studio as follows:
1. Among the icons at the top right, find and click the icon made of three squares and the number 1.
2. Make sure the ```files``` tab is selected, then drag and drop the image onto ```Drop your file here or browse your files to add a new file```, or click ```browse``` and select the image file.
3. Click the cell below, choose ```Insert to code```, then click ```Insert StreamingBody object``` to generate the code for reading the file.
4. If the variable at the bottom is ```streaming_body_2``` instead of ```streaming_body_1```, change it to ```streaming_body_1```.
###Code
# Click here
###Output
_____no_output_____
###Markdown
Once the code has been generated, copy the file so that the image can be handled entirely within the program without saving it to storage. Run the cell below.
###Code
from io import BytesIO
filename = "ramen.jpeg"
file = BytesIO(streaming_body_1.read())
with open(filename, "wb") as out:
out.write(file.read())
###Output
_____no_output_____
###Markdown
To check that the image was copied correctly, let's actually display it. Run the cell below.
###Code
from PIL import Image
# Confirm that the image was loaded
with open(filename, 'rb') as images_file:
image = Image.open(filename)
display(image)
###Output
_____no_output_____
###Markdown
Now let's finally run recognition on the loaded image. Run the cell below.
###Code
with open(filename, 'rb') as images_file:
classes = visual_recognition.classify(
images_file,
threshold='0.6',
accept_language='ja').get_result()
print(json.dumps(classes, indent=2, ensure_ascii=False))
###Output
_____no_output_____
###Markdown
3. Making the output easier to read. So far we have loaded an image and looked at the recognition result, but since the output is JSON it is a bit hard to read. So we will output a table using pandas and display a chart using matplotlib. pandas is a Python library for handling tabular data, and matplotlib is a Python library used to create charts. We want to use the ```class``` element of the JSON output above as the x-axis labels, but rendering Japanese text requires installing an additional font. First, install the Japanese font file by running the font-installation cell that follows the short pandas aside below.
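As a small aside on the pandas side: the list of classes inside the JSON result can be turned into a DataFrame directly. This sketch assumes the `classes` variable returned by the `classify` call above (illustration only):
```
import pandas as pd

# Flatten the nested JSON result into a simple table of class names and scores
result_classes = classes["images"][0]["classifiers"][0]["classes"]
df = pd.DataFrame(result_classes)[["class", "score"]].sort_values("score", ascending=False)
display(df)
```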
###Code
import os
# Install the Japanese font
jp_font_path ='ipaexg00301/ipaexg.ttf'
if not os.path.exists(jp_font_path):
!wget https://oscdl.ipa.go.jp/IPAexfont/ipaexg00301.zip
!unzip ipaexg00301.zip
else:
print('IPA font has been already installed')
###Output
_____no_output_____
###Markdown
Once the installation has finished, run the cell below to display the recognition results as a chart. (If no chart appears, run the cell once more and it should show up.)
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
fp = FontProperties(fname=jp_font_path, size=10)
# Set up the values to plot
values = [value for value in classes["images"][0]["classifiers"][0]["classes"]]
label = [label["class"] for label in values]
plt.xticks(range(len(values)), label, fontproperties=fp, rotation=45)
# Show the bar chart
plt.bar(range(len(values)), [score["score"] for score in values], color="blue")
plt.show()
# Show the results table
df_face_detect_res = pd.DataFrame({"score":[score["score"] for score in values]},
index=label)
display(df_face_detect_res)
###Output
_____no_output_____
###Markdown
As you can see, pandas and matplotlib make it easy to visualise data like this, and when used in a Jupyter notebook the result is displayed directly below the executed cell. One reason Jupyter notebooks work so well for data science is precisely that this kind of visualisation environment is readily available.
4. Trying face recognition. Up to now we have covered object recognition; from here on we will recognise human faces. Visual Recognition also ships with a dedicated face-recognition model that estimates the position of each face, its age and its gender. Run the cell below to download the image we will use and take a look at it. This image is free stock material found on the web. (The link may become invalid and stop working.)
###Code
!wget https://skybiometry.com/wp-content/uploads/2015/09/work-2-1-e1451907791984.jpg -O people.jpg
with open("./people.jpg", "rb"):
image = Image.open("./people.jpg")
display(image)
###Output
_____no_output_____
###Markdown
Now let's actually run recognition on the downloaded image. Run the cell below.
###Code
with open('./people.jpg', 'rb') as images_file:
faces = visual_recognition.detect_faces(images_file).get_result()
print(json.dumps(faces, indent=2))
###Output
_____no_output_____
###Markdown
You can see that the faces are being detected to some extent, but from this output alone it is hard to tell which face was recognised and how. So we define the function below, which marks up the image so that the face-recognition results are easy to see.
###Code
from PIL import Image, ImageDraw,ImageFont
import os
def draw_face_area(image_file, face_detect_res):
if len(face_detect_res) < 1:
print('No face detection')
return
image = Image.open(image_file)
draw = ImageDraw.Draw(image, "RGBA")
    # Install the Japanese font
jp_font_path ='ipaexg00301/ipaexg.ttf'
if not os.path.exists(jp_font_path):
!wget https://oscdl.ipa.go.jp/IPAexfont/ipaexg00301.zip
!unzip ipaexg00301.zip
else:
print('IPA font has been already installed')
col_name = ["gender", "gender_score", "age_max", "age_min", "age_score"]
df_face_detect_res = pd.DataFrame(columns=[])
for i, faceinfo in enumerate(face_detect_res):
x0 = faceinfo['face_location']['left']
x1= x0 + faceinfo['face_location']['width']
y0 = faceinfo['face_location']['top']
y1 = y0 + faceinfo['face_location']['height']
df_face_detect_res.loc[i, "gender"] = faceinfo['gender']['gender']
df_face_detect_res.loc[i, "gender_score"] = faceinfo['gender']['score']
df_face_detect_res.loc[i, "age_max"] = faceinfo['age']['max']
df_face_detect_res.loc[i, "age_min"] = faceinfo['age']['min']
df_face_detect_res.loc[i, "age_score"] = faceinfo['age']['score']
font_size = 20
font = ImageFont.truetype(jp_font_path, font_size)
text_size = draw.textsize('88', font=font)
if not ( x1-x0 < text_size[0] or y1-y0 < text_size[1]):
while x1-x0 > text_size[0] or y1-y0 > text_size[1]:
font = ImageFont.truetype(jp_font_path, font_size)
text_size = draw.textsize('88', font=font)
font_size += 1
font = ImageFont.truetype(jp_font_path, font_size)
draw.rectangle(xy=(x0,y0, x1, y1), outline=(0, 249, 0))
draw.text(xy=(x0+5,y0+5), text=str(i), fill=(0, 249, 0), font=font)
display(image)
pd.options.display.max_rows = None
display(df_face_detect_res)
###Output
_____no_output_____
###Markdown
Define the function and run the cell below. Each face detected in the image is given a number, and the gender, approximate age and their respective scores are shown in a table.
###Code
draw_face_area('people.jpg', faces['images'][0]['faces'])
###Output
_____no_output_____
###Markdown
Sorting lists. Source: Wikipedia. In this lab we will discuss different sorting approaches that are well established in the literature. Sorting algorithms are important tools for enabling efficient access to data. The strategies can be divided into simple methods and sophisticated methods.
###Code
from estruturas.pilha import *
from estruturas.fila import *
from estruturas.deque import *
from estruturas.pilha_dinamica import *
from estruturas.fila_dinamica import *
from estruturas.lista import *
###Output
_____no_output_____
###Markdown
Simple methods
Insertion Sort
Insertion Sort, or sort by insertion, is a sorting algorithm that, given a structure (array, list), builds the final sorted sequence one element at a time, one insertion at a time. Like other quadratic sorting algorithms, it is quite efficient for small inputs, being the most efficient among algorithms of this class. We can compare Insertion Sort to the way some people organise their hand in a card game. Imagine you are playing cards. The cards in your hand are sorted. You receive a new card and must place it in the correct position in your hand so that the cards stay in order. Each new card added to your hand may be smaller or larger than some of the cards you already hold, so you compare the new card with the cards in your hand until you find its correct position. You insert the new card in the correct position and, once again, your hand consists of fully sorted cards. Then you receive another card and repeat the same procedure. Then another card, and another, and so on, until you receive no more cards. This is the idea behind insertion sort. Walk through the positions of the array, starting at index 1 (one). Each new position is like the new card you received, and you need to insert it into the correct place in the sorted subarray to the left of that position.
```
def insertion_sort(lista):
    for i in range(1, len(lista)):
        chave = lista[i]
        k = i
        while k > 0 and chave < lista[k - 1]:
            lista[k] = lista[k - 1]
            k -= 1
        lista[k] = chave
```
###Code
def main():
lista = Lista()
lista.insere_esq(3)
lista.insere_esq(15)
lista.insere_dir(2)
lista.insere_dir(27)
lista.insere_esq(35)
lista.insere_dir(11)
lista.insere_esq(7)
lista.insere_dir(1)
lista.insere_esq(67)
lista.insere_dir(81)
lista.insere_esq(99)
lista.insere_dir(43)
lista.insere_esq(12)
lista.mostra()
if __name__ == "__main__":
main()
###Output
12 >> 99 >> 67 >> 7 >> 35 >> 15 >> 3 >> 2 >> 27 >> 11 >> 1 >> 81 >> 43
###Markdown
1. Adapt the algorithm presented above to sort our list:
###Code
def ordena_insercao(lista):
    # your implementation here
    # 1. create a new structure
    # 2. insert the first two elements of the previous structure
    # 3. take the i-th element of the previous structure and place it in sorted position relative to the new structure
    # 4. i + 1, repeat step 3
return lista
###Output
_____no_output_____
###Markdown
Selection Sort
Selection sort is a sorting algorithm based on repeatedly moving the smallest value in the array to the first position (or the largest, depending on the required order), then the second smallest value to the second position, and so on with the remaining n − 1 elements, until only the last two elements are left.
Advantages
* It is a simple algorithm to implement compared with the others.
* It does not need an auxiliary array (it works in-place).
* Because it does not use an auxiliary array, it uses less memory.
* It is one of the fastest methods for sorting small arrays.
Disadvantages
* It is one of the slowest methods for large arrays.
* It is not stable.
* It always performs (n² − n)/2 comparisons, regardless of whether the array is already sorted.
Implementation in C
```
void selection_sort(int num[], int tam) {
    int i, j, min, aux;
    for (i = 0; i < (tam-1); i++) {
        min = i;
        for (j = (i+1); j < tam; j++) {
            if (num[j] < num[min])
                min = j;
        }
        if (i != min) {
            aux = num[i];
            num[i] = num[min];
            num[min] = aux;
        }
    }
}
```
2. Implement the algorithm in Python for our list structure.
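Before adapting it to the lab's `Lista` class, a minimal Python sketch of selection sort over a plain list may help (illustration only; the exercise below still expects the `Lista`-based version):
```
def selection_sort(valores):
    # Repeatedly move the smallest remaining element to the front
    for i in range(len(valores) - 1):
        menor = i
        for j in range(i + 1, len(valores)):
            if valores[j] < valores[menor]:
                menor = j
        if menor != i:
            valores[i], valores[menor] = valores[menor], valores[i]
    return valores

print(selection_sort([12, 99, 67, 7, 35, 15, 3]))
```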
###Code
def ordena_selecao(lista):
    # your implementation here
return lista
###Output
_____no_output_____
###Markdown
Bubble Sort
Bubble sort, or sorting by "flotation" (literally "by bubbles"), is one of the simplest sorting algorithms. The idea is to traverse the array several times, and on each pass float the largest element of the sequence to the top. This movement resembles the way bubbles in a tank of water rise to their own level, which is where the algorithm's name comes from. In the best case the algorithm performs n relevant operations, where n is the number of elements in the array; in the worst case it performs n² operations. The complexity of this algorithm is quadratic, so it is not recommended for programs that need speed and operate on large amounts of data.
Code in C (the two `#include` lines were garbled in the source and are reconstructed here as the standard headers the code needs):
```
#include <stdio.h>
#include <stdlib.h>

void swap(int *a, int *b){
    int temp = *a;
    *a = *b;
    *b = temp;
}

void bubbleSort(int *v, int n){
    if (n < 1) return;
    for (int i = 0; i < n; i++)
        if (v[i] > v[i+1])
            swap(&v[i], &v[i+1]);
    bubbleSort(v, n-1);
}

int main(){
    int tam, i, *v;
    scanf("%d", &tam);
    v = (int*)malloc(tam*sizeof(int));
    for (i = 0; i < tam; i++) scanf("%d", &v[i]);
    bubbleSort(v, tam-1);
    for (i = 0; i < tam; i++) printf("%d ", v[i]);
    return 0;
}
```
3. Implement the algorithm in Python for our list.
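For reference, a minimal iterative Python sketch of bubble sort over a plain list (illustration only; the exercise below uses our `Lista` structure):
```
def bubble_sort(valores):
    n = len(valores)
    for i in range(n - 1):
        # After each pass the largest remaining element has "floated" to the end
        for j in range(n - 1 - i):
            if valores[j] > valores[j + 1]:
                valores[j], valores[j + 1] = valores[j + 1], valores[j]
    return valores

print(bubble_sort([5, 1, 4, 2, 8]))
```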
###Code
def ordena_bolha(lista):
    # your implementation here
return lista
###Output
_____no_output_____
###Markdown
Sophisticated methods
Merge Sort
Merge sort is an example of a divide-and-conquer comparison-based sorting algorithm. Its basic idea is to Divide (split the problem into several subproblems and solve those subproblems through recursion) and Conquer (once all the subproblems have been solved, combine their solutions). Because Merge Sort uses recursion, it has a high memory and runtime cost, which makes this technique not very efficient for some problems.
Disadvantages
* It uses recursive functions;
* Extra memory usage. The algorithm creates a copy of the array at each level of the recursive call, adding up to O(n log n) additional memory.
Code in C
```
void merge(int vetor[], int comeco, int meio, int fim) {
    int com1 = comeco, com2 = meio+1, comAux = 0, tam = fim-comeco+1;
    int *vetAux;
    vetAux = (int*)malloc(tam * sizeof(int));
    while (com1 <= meio && com2 <= fim) {
        if (vetor[com1] < vetor[com2]) {
            vetAux[comAux] = vetor[com1];
            com1++;
        } else {
            vetAux[comAux] = vetor[com2];
            com2++;
        }
        comAux++;
    }
    while (com1 <= meio) {   // If there are still elements in the first half
        vetAux[comAux] = vetor[com1];
        comAux++;
        com1++;
    }
    while (com2 <= fim) {    // If there are still elements in the second half
        vetAux[comAux] = vetor[com2];
        comAux++;
        com2++;
    }
    for (comAux = comeco; comAux <= fim; comAux++) {   // Move the elements back into the original array
        vetor[comAux] = vetAux[comAux-comeco];
    }
    free(vetAux);
}

void mergeSort(int vetor[], int comeco, int fim) {
    if (comeco < fim) {
        int meio = (fim+comeco)/2;
        mergeSort(vetor, comeco, meio);
        mergeSort(vetor, meio+1, fim);
        merge(vetor, comeco, meio, fim);
    }
}
```
4. Implement Merge Sort in Python.
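A minimal Python sketch of the same divide-and-conquer idea over a plain list (illustration only; the exercise below uses our `Lista` structure):
```
def merge_sort(valores):
    if len(valores) <= 1:
        return valores
    meio = len(valores) // 2
    esquerda = merge_sort(valores[:meio])
    direita = merge_sort(valores[meio:])
    # Merge the two sorted halves
    resultado, i, j = [], 0, 0
    while i < len(esquerda) and j < len(direita):
        if esquerda[i] <= direita[j]:
            resultado.append(esquerda[i])
            i += 1
        else:
            resultado.append(direita[j])
            j += 1
    return resultado + esquerda[i:] + direita[j:]

print(merge_sort([38, 27, 43, 3, 9, 82, 10]))
```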
###Code
def ordena_merge(lista):
    # Implementation available at:
    # https://github.com/VanessaSilva99/EstruturaDeDados2/blob/main/Lab03/Lab02/Merge_Sort
return lista
###Output
_____no_output_____
###Markdown
Quicksort
Quicksort is a very fast and efficient sorting method, invented by C.A.R. Hoare in 1960[1] while he was visiting Moscow University as a student. At the time, Hoare was working on a machine-translation project for the National Physical Laboratory. He created quicksort while trying to translate an English dictionary into Russian, sorting the words, with the goal of reducing the original problem into subproblems that could be solved more easily and quickly. It was published in 1962 after a series of refinements.[2] Quicksort is a non-stable comparison-based sorting algorithm. It adopts the divide-and-conquer strategy: rearrange the keys so that the "smaller" keys come before the "larger" keys, then recursively sort the two sublists of smaller and larger keys until the whole list is sorted.[3]
The steps are:
* Choose an element of the list, called the pivot;
* Partition: rearrange the list so that all elements before the pivot are smaller than it, and all elements after the pivot are larger than it. At the end of this process the pivot is in its final position and there are two unsorted sublists. This operation is called partitioning;
* Recursively sort the sublist of smaller elements and the sublist of larger elements;
The base case of the recursion is a list of size zero or one, which is always sorted. The process is finite, because in each iteration at least one element is placed in its final position and is no longer touched in the following iteration. The choice of pivot and the steps of the partition can be done in different ways, and the choice of a specific implementation strongly affects the algorithm's performance.
```
algorithm quicksort(A, lo, hi) is
    if lo < hi then
        p := particiona(A, lo, hi)
        quicksort(A, lo, p - 1)
        quicksort(A, p + 1, hi)

algorithm particiona(A, lo, hi) is
    pivot := A[hi]
    i := lo - 1
    for j := lo to hi - 1 do
        if A[j] < pivot then
            i := i + 1
            swap A[i] with A[j]
    if pivot < A[i + 1] then
        swap A[i + 1] with A[hi]
    return i + 1
```
5. Implement Quicksort in Python for our list.
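A minimal (out-of-place) Python sketch of quicksort over a plain list; the pseudocode above uses an in-place partition, which is the variant the exercise below expects (illustration only):
```
def quick_sort(valores):
    if len(valores) <= 1:
        return valores
    pivo = valores[-1]                                  # last element as pivot, as in the pseudocode
    menores = [x for x in valores[:-1] if x < pivo]
    maiores = [x for x in valores[:-1] if x >= pivo]
    return quick_sort(menores) + [pivo] + quick_sort(maiores)

print(quick_sort([10, 80, 30, 90, 40, 50, 70]))
```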
###Code
def ordena_quick(lista):
    # Implementation and tests available at:
    # https://github.com/VanessaSilva99/EstruturaDeDados2/blob/main/Lab03/Lab02/Quick_Sort
    # https://github.com/VanessaSilva99/EstruturaDeDados2/blob/main/Lab03/Lab02/test_Quick_Sort
return lista
###Output
_____no_output_____
###Markdown
1) Construct a list (shoppingList) including 'potatoes', 'carrots', 'cod' and 'sprouts'
###Code
shoppingList= ['potatoes','carrots','cod','sprouts']
shoppingList
###Output
_____no_output_____
###Markdown
2) Get the second and the last element of the list
###Code
print(shoppingList[1],shoppingList[-1])
###Output
carrots sprouts
###Markdown
3) Iterate through the list
###Code
for i in range(len(shoppingList)):
print("we should eat "+ shoppingList[i])
for product in shoppingList:
print('we should eat '+ product)
###Output
we should eat potatoes
we should eat carrots
we should eat cod
we should eat sprouts
###Markdown
4) Add the following elements to the shoppingList: orange and lime
###Code
shoppingList.append("orange")
print(shoppingList)
shoppingList.append("lime")
print(shoppingList)
###Output
['potatoes', 'carrots', 'cod', 'sprouts', 'orange']
['potatoes', 'carrots', 'cod', 'sprouts', 'orange', 'lime']
###Markdown
5) Remove the carrots, the first element and the last element of the shoppingList list
###Code
shoppingList.pop(0)
shoppingList.pop(-1)
shoppingList.remove('carrots')
shoppingList
###Output
_____no_output_____
###Markdown
6) How many fruits are there in the shopping list
###Code
len(shoppingList)
shoppingList= ['potatoes','carrots','cod','sprouts']
shoppingList
###Output
_____no_output_____
###Markdown
7) Obtain the first 3 elements of the list
###Code
print(shoppingList[:3])
###Output
['potatoes', 'carrots', 'cod']
###Markdown
8) Add the fruit "Ananas" in the 3rd position
###Code
shoppingList[2] = "Ananas"
shoppingList
###Output
_____no_output_____
###Markdown
9) Reverse the list
###Code
shoppingList.reverse()
shoppingList
###Output
_____no_output_____
###Markdown
10) Sort the list
###Code
shoppingList.sort()
shoppingList
###Output
_____no_output_____
###Markdown
11) What is the result of
###Code
shopping = shoppingList
shoppingListCopy = shoppingList[:]
print(shopping)
###Output
['Ananas', 'carrots', 'potatoes', 'sprouts']
###Markdown
Here `shopping` is just another reference to the same list object (while `shoppingListCopy` is an actual copy), so printing it shows exactly the same list.
###Code
###Output
_____no_output_____
###Markdown
12) What is the result of
###Code
shopping = shoppingList
shoppingList.append("orange")
print(shopping)
###Output
['Ananas', 'carrots', 'potatoes', 'sprouts', 'orange']
###Markdown
Because `shopping` and `shoppingList` refer to the same list object, appending "orange" adds it to the end of that shared list, so `shopping` also shows the new element, which is not in sorted order. 13) Remove all the items from the shoppingList
###Code
shoppingList.clear()
###Output
_____no_output_____
###Markdown
14) What is the result of
###Code
newPurchases= ["bananas", "beans", "rice"]
print (newPurchases [1])
newPurchases [0] = "apple"
newPurchases
###Output
_____no_output_____
###Markdown
15) Create a dictionary including the following elements: orange, apple, pear, grape and peach. The keys are 1 to 5. Iterate through the key-value pairs. The result must be "orange is number 1".
###Code
thisdict = {
1:"orange",
2:"apple",
3:"pear",
4:"grape",
5:"peach"
}
print(thisdict)
for key,value in thisdict.items():
print(value +" is number "+ str(key))
###Output
orange is number 1
apple is number 2
pear is number 3
grape is number 4
peach is number 5
###Markdown
16) Create a weekList that is composed of several lists, each one corresponding to a day.
###Code
day1 = ["monday"]
day2 = ["tuesday"]
day3 = ["wednesday"]
day4 = ["thursday"]
day5 = ["friday"]
day6= ["saturday"]
day7=["sunday"]
weeklist= day1+ day2+ day3+ day4+ day5 + day6 + day7
weeklist
###Output
_____no_output_____
###Markdown
17) Create a list where x belongs to a list of values from 1 to 100 and generate a new list y, where $y = 3x^2 + 2x + 4$.
###Code
x = list(range(1,101))
y = [3*v**2 + 2*v + 4 for v in x]
y
###Output
_____no_output_____
###Markdown
CITS 5508 LAB SHEET 2: CLASSIFICATION ON FOREST TYPE MAPPING DATASET
**Name: Thanh Duy Tang**
**Student Number: 22803018**
**Date created: 10th March 2020**
**Last modified: 19th March 2020**
1. Setup
Before moving on to loading the data, we need to make sure that Matplotlib figures render inline and prepare a function to save the figures. Moreover, we should use Python >= 3.5 as well as Scikit-Learn >= 0.20.
###Code
#Python >= 3.0 is required
import sys
assert sys.version_info >= (3,5)
#Scikit-Learn >= 0.20
import sklearn
assert sklearn.__version__ >= '0.20'
#Get the pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
#Save the figures
import os
LAB2_ROOT_DIR = "."
CHAPTER_ID = "LAB SHEET 2"
IMAGES_PATH = os.path.join(LAB2_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
#Ignore useless warnings
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")
###Output
_____no_output_____
###Markdown
2. Loading data
First, we need to download the zip file from http://archive.ics.uci.edu/ml/datasets/Forest+type+mapping and extract the two CSV files, training and testing, into the same directory as this lab file.
###Code
#Load the Pandas libraries with alias "pd"
import os
import pandas as pd
#Read data from 2 files "traning.csv" and "testing.csv"
train_set = pd.read_csv("training.csv")
test_origin_set = pd.read_csv("testing.csv")
#Take a look at some lines of training dataset
train_set.head()
#Get an overview of training dataset
train_set.info()
train_set.describe()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 325 entries, 0 to 324
Data columns (total 28 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 class 325 non-null object
1 b1 325 non-null int64
2 b2 325 non-null int64
3 b3 325 non-null int64
4 b4 325 non-null int64
5 b5 325 non-null int64
6 b6 325 non-null int64
7 b7 325 non-null int64
8 b8 325 non-null int64
9 b9 325 non-null int64
10 pred_minus_obs_H_b1 325 non-null float64
11 pred_minus_obs_H_b2 325 non-null float64
12 pred_minus_obs_H_b3 325 non-null float64
13 pred_minus_obs_H_b4 325 non-null float64
14 pred_minus_obs_H_b5 325 non-null float64
15 pred_minus_obs_H_b6 325 non-null float64
16 pred_minus_obs_H_b7 325 non-null float64
17 pred_minus_obs_H_b8 325 non-null float64
18 pred_minus_obs_H_b9 325 non-null float64
19 pred_minus_obs_S_b1 325 non-null float64
20 pred_minus_obs_S_b2 325 non-null float64
21 pred_minus_obs_S_b3 325 non-null float64
22 pred_minus_obs_S_b4 325 non-null float64
23 pred_minus_obs_S_b5 325 non-null float64
24 pred_minus_obs_S_b6 325 non-null float64
25 pred_minus_obs_S_b7 325 non-null float64
26 pred_minus_obs_S_b8 325 non-null float64
27 pred_minus_obs_S_b9 325 non-null float64
dtypes: float64(18), int64(9), object(1)
memory usage: 71.2+ KB
###Markdown
There are 28 attributes, which can be easily seen in the table above and the list below: class, b1, b2, b3, b4, b5, b6, b7, b8, b9, ..., pred_minus_obs_S_b9. There are 325 instances in the training set and 198 instances in the test set, which means the data size is relatively small by Machine Learning standards, but we can give it a try with this small dataset and then move on to bigger ones. Both the training and testing sets have the same format. There are no missing values in either dataset. All attributes are numerical, except "class".
###Code
##Visualize the training dataset
import matplotlib.pyplot as plt
train_set.hist(bins=50, figsize=(20,15))
save_fig("attribute_histogram_plots")
plt.show()
###Output
Saving figure attribute_histogram_plots
###Markdown
We can notice a few things in these histograms:
1. Most attributes have similar scales. We may not need to focus on the columns after "b9".
2. Most instances of "b4", "b6" and "b7" range from 80 to 120.
3. Instances of the other columns range from 20 to 60.
=> These histograms suggest the data is in a form that makes it easy for Machine Learning algorithms to detect patterns.
###Code
#Split training set into training set and test set. Original test set is kept for cross validation.
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(train_set, test_size = 0.2, random_state = 1)
#Drop all columns with the names begin with "pred_minus_obs".
import pandas as pd
train_set.drop(train_set.iloc[:, 10:], inplace = True, axis = 1)
test_set.drop(test_set.iloc[:, 10:], inplace = True, axis = 1)
test_origin_set.drop(test_set.iloc[:, 10:], inplace = True, axis = 1)
#Show a few lines of training dataset after drop out columns as required.
train_set.head()
#At this time, the datas of both set are clean as well.
#Count the number of instances for each class label and visualize the graph to easily see the pattern.
train_set['class'].value_counts()
#Visualize
%matplotlib inline
import matplotlib.pyplot as plt
train_set['class'].hist(bins=50, figsize=(20,15))
save_fig("class_histogram_plot")
plt.show()
###Output
Saving figure class_histogram_plot
###Markdown
As we can see, the number of instances across the 4 classes does not fluctuate much; it ranges from around 40 to 140. So this is a reasonably balanced dataset. 4. Normalization
###Code
#We need to take all numerical columns
train_set_num = train_set.drop(train_set.iloc[:,0:1], axis = 1)
test_set_num = test_set.drop(test_set.iloc[:,0:1], axis = 1)
test_origin_set_num = test_origin_set.drop(test_origin_set.iloc[:,0:1], axis = 1)
train_set_num.head()
#Find the correlation between attributes
corr_matrix = train_set.corr()
corr_matrix["b1"].sort_values(ascending = False)
#Plot scatter matrix with pandas
from pandas.plotting import scatter_matrix
attributes = ["b1","b4","b7","b9"]
scatter_matrix(train_set[attributes], figsize = (12,8))
save_fig("scatter_matrix_plot")
###Output
Saving figure scatter_matrix_plot
###Markdown
As we can see, b4 and b7 are promising attributes for predicting b1. There are upward trends in these scatterplots. Because the data size is really small, the points are somewhat dispersed; however, the correlation of these attributes with b1 is relatively high. To conclude, we should try to remove some unrelated attributes so that the algorithms can work through the data quickly.
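A small sketch of what such a correlation-based filter could look like, using the `corr_matrix` computed above (the 0.5 cut-off is only an illustrative assumption; the lab itself keeps all nine bands):
```
# Keep only the bands whose absolute correlation with b1 exceeds a threshold
strong = corr_matrix["b1"].abs()
selected = strong[strong > 0.5].index.tolist()   # note: includes b1 itself
print(selected)
```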
###Code
#Zoom in the most correlated attributes on the scatterplot
train_set.plot(kind ="scatter", x ="b1", y="b4")
train_set.plot(kind ="scatter", x="b1", y="b7")
save_fig("b1_b4_vs_b7_value_scatterplot")
#Excute the code of the categorical columns of training and testing set
train_set_cat = train_set["class"]
test_set_cat = test_set["class"]
test_origin_cat = test_origin_set["class"]
train_set_cat
#Transform all numerical columns by using StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
num_pipeline = Pipeline([
('std_scaler', StandardScaler())
])
train_set_num_tr = num_pipeline.fit_transform(train_set_num)
test_set_num_tr = num_pipeline.fit_transform(test_set_num)
test_origin_num_tr = num_pipeline.fit_transform(test_origin_set_num)
train_set_num_tr
test_set_num_tr
###Output
_____no_output_____
###Markdown
5. Classification 5.1 Support Vector Machine
###Code
#Assign x_train, y_train, x_test, y_test and apply SVM into this
from sklearn.svm import SVC
x_train, y_train, x_test, y_test = train_set_num_tr, train_set_cat, test_set_num_tr, test_set_cat
svm_clf = SVC(gamma="auto", random_state=1)
svm_clf.fit(x_train, y_train)
svm_y_pred = svm_clf.predict(x_test)
#Draw the confusion matrix of svm_y_pred
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, svm_y_pred)
#This code shows the label of the confusion matrix above.
svm_clf.classes_
###Output
_____no_output_____
###Markdown
Here are some comments about the confusion matrix:
- 21 values were correctly classified as "d".
- 4 values were correctly classified as "h".
- 7 values were correctly classified as "o".
- 22 values were correctly classified as "s".
- Reading down the "d" row, 1 value that should have been "s" was classified as "d".
- For the "h" row, 3 values that should have been "s" were classified as "h".
- 2 values from the "o" row were classified as "o", but they should have been "d".
- 5 values were classified as "s", but they should have been "d" (2) and "h" (3).
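Per-class precision and recall complement these confusion-matrix observations. A small sketch using scikit-learn's `classification_report` with the `y_test` and `svm_y_pred` computed above:
```
from sklearn.metrics import classification_report

print(classification_report(y_test, svm_y_pred))
```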
###Code
#Compute accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, svm_y_pred)
###Output
_____no_output_____
###Markdown
The accuracy is 83.08%, which is quite good: the model predicts most of the classes correctly. 5.2 Experimenting with some hyperparameters Kernel = sigmoid
###Code
#Use kernel : sigmoid for classification
svm_clf_sig = SVC(kernel="sigmoid", random_state = 1)
svm_clf_sig.fit(x_train, y_train)
svm_y_pred_sig = svm_clf_sig.predict(x_test)
svm_y_pred_sig
#Draw the confusion matrix of svm_y_pred_sig
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, svm_y_pred_sig)
###Output
_____no_output_____
###Markdown
Here are some comments about the confusion matrix:
- 16 values were correctly classified as "d".
- 7 values were correctly classified as "h".
- 46 values were correctly classified as "o".
- 23 values were correctly classified as "s".
- Reading down the "d" row, 6 values that should have been "o" (3) and "s" (3) were classified as "d".
- For the "h" row, there are no misclassifications.
- 5 values from the "o" row were classified as "o", but they should have been "d" (5).
- 4 values were classified as "s", but some should have been "h" (2).
###Code
#Compute accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, svm_y_pred_sig)
###Output
_____no_output_____
###Markdown
The accuracy is 76.92%, which is quite low, so this model is not reliable enough for the next steps. Kernel = linear
###Code
#Use kernel : linear for classification
svm_clf_lin = SVC(kernel="linear", random_state = 1)
svm_clf_lin.fit(x_train, y_train)
svm_y_pred_lin = svm_clf_lin.predict(x_test)
svm_y_pred_lin
#Draw the confusion matrix of svm_y_pred_lin
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, svm_y_pred_lin)
###Output
_____no_output_____
###Markdown
Here are some comments about the confusion matrix:- 21 values were correctly classified as "d".- 5 values were correctly classified as "h".- 8 values were correctly classified as "o".- 23 values were correctly classified as "s".- Reading down the "d" row, 1 value that should have been "s" was classified as "d".- In the "h" row, 2 values that should have been "s" were classified as "h".- 1 value from the "o" row was classified as "o", but it should have been "d".- 4 values were classified as "s", but they should have been "d" (2) and "h" (2).
###Code
#Compute accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, svm_y_pred_lin)
###Output
_____no_output_____
###Markdown
The accuracy is 87.69%, which is higher than the SVC with gamma="auto" and also than the "poly" and "sigmoid" kernels. It approaches a very good rate (90%), so we can do more with this. Kernel = "poly"
###Code
#Use kernel :polynomial for classification
svm_clf_poly = SVC(kernel="poly", degree = 3, random_state = 1)
svm_clf_poly.fit(x_train, y_train)
svm_y_pred_poly = svm_clf_poly.predict(x_test)
svm_y_pred_poly
#Draw the confusion matrix of svm_y_pred_poly
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, svm_y_pred_poly)
###Output
_____no_output_____
###Markdown
Here are some comments about the confusion matrix:- 9 values were correctly classified as "d".- 4 values were correctly classified as "h".- 7 values were correctly classified as "o".- 26 values were correctly classified as "s".- Reading down the "d" row, 13 values that should have been "s" were classified as "d".- In the "h" row, 3 values that should have been "s" were classified as "h".- 2 values from the "o" row were classified as "o", but they should have been "d" (1) and "s" (1).- 1 value was classified as "s", but it should have been "d".
###Code
#Compute accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, svm_y_pred_poly)
###Output
_____no_output_____
###Markdown
The accuracy is 70.77%, which is the lowest ratio so far. Stochastic Gradient Descent
###Code
#Apply Stochastic Gradient Descent Classifier into dataset. loss = hinge
from sklearn import linear_model
sgd_clf_hinge = linear_model.SGDClassifier(loss = "hinge", random_state = 1)
sgd_clf_hinge.fit(x_train, y_train)
sgd_y_pred_hinge = sgd_clf_hinge.predict(x_test)
sgd_y_pred_hinge
#Draw the confusion matrix of svm_y_pred_hinge
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, sgd_y_pred_hinge)
#This code shows the label of the confusion matrix above.
sgd_clf_hinge.classes_
###Output
_____no_output_____
###Markdown
Here are some comments about the confusion matrix:- 18 values were correctly classified as "d".- 4 values were correctly classified as "h".- 8 values were correctly classified as "o".- 25 values were correctly classified as "s".- Reading down the "d" row, 4 values that should have been "o" (1) and "s" (3) were classified as "d".- In the "h" row, 3 values that should have been "s" were classified as "h".- 1 value from the "o" row was classified as "o", but it should have been "d".- 2 values were classified as "s", but they should have been "d".
###Code
#Compute accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, sgd_y_pred_hinge)
###Output
_____no_output_____
###Markdown
The accuracy is 84.62%, which is quite good: the model predicts most classes correctly. Loss = "log"
###Code
#Use loss: log for classification
from sklearn import linear_model
sgd_clf_log = linear_model.SGDClassifier(loss ='log', random_state = 1)
sgd_clf_log.fit(x_train, y_train)
sgd_y_pred_log = sgd_clf_log.predict(x_test)
sgd_y_pred_log
#Draw the confusion matrix of svm_y_pred_log
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, sgd_y_pred_log)
###Output
_____no_output_____
###Markdown
Here are some comments about the confusion matrix:- 19 values were correctly classified as "d".- 4 values were correctly classified as "h".- 8 values were correctly classified as "o".- 20 values were correctly classified as "s".- Reading down the "d" row, 3 values that should have been "o" (1) and "s" (2) were classified as "d".- In the "h" row, 3 values that should have been "d" were classified as "s".- 1 value from the "o" row was classified as "o", but it should have been "d".- 7 values were classified as "s", but they should have been "d" (5) and "h" (2).
###Code
#Compute accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, sgd_y_pred_log)
###Output
_____no_output_____
###Markdown
The accuracy is 78.46%, which is not good because it falls below 80%. Loss = "modified_huber"
###Code
#Use modified_huber for classification
from sklearn import linear_model
sgd_clf_hub = linear_model.SGDClassifier(loss ='modified_huber', random_state = 1)
sgd_clf_hub.fit(x_train, y_train)
sgd_y_pred_hub = sgd_clf_hub.predict(x_test)
sgd_y_pred_hub
#Draw the confusion matrix of svm_y_pred_hub
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, sgd_y_pred_hub)
###Output
_____no_output_____
###Markdown
Here are some comments about the confusion matrix:- 18 values were correctly classified as "d".- 4 values were correctly classified as "h".- 8 values were correctly classified as "o".- 23 values were correctly classified as "s".- Reading down the "d" row, 4 values that should have been "o" (1) and "s" (3) were classified as "d".- In the "h" row, 3 values that should have been "s" were classified as "h".- 1 value from the "o" row was classified as "o", but it should have been "d".- 4 values were classified as "s", but they should have been "d".
###Code
#Compute accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, sgd_y_pred_hub)
###Output
_____no_output_____
###Markdown
The accuracy is 81.54%, which is quite good. Cross Validation
###Code
from sklearn.model_selection import cross_val_score
cross_val_score(svm_clf_lin, test_origin_num_tr, test_origin_cat, cv=3, scoring="accuracy")
###Output
_____no_output_____ |
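###Markdown
Instead of trying kernels and losses one at a time, a grid search can automate the comparison. The sketch below reuses the `x_train`, `y_train`, `x_test` and `y_test` arrays defined above; the parameter values in the grid are only illustrative, not tuned recommendations.
###Code
#Hedged sketch: exhaustive search over a small, illustrative SVC parameter grid
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
param_grid = {"kernel": ["linear", "rbf", "sigmoid"], "C": [0.1, 1, 10]}
grid_search = GridSearchCV(SVC(random_state=1), param_grid, cv=3, scoring="accuracy")
grid_search.fit(x_train, y_train)
print(grid_search.best_params_, grid_search.best_score_)
print("Test accuracy:", grid_search.score(x_test, y_test))
###Output
_____no_output_____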
ML-101 Modules/Module 03/Lesson 02/Practice 2/Winequality - Practice Code Part 1&2.ipynb | ###Markdown
"Wine Quality." Part 1: Import, Load Data. * Import libraries
###Code
# import standard libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from scipy.stats import norm
%matplotlib inline
sns.set()
import sklearn.metrics as metrics
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, roc_auc_score
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
* Read data from ‘.csv’ file
###Code
# read data from '.csv' file
dataset = pd.read_csv('winequality.csv')
###Output
_____no_output_____
###Markdown
Part 2: Exploratory Data Analysis. * Info
###Code
# print the full summary of the dataset
dataset.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 4898 entries, 0 to 4897
Data columns (total 12 columns):
fixed acidity 4898 non-null float64
volatile acidity 4898 non-null float64
citric acid 4898 non-null float64
residual sugar 4898 non-null float64
chlorides 4898 non-null float64
free sulfur dioxide 4898 non-null float64
total sulfur dioxide 4898 non-null float64
density 4898 non-null float64
pH 4898 non-null float64
sulphates 4898 non-null float64
alcohol 4898 non-null float64
quality 4898 non-null int64
dtypes: float64(11), int64(1)
memory usage: 459.2 KB
###Markdown
The dataset consists of 4898 rows and 12 columns; it has 2 datatypes: float64 (11) and int64 (1); and it has no missing values. * Head
###Code
# preview of the first 5 lines of the loaded data
dataset.head()
###Output
_____no_output_____
###Markdown
* Describe
###Code
dataset.describe()
###Output
_____no_output_____
###Markdown
Suppose you are given this dataset and asked a specific question: classify which wines are good and which are not. You have no "Y" attribute with the answer. However, there is a useful auxiliary attribute, "quality", from which we can create our "Y" attribute with the answer for training the model. The "quality" attribute takes values from 3 to 9, where 3 means "Not Good" and 9 means "Good" wine quality. The higher the number, the better the wine. * Encoding 'quality' attribute
###Code
# lambda function; wine quality from 3-6 == 0, from 7-9 == 1.
dataset['quality'] = dataset.quality.apply(lambda q: 0 if q <= 6 else 1)
# preview of the first 5 lines of the loaded data
dataset.head()
###Output
_____no_output_____
###Markdown
* 'quality' attribute value counts and visualisation
###Code
print('Not good wine', round(dataset['quality'].value_counts()[0]/len(dataset) * 100,2), '% of the dataset')
print('Good wine', round(dataset['quality'].value_counts()[1]/len(dataset) * 100,2), '% of the dataset')
dataset['quality'].value_counts()
# visualisation plot
dataset['quality'].value_counts().plot(x = dataset['quality'], kind='bar')
###Output
_____no_output_____
###Markdown
There are 78.36 % of 'Not Good' quality wines and only 21.64 % of 'Good' quality wines in our dataset. This means that our dataset is imbalanced. * Resampling of an imbalanced dataset
###Code
# class count
#count_class_0, count_class_1 = dataset.quality.value_counts()
# divide by class
#class_0 = dataset[dataset['quality'] == 0]
#class_1 = dataset[dataset['quality'] == 1]
###Output
_____no_output_____
###Markdown
* Random under-sampling of an imbalanced dataset
###Code
#class_0_under = class_0.sample(count_class_1)
#dataset_under = pd.concat([class_0_under, class_1], axis=0)
#print('Random under-sampling:')
#print(dataset_under.quality.value_counts())
#dataset_under.quality.value_counts().plot(kind='bar', title='Count (target)');
###Output
_____no_output_____
###Markdown
* Random over-sampling of an imbalanced dataset
###Code
#class_1_over = class_1.sample(count_class_0, replace=True)
#dataset_over = pd.concat([class_0, class_1_over], axis=0)
#print('Random over-sampling:')
#print(dataset_over.quality.value_counts())
#dataset_over.quality.value_counts().plot(kind='bar', title='Count (target)');
###Output
_____no_output_____
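###Markdown
An alternative to resampling, sketched below, is to keep the data as-is and let the classifier reweight the classes, e.g. with scikit-learn's `class_weight='balanced'` option. The estimator here is only constructed for illustration and is not fitted yet, since the train/test split happens later in the notebook.
###Code
# Sketch: class weighting instead of under-/over-sampling (illustrative, not fitted here)
from sklearn.linear_model import LogisticRegression
log_reg_balanced = LogisticRegression(class_weight='balanced', max_iter=1000)
log_reg_balanced
###Output
_____no_output_____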
###Markdown
* Initialisation of target
###Code
# initialisation of target
target = dataset['quality']
# for under-sampling dataset
#target_under = dataset_under['quality']
# for over-sampling dataset
#target_over = dataset_over['quality']
###Output
_____no_output_____
###Markdown
* Drop column 'quality'
###Code
dataset = dataset.drop(columns=['quality'])
# for under-sampling dataset
#dataset_under = dataset_under.drop(columns=['quality'])
# for over-sampling dataset
#dataset_over = dataset_over.drop(columns=['quality'])
###Output
_____no_output_____
###Markdown
"Wine Quality." _"Quality ratings of Portuguese white wines" (Classification task)._ Table of Contents Part 0: Introduction OverviewThe dataset that's we see here contains 12 columns and 4898 entries of data about Portuguese white wines. **Метаданные:** * **fixed acidity** * **volatile acidity*** **citric acid** * **residual sugar** * **chlorides** * **free sulfur dioxide** * **total sulfur dioxide*** **density** * **pH** * **sulphates** * **alcohol** * **quality** - score between 3 and 9 Questions: Predict which wines are 'Good/1' and 'Not Good/0' (use binary classification; check balance of classes; calculate perdictions; choose the best model) [Part 1: Import, Load Data](Part-1:-Import,-Load-Data.)* Import libraries, Read data from ‘.csv’ file [Part 2: Exploratory Data Analysis](Part-2:-Exploratory-Data-Analysis.)* Info, Head, Describe* Encoding 'quality' attribute* 'quality' attribute value counts and visualisation* Resampling of an imbalanced dataset* Random under-sampling of an imbalanced dataset* Random over-sampling of an imbalanced dataset [Part 3: Data Wrangling and Transformation](Part-3:-Data-Wrangling-and-Transformation.)* Creating datasets for ML part* StandardScaler* 'Train\Test' splitting method [Part 4: Machine Learning](Part-4:-Machine-Learning.)* Build, train and evaluate models without hyperparameters * Logistic Regression, K-Nearest Neighbors, Decision Trees * Classification report * Confusion Matrix * ROC-AUC score* Build, train and evaluate models with hyperparameters * Logistic Regression, K-Nearest Neighbors, Decision Trees * Classification report * Confusion Matrix * ROC-AUC score [Conclusion](Conclusion.) Part 1: Import, Load Data. * Import libraries
###Code
# import standard libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from scipy.stats import norm
%matplotlib inline
sns.set()
import sklearn.metrics as metrics
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, roc_auc_score
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
* Read data from ‘.csv’ file
###Code
# read data from '.csv' file
dataset = pd.read_csv('winequality.csv')
###Output
_____no_output_____
###Markdown
Part 2: Exploratory Data Analysis. * Info
###Code
# print the full summary of the dataset
dataset.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 4898 entries, 0 to 4897
Data columns (total 12 columns):
fixed acidity 4898 non-null float64
volatile acidity 4898 non-null float64
citric acid 4898 non-null float64
residual sugar 4898 non-null float64
chlorides 4898 non-null float64
free sulfur dioxide 4898 non-null float64
total sulfur dioxide 4898 non-null float64
density 4898 non-null float64
pH 4898 non-null float64
sulphates 4898 non-null float64
alcohol 4898 non-null float64
quality 4898 non-null int64
dtypes: float64(11), int64(1)
memory usage: 459.2 KB
###Markdown
The dataset consists of 4898 rows and 12 columns; it has 2 datatypes: float64 (11) and int64 (1); and it has no missing values. * Head
###Code
# preview of the first 5 lines of the loaded data
dataset.head()
###Output
_____no_output_____
###Markdown
* Describe
###Code
dataset.describe()
###Output
_____no_output_____
###Markdown
Suppose you are given this dataset and asked a specific question: classify which wines are good and which are not. You have no "Y" attribute with the answer. However, there is a useful auxiliary attribute, "quality", from which we can create our "Y" attribute with the answer for training the model. The "quality" attribute takes values from 3 to 9, where 3 means "Not Good" and 9 means "Good" wine quality. The higher the number, the better the wine. * Encoding 'quality' attribute
###Code
# lambda function; wine quality from 3-6 == 0, from 7-9 == 1.
dataset['quality'] = dataset.quality.apply(lambda q: 0 if q <= 6 else 1)
# preview of the first 5 lines of the loaded data
dataset.head()
###Output
_____no_output_____
###Markdown
* 'quality' attribute value counts and visualisation
###Code
print('Not good wine', round(dataset['quality'].value_counts()[0]/len(dataset) * 100,2), '% of the dataset')
print('Good wine', round(dataset['quality'].value_counts()[1]/len(dataset) * 100,2), '% of the dataset')
dataset['quality'].value_counts()
# visualisation plot
dataset['quality'].value_counts().plot(x = dataset['quality'], kind='bar')
###Output
_____no_output_____
###Markdown
There are 78.36 % of 'Not Good' quality wines and only 21.64 % of 'Good' quality wines in our dataset. This means that our dataset is imbalanced. * Resampling of an imbalanced dataset
###Code
# class count
#count_class_0, count_class_1 = dataset.quality.value_counts()
# divide by class
#class_0 = dataset[dataset['quality'] == 0]
#class_1 = dataset[dataset['quality'] == 1]
###Output
_____no_output_____
###Markdown
* Random under-sampling of an imbalanced dataset
###Code
#class_0_under = class_0.sample(count_class_1)
#dataset_under = pd.concat([class_0_under, class_1], axis=0)
#print('Random under-sampling:')
#print(dataset_under.quality.value_counts())
#dataset_under.quality.value_counts().plot(kind='bar', title='Count (target)');
###Output
_____no_output_____
###Markdown
* Random over-sampling of an imbalanced dataset
###Code
#class_1_over = class_1.sample(count_class_0, replace=True)
#dataset_over = pd.concat([class_0, class_1_over], axis=0)
#print('Random over-sampling:')
#print(dataset_over.quality.value_counts())
#dataset_over.quality.value_counts().plot(kind='bar', title='Count (target)');
###Output
_____no_output_____ |
R/12-1 Helpdesk - Easy.ipynb | ###Markdown
Help Desk - Easy ScenarioA software company has been successful in selling its products to a number of customer organisations, and there is now a high demand for technical support. There is already a system in place for logging support calls taken over the telephone and assigning them to engineers, but it is based on a series of spreadsheets. With the growing volume of data, using the spreadsheet system is becoming slow, and there is a significant risk that errors will be made.
###Code
library(tidyverse)
library(DBI)
library(getPass)
drv <- switch(Sys.info()['sysname'],
Windows="PostgreSQL Unicode(x64)",
Darwin="/usr/local/lib/psqlodbcw.so",
Linux="PostgreSQL")
con <- dbConnect(
odbc::odbc(),
driver = drv,
Server = "localhost",
Database = "sqlzoo",
UID = "postgres",
PWD = getPass("Password?"),
Port = 5432
)
options(repr.matrix.max.rows=20)
###Output
-- [1mAttaching packages[22m --------------------------------------- tidyverse 1.3.0 --
[32mv[39m [34mggplot2[39m 3.3.0 [32mv[39m [34mpurrr [39m 0.3.4
[32mv[39m [34mtibble [39m 3.0.1 [32mv[39m [34mdplyr [39m 0.8.5
[32mv[39m [34mtidyr [39m 1.0.2 [32mv[39m [34mstringr[39m 1.4.0
[32mv[39m [34mreadr [39m 1.3.1 [32mv[39m [34mforcats[39m 0.5.0
-- [1mConflicts[22m ------------------------------------------ tidyverse_conflicts() --
[31mx[39m [34mdplyr[39m::[32mfilter()[39m masks [34mstats[39m::filter()
[31mx[39m [34mdplyr[39m::[32mlag()[39m masks [34mstats[39m::lag()
###Markdown
1.There are three issues that include the words "index" and "Oracle". Find the call_date for each of them```+---------------------+----------+| call_date | call_ref |+---------------------+----------+| 2017-08-12 16:00:00 | 1308 || 2017-08-16 14:54:00 | 1697 || 2017-08-16 19:12:00 | 1731 |+---------------------+----------+```
###Code
shift <- dbReadTable(con, 'Shift')
staff <- dbReadTable(con, 'Staff')
issue <- dbReadTable(con, 'Issue')
shift_type <- dbReadTable(con, 'Shift_type')
level <- dbReadTable(con, 'Level')
customer <- dbReadTable(con, 'Customer')
caller <- dbReadTable(con, 'Caller')
issue %>%
filter(str_detect(Detail, 'index') &
str_detect(Detail, 'Oracle')) %>%
select(Call_date, Call_ref)
###Output
_____no_output_____
###Markdown
2.Samantha Hall made three calls on 2017-08-14. Show the date and time for each```+---------------------+------------+-----------+| call_date | first_name | last_name |+---------------------+------------+-----------+| 2017-08-14 10:10:00 | Samantha | Hall || 2017-08-14 10:49:00 | Samantha | Hall || 2017-08-14 18:18:00 | Samantha | Hall |+---------------------+------------+-----------+```
###Code
issue %>%
inner_join(caller, by=c(Caller_id="Caller_id")) %>%
filter(First_name=='Samantha' &
Last_name=='Hall' &
as.Date(Call_date)==as.Date('2017-08-14')) %>%
select(Call_date, First_name, Last_name)
###Output
_____no_output_____
###Markdown
3.There are 500 calls in the system (roughly). Write a query that shows the number that have each status.```+--------+--------+| status | Volume |+--------+--------+| Closed | 486 || Open | 10 |+--------+--------+```
###Code
issue %>%
group_by(Status) %>%
summarise(volume=n())
###Output
_____no_output_____
###Markdown
4.Calls are not normally assigned to a manager but it does happen. How many calls have been assigned to staff who are at Manager Level?```+------+| mlcc |+------+| 51 |+------+```
###Code
issue %>%
inner_join(staff, by=c(Assigned_to="Staff_code")) %>%
inner_join(level, by=c(Level_code="Level_code")) %>%
filter(Manager=='Y') %>%
tally %>%
rename(mlcc='n')
###Output
_____no_output_____
###Markdown
5.Show the manager for each shift. Your output should include the shift date and type; also the first and last name of the manager.```+------------+------------+------------+-----------+| Shift_date | Shift_type | first_name | last_name |+------------+------------+------------+-----------+| 2017-08-12 | Early | Logan | Butler || 2017-08-12 | Late | Ava | Ellis || 2017-08-13 | Early | Ava | Ellis || 2017-08-13 | Late | Ava | Ellis || 2017-08-14 | Early | Logan | Butler || 2017-08-14 | Late | Logan | Butler || 2017-08-15 | Early | Logan | Butler || 2017-08-15 | Late | Logan | Butler || 2017-08-16 | Early | Logan | Butler || 2017-08-16 | Late | Logan | Butler |+------------+------------+------------+-----------+```
###Code
shift %>%
inner_join(staff, by=c(Manager="Staff_code")) %>%
mutate(Shift_date=as.Date(Shift_date)) %>%
distinct(Shift_date, Shift_type, First_name, Last_name) %>%
arrange(Shift_date, Shift_type)
dbDisconnect(con)
###Output
_____no_output_____ |
DRL/log_analysis/Reward-function-validation-progress.ipynb | ###Markdown
Reward Function Validation - Progress with Time Penalty Reward functions can easily get complex to visualize once they start taking a number of factors into account. This notebook helps with a quick validation by running the reward function over the track waypoints and a range of simulated progress/step combinations.
###Code
# Load the track to exercise
from math import log
#Shapely Library
from shapely.geometry import Point, Polygon
from shapely.geometry.polygon import LinearRing, LineString
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
%matplotlib inline
TRACK_NAME = 'Canada_Training'
# Helper functions
# Visualize the Track and Waypoints
# Tracks Available::
# AWS_track Straight_track Oval_track
# Bowtie_track H_track reinvent_base
# China_track Mexico_track Canada_training
waypoints = np.load("tracks/%s.npy" % TRACK_NAME)
print("Waypoints shape: {}".format(waypoints.shape))
center_line = waypoints[:,0:2]
inner_border = waypoints[:,2:4]
outer_border = waypoints[:,4:6]
l_center_line = LineString(waypoints[:,0:2])
l_inner_border = LineString(waypoints[:,2:4])
l_outer_border = LineString(waypoints[:,4:6])
road_poly = Polygon(np.vstack((l_outer_border, np.flipud(l_inner_border))))
# Print some useful track analysis data
print("Centerline length: %0.2f" % l_center_line.length)
print("Track bounds: ", road_poly.bounds)
print(waypoints[0])
print("Track width: ", np.linalg.norm(waypoints[0,4:6] - waypoints[0,2:4]))
#print("Track y min/max: ", pd.DataFrame(outer_border)['y'])
road_poly
# scan the track and invoke the reward function for each waypoint
# Sections of Canada_Training
plt.plot(center_line[:,0:1], center_line[:,1:2])
plt.plot(inner_border[:,0:1], inner_border[:,1:2])
plt.plot(outer_border[:,0:1], outer_border[:,1:2])
plt.axis('equal')
plt.annotate('0', center_line[0])
# Straight
plt.annotate('20', center_line[20])
# Sharp left
plt.annotate('40', center_line[40])
# Straight
plt.annotate('85', center_line[85])
# Easy left
plt.annotate('95', center_line[95])
# Straight
plt.annotate('130', center_line[130])
# Easy left
plt.annotate('150', center_line[150])
# Straight
plt.annotate('200', center_line[200])
###Output
_____no_output_____
###Markdown
Reward function code here
###Code
"""
AWS DeepRacer reward function using only progress
NOTE: This is great for maximizing individual step rewards, but the
total episode reward will always be 100.
"""
# Globals
g_last_progress_value = 0.0
#===============================================================================
#
# REWARD
#
#
# Things that don't work:
#
# progress_delta / time_delta => leads to exponential rewards as car goes faster
# progress_delta / progress_total => increased rewards for longer episodes
#
# progress_delta +
#===============================================================================
def reward_function(params):
reward = progress_factor_delta(params) * time_factor_steps(params)
return float(max(reward, 1e-3)) # make sure we never return exactly zero
def time_factor_steps(params):
# Discount by number of elapsed steps
if params['steps'] == 0:
return 1.0
else:
        # the factor starts at 1.0 on step 1 and decays linearly toward 0 at ~step 200
# desired scale: [1.0,0] over range of [0,200] steps (200 being a ~13s lap)
#
# Linear decay function: y = 1 + (1-x)/(200)
return 1.0 + (1 - params['steps'])/200
def progress_factor_delta(params):
# Progress range: 0..100
# Step is roughly a 1/15s timeslice so can account for time-factor
# Expected real value: [0,~1.0]
global g_last_progress_value
# Simple reward for outlier case of first step in the episode
if params['steps'] == 0:
reward = 1e-3
else:
reward = params['progress'] - g_last_progress_value
g_last_progress_value = params['progress']
return reward
def progress_factor_absolute(params):
return params['progress']
###Output
_____no_output_____
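###Markdown
Before sweeping whole episodes, a single-step sanity check with illustrative parameter values helps confirm the shape of the reward: step 0 returns the minimal reward, while a later step returns the progress delta scaled by the step-count factor.
###Code
# Quick sanity check with illustrative params values
g_last_progress_value = 0.0
print(reward_function({'progress': 0.0, 'steps': 0}))   # first step -> minimal reward (1e-3)
print(reward_function({'progress': 2.0, 'steps': 10}))  # 2.0 progress delta * (1 + (1 - 10)/200) = 1.91
###Output
_____no_output_____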
###Markdown
Test the reward function
###Code
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
def rewards_for_episode(max_progress, nsteps):
episode_reward = 0.0
step_rewards = list()
for i in range(nsteps+1):
progress = i * (max_progress / nsteps)
reward = reward_function({'progress': progress, 'steps': i})
episode_reward += reward
step_rewards.append((i, progress, reward, episode_reward))
# return pd.DataFrame(step_rewards, columns=('step', 'progress', 'reward', 'total_reward'))
return step_rewards
#rewards_for_episode(10,10).tail(1)['total_reward'].values[0]
# make a plot of rewards
episode_rewards = list()
for progress in range(1,10):
for steps in range(5,15):
reward = rewards_for_episode(progress,steps)
#print(reward)
episode_rewards.append(reward[-1])
plot_df = pd.DataFrame(episode_rewards, columns=('step', 'progress', 'reward', 'total_reward'))
plot_df.head(50)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) is removed in newer Matplotlib releases
# Plot the surface.
surf = ax.plot_trisurf(plot_df['step'], plot_df['progress'], plot_df['total_reward'], cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# Customize the z axis.
#ax.set_zlim(-1.01, 1.01)
#ax.zaxis.set_major_locator(LinearLocator(10))
#ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Add a color bar which maps values to colors.
#fig.colorbar(surf, shrink=0.5, aspect=5)
#plt.show()
###Output
_____no_output_____ |
ai-platform-unified/notebooks/unofficial/matching_engine/matching_engine_for_indexing.ipynb | ###Markdown
Run in AI Platform Notebooks View on GitHub OverviewThis example demonstrates how to use the GCP ANN Service. It is a high scale, low latency solution, to find similar vectors (or more specifically "embeddings") for a large corpus. Moreover, it is a fully managed offering, further reducing operational overhead. It is built upon [Approximate Nearest Neighbor (ANN) technology](https://ai.googleblog.com/2020/07/announcing-scann-efficient-vector.html) developed by Google Research. DatasetThe dataset used for this tutorial is the [GloVe dataset](https://nlp.stanford.edu/projects/glove/). ObjectiveIn this notebook, you will learn how to create Approximate Nearest Neighbor (ANN) Index, query against indexes, and validate the performance of the index. The steps performed include:* Create ANN Index and Brute Force Index* Create an IndexEndpoint with VPC Network* Deploy ANN Index and Brute Force Index* Perform online query* Compute recall Costs This tutorial uses billable components of Google Cloud:* AI Platform (Unified)* Cloud StorageLearn about [AI Platform (Unified)pricing](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Before you begin* **Prepare a VPC network**. To reduce any network overhead that might lead to unnecessary increase in overhead latency, it is best to call the ANN endpoints from your VPC via a direct [VPC Peering](https://cloud.google.com/ai-platform-unified/docs/general/vpc-peering) connection. The following section describes how to setup a VPC Peering connection if you don't have one. This is a one-time initial setup task. You can also reuse existing VPC network and skip this section.* **WARNING:** The match service gRPC API (to create online queries against your deployed index) has to be executed in an AI Platform Notebook instance that is created with the following requirements: * **In the same region as where your ANN service is deployed** (for example, if you set `REGION = "us-central1"` as same as the tutorial, the notebook instance has to be in `us-central1`). * **Make sure you select the VPC network you created for ANN service** (instead of using the "default" one). * If you run it in the colab or an AI Platform Notebook instance in a different VPC network or region, the gRPC API will fail to peer the network (InactiveRPCError).
###Code
PROJECT_ID = "<your_project_id>" # @param {type:"string"}
NETWORK_NAME = "ucaip-haystack-vpc-network" # @param {type:"string"}
PEERING_RANGE_NAME = "ucaip-haystack-range"
# Create a VPC network
! gcloud compute networks create {NETWORK_NAME} --bgp-routing-mode=regional --subnet-mode=auto --project={PROJECT_ID}
# Add necessary firewall rules
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-icmp --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow icmp
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-internal --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow all --source-ranges 10.128.0.0/9
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-rdp --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow tcp:3389
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-ssh --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow tcp:22
# Reserve IP range
! gcloud compute addresses create {PEERING_RANGE_NAME} --global --prefix-length=16 --network={NETWORK_NAME} --purpose=VPC_PEERING --project={PROJECT_ID} --description="peering range for uCAIP Haystack."
# Set up peering with service networking
! gcloud services vpc-peerings connect --service=servicenetworking.googleapis.com --network={NETWORK_NAME} --ranges={PEERING_RANGE_NAME} --project={PROJECT_ID}
###Output
_____no_output_____
###Markdown
* Authentication: `$ gcloud auth login` rerun this in AI Platform Notebook terminal when you are logged out and need the credential again. InstallationDownload and install the latest (preview) version of the AI Platform(Unified) client library.
###Code
! pip install -U git+https://github.com/googleapis/python-aiplatform.git@main-test --user
###Output
_____no_output_____
###Markdown
Install the `h5py` to prepare sample dataset, and the `grpcio-tools` for querying against the index.
###Code
! pip install -U grpcio-tools --user
! pip install -U h5py --user
###Output
_____no_output_____
###Markdown
Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager).1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the AI Platform (Unified) API and Compute Engine API, and Service Networking API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,servicenetworking.googleapis.com).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
_____no_output_____
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "<your_project_id>" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**Set the name of your Cloud Storage bucket below. It must be unique across allCloud Storage buckets.You may also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Make sure to [choose a region where AI Platform (Unified) services areavailable](https://cloud.google.com/ai-platform-unified/docs/general/locationsavailable_regions). You maynot use a Multi-Regional Storage bucket for training with AI Platform.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Import libraries and define constants Import the AI Platform (unified) client library into your Python environment.
###Code
import time
import grpc
import h5py
from google.cloud import aiplatform_v1beta1
from google.protobuf import struct_pb2
REGION = "us-central1"
ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
NETWORK_NAME = "ucaip-haystack-vpc-network" # @param {type:"string"}
AUTH_TOKEN = !gcloud auth print-access-token
PROJECT_NUMBER = !gcloud projects list --filter='project_id:$PROJECT_ID' --format='value(PROJECT_NUMBER)'
PROJECT_NUMBER = str(PROJECT_NUMBER).strip("[").strip("]").strip("'")
PARENT = "projects/{}/locations/{}".format(PROJECT_ID, REGION)
print("ENDPOINT: {}".format(ENDPOINT))
print("PROJECT_ID: {}".format(PROJECT_ID))
print("REGION: {}".format(REGION))
!gcloud config set project {PROJECT_ID}
!gcloud config set ai_platform/region {REGION}
###Output
_____no_output_____
###Markdown
Prepare the DataThe GloVe dataset consists of a set of pre-trained embeddings. The embeddings are split into a "train" split, and a "test" split.We will create a vector search index from the "train" split, and use the embedding vectors in the "test" split as query vectors to test the vector search index.NOTE: While the data split uses the term "train", these are pre-trained embeddings and thus are ready to be indexed for search. The terms "train" and "test" split are used just to be consistent with usual machine learning terminology.Download the GloVe dataset.
###Code
! gsutil cp gs://cloud-samples-data/ai-platform-unified/matching_engine/glove-100-angular.hdf5 .
###Output
_____no_output_____
###Markdown
Read the data into memory.
###Code
# The number of nearest neighbors to be retrieved from database for each query.
k = 10
h5 = h5py.File("glove-100-angular.hdf5", "r")
train = h5["train"]
test = h5["test"]
train[0]
###Output
_____no_output_____
###Markdown
Save the train split in JSONL format.
###Code
with open("glove100.json", "w") as f:
for i in range(len(train)):
f.write('{"id":"' + str(i) + '",')
f.write('"embedding":[' + ",".join(str(x) for x in train[i]) + "]}")
f.write("\n")
###Output
_____no_output_____
###Markdown
Upload the training data to GCS.
###Code
# NOTE: Everything in this GCS DIR will be DELETED before uploading the data.
! gsutil rm -rf {BUCKET_NAME}
! gsutil cp glove100.json {BUCKET_NAME}/glove100.json
! gsutil ls {BUCKET_NAME}
###Output
_____no_output_____
###Markdown
Create Indexes Create ANN Index (for Production Usage)
###Code
index_client = aiplatform_v1beta1.IndexServiceClient(
client_options=dict(api_endpoint=ENDPOINT)
)
DIMENSIONS = 100
DISPLAY_NAME = "glove_100_1"
DISPLAY_NAME_BRUTE_FORCE = DISPLAY_NAME + "_brute_force"
###Output
_____no_output_____
###Markdown
Create the ANN index configuration:Please read the documentation to understand the various configuration parameters that can be used to tune the index
###Code
treeAhConfig = struct_pb2.Struct(
fields={
"leafNodeEmbeddingCount": struct_pb2.Value(number_value=500),
"leafNodesToSearchPercent": struct_pb2.Value(number_value=7),
}
)
algorithmConfig = struct_pb2.Struct(
fields={"treeAhConfig": struct_pb2.Value(struct_value=treeAhConfig)}
)
config = struct_pb2.Struct(
fields={
"dimensions": struct_pb2.Value(number_value=DIMENSIONS),
"approximateNeighborsCount": struct_pb2.Value(number_value=150),
"distanceMeasureType": struct_pb2.Value(string_value="DOT_PRODUCT_DISTANCE"),
"algorithmConfig": struct_pb2.Value(struct_value=algorithmConfig),
}
)
metadata = struct_pb2.Struct(
fields={
"config": struct_pb2.Value(struct_value=config),
"contentsDeltaUri": struct_pb2.Value(string_value=BUCKET_NAME),
}
)
ann_index = {
"display_name": DISPLAY_NAME,
"description": "Glove 100 ANN index",
"metadata": struct_pb2.Value(struct_value=metadata),
}
ann_index = index_client.create_index(parent=PARENT, index=ann_index)
# Poll the operation until it's done successfully.
# This will take ~45 min.
while True:
if ann_index.done():
break
print("Poll the operation to create index...")
time.sleep(60)
INDEX_RESOURCE_NAME = ann_index.result().name
INDEX_RESOURCE_NAME
###Output
_____no_output_____
###Markdown
Create Brute Force Index (for Ground Truth)The brute force index uses a naive brute force method to find the nearest neighbors. This method is not fast or efficient. Hence brute force indices are not recommended for production usage. They are to be used to find the "ground truth" set of neighbors, so that the "ground truth" set can be used to measure recall of the indices being tuned for production usage. To ensure an apples to apples comparison, the `distanceMeasureType` and `featureNormType`, `dimensions` of the brute force index should match those of the production indices being tuned.Create the brute force index configuration:
###Code
algorithmConfig = struct_pb2.Struct(
fields={"bruteForceConfig": struct_pb2.Value(struct_value=Struct())}
)
config = struct_pb2.Struct(
fields={
"dimensions": struct_pb2.Value(number_value=DIMENSIONS),
"approximateNeighborsCount": struct_pb2.Value(number_value=150),
"distanceMeasureType": struct_pb2.Value(string_value="DOT_PRODUCT_DISTANCE"),
"algorithmConfig": struct_pb2.Value(struct_value=algorithmConfig),
}
)
metadata = struct_pb2.Struct(
fields={
"config": struct_pb2.Value(struct_value=config),
"contentsDeltaUri": struct_pb2.Value(string_value=BUCKET_NAME),
}
)
brute_force_index = {
"display_name": DISPLAY_NAME_BRUTE_FORCE,
"description": "Glove 100 index (brute force)",
"metadata": struct_pb2.Value(struct_value=metadata),
}
brute_force_index = index_client.create_index(parent=PARENT, index=brute_force_index)
# Poll the operation until it's done successfully.
# This will take ~45 min.
while True:
if brute_force_index.done():
break
print("Poll the operation to create index...")
time.sleep(60)
INDEX_BRUTE_FORCE_RESOURCE_NAME = brute_force_index.result().name
INDEX_BRUTE_FORCE_RESOURCE_NAME
###Output
_____no_output_____
###Markdown
Create an IndexEndpoint with VPC Network
###Code
index_endpoint_client = aiplatform_v1beta1.IndexEndpointServiceClient(
client_options=dict(api_endpoint=ENDPOINT)
)
VPC_NETWORK_NAME = "projects/{}/global/networks/{}".format(PROJECT_NUMBER, NETWORK_NAME)
VPC_NETWORK_NAME
index_endpoint = {
"display_name": "index_endpoint_for_demo",
"network": VPC_NETWORK_NAME,
}
r = index_endpoint_client.create_index_endpoint(
parent=PARENT, index_endpoint=index_endpoint
)
r.result()
INDEX_ENDPOINT_NAME = r.result().name
INDEX_ENDPOINT_NAME
###Output
_____no_output_____
###Markdown
Deploy Indexes Deploy ANN Index
###Code
DEPLOYED_INDEX_ID = "ann_glove_deployed"
deploy_ann_index = {
"id": DEPLOYED_INDEX_ID,
"display_name": DEPLOYED_INDEX_ID,
"index": INDEX_RESOURCE_NAME,
}
r = index_endpoint_client.deploy_index(
index_endpoint=INDEX_ENDPOINT_NAME, deployed_index=deploy_ann_index
)
# Poll the operation until it's done successfully.
while True:
if r.done():
break
print("Poll the operation to deploy index...")
time.sleep(60)
r.result()
###Output
_____no_output_____
###Markdown
Deploy Brute Force Index
###Code
DEPLOYED_BRUTE_FORCE_INDEX_ID = "glove_brute_force_deployed"
deploy_brute_force_index = {
"id": DEPLOYED_BRUTE_FORCE_INDEX_ID,
"display_name": DEPLOYED_BRUTE_FORCE_INDEX_ID,
"index": INDEX_BRUTE_FORCE_RESOURCE_NAME,
}
r = index_endpoint_client.deploy_index(
index_endpoint=INDEX_ENDPOINT_NAME, deployed_index=deploy_brute_force_index
)
# Poll the operation until it's done successfully.
while True:
if r.done():
break
print("Poll the operation to deploy index...")
time.sleep(60)
r.result()
###Output
_____no_output_____
###Markdown
Create Online QueriesAfter you built your indexes, you may query against the deployed index through the online querying gRPC API (Match service) within the virtual machine instances from the same region (for example 'us-central1' in this tutorial). The way a client uses this gRPC API is by folowing steps:* Write `match_service.proto` locally* Clone the repository that contains the dependencies of match_service.proto in the Terminal:`$ mkdir third_party && cd third_party``$ git clone https://github.com/googleapis/googleapis.git`* Compile the protocal buffer (see below)* Obtain the index endpoint* Use a code-generated stub to make the call, passing the parameter values
###Code
%%writefile match_service.proto
syntax = "proto3";
package google.cloud.aiplatform.container.v1beta1;
import "google/rpc/status.proto";
// MatchService is a Google managed service for efficient vector similarity
// search at scale.
service MatchService {
// Returns the nearest neighbors for the query. If it is a sharded
// deployment, calls the other shards and aggregates the responses.
rpc Match(MatchRequest) returns (MatchResponse) {}
// Returns the nearest neighbors for batch queries. If it is a sharded
// deployment, calls the other shards and aggregates the responses.
rpc BatchMatch(BatchMatchRequest) returns (BatchMatchResponse) {}
}
// Parameters for a match query.
message MatchRequest {
// The ID of the DeploydIndex that will serve the request.
// This MatchRequest is sent to a specific IndexEndpoint of the Control API,
// as per the IndexEndpoint.network. That IndexEndpoint also has
// IndexEndpoint.deployed_indexes, and each such index has an
// DeployedIndex.id field.
// The value of the field below must equal one of the DeployedIndex.id
// fields of the IndexEndpoint that is being called for this request.
string deployed_index_id = 1;
// The embedding values.
repeated float float_val = 2;
// The number of nearest neighbors to be retrieved from database for
// each query. If not set, will use the default from
// the service configuration.
int32 num_neighbors = 3;
// The list of restricts.
repeated Namespace restricts = 4;
// Crowding is a constraint on a neighbor list produced by nearest neighbor
// search requiring that no more than some value k' of the k neighbors
// returned have the same value of crowding_attribute.
// It's used for improving result diversity.
// This field is the maximum number of matches with the same crowding tag.
int32 per_crowding_attribute_num_neighbors = 5;
// The number of neighbors to find via approximate search before
// exact reordering is performed. If not set, the default value from scam
// config is used; if set, this value must be > 0.
int32 approx_num_neighbors = 6;
// The fraction of the number of leaves to search, set at query time allows
// user to tune search performance. This value increase result in both search
// accuracy and latency increase. The value should be between 0.0 and 1.0. If
// not set or set to 0.0, query uses the default value specified in
// NearestNeighborSearchConfig.TreeAHConfig.leaf_nodes_to_search_percent.
int32 leaf_nodes_to_search_percent_override = 7;
}
// Response of a match query.
message MatchResponse {
message Neighbor {
// The ids of the matches.
string id = 1;
// The distances of the matches.
double distance = 2;
}
// All its neighbors.
repeated Neighbor neighbor = 1;
}
// Parameters for a batch match query.
message BatchMatchRequest {
// Batched requests against one index.
message BatchMatchRequestPerIndex {
// The ID of the DeploydIndex that will serve the request.
string deployed_index_id = 1;
// The requests against the index identified by the above deployed_index_id.
repeated MatchRequest requests = 2;
// Selects the optimal batch size to use for low-level batching. Queries
// within each low level batch are executed sequentially while low level
// batches are executed in parallel.
// This field is optional, defaults to 0 if not set. A non-positive number
// disables low level batching, i.e. all queries are executed sequentially.
int32 low_level_batch_size = 3;
}
// The batch requests grouped by indexes.
repeated BatchMatchRequestPerIndex requests = 1;
}
// Response of a batch match query.
message BatchMatchResponse {
// Batched responses for one index.
message BatchMatchResponsePerIndex {
// The ID of the DeployedIndex that produced the responses.
string deployed_index_id = 1;
// The match responses produced by the index identified by the above
// deployed_index_id. This field is set only when the query against that
// index succeed.
repeated MatchResponse responses = 2;
// The status of response for the batch query identified by the above
// deployed_index_id.
google.rpc.Status status = 3;
}
// The batched responses grouped by indexes.
repeated BatchMatchResponsePerIndex responses = 1;
}
// Namespace specifies the rules for determining the datapoints that are
// eligible for each matching query, overall query is an AND across namespaces.
message Namespace {
// The string name of the namespace that this proto is specifying,
// such as "color", "shape", "geo", or "tags".
string name = 1;
// The allowed tokens in the namespace.
repeated string allow_tokens = 2;
// The denied tokens in the namespace.
// The denied tokens have exactly the same format as the token fields, but
// represents a negation. When a token is denied, then matches will be
// excluded whenever the other datapoint has that token.
//
// For example, if a query specifies {color: red, blue, !purple}, then that
// query will match datapoints that are red or blue, but if those points are
// also purple, then they will be excluded even if they are red/blue.
repeated string deny_tokens = 3;
}
###Output
_____no_output_____
###Markdown
Compile the protocol buffer, and then `match_service_pb2.py` and `match_service_pb2_grpc.py` are generated.
###Code
! python -m grpc_tools.protoc -I=. --proto_path=third_party/googleapis --python_out=. --grpc_python_out=. match_service.proto
###Output
_____no_output_____
###Markdown
Obtain the Private Endpoint:
###Code
DEPLOYED_INDEX_SERVER_IP = (
list(index_endpoint_client.list_index_endpoints(parent=PARENT))[0]
.deployed_indexes[0]
.private_endpoints.match_grpc_address
)
DEPLOYED_INDEX_SERVER_IP
###Output
_____no_output_____
###Markdown
Test your query:
###Code
import match_service_pb2
import match_service_pb2_grpc
channel = grpc.insecure_channel("{}:10000".format(DEPLOYED_INDEX_SERVER_IP))
stub = match_service_pb2_grpc.MatchServiceStub(channel)
# Test query
query = [
-0.11333,
0.48402,
0.090771,
-0.22439,
0.034206,
-0.55831,
0.041849,
-0.53573,
0.18809,
-0.58722,
0.015313,
-0.014555,
0.80842,
-0.038519,
0.75348,
0.70502,
-0.17863,
0.3222,
0.67575,
0.67198,
0.26044,
0.4187,
-0.34122,
0.2286,
-0.53529,
1.2582,
-0.091543,
0.19716,
-0.037454,
-0.3336,
0.31399,
0.36488,
0.71263,
0.1307,
-0.24654,
-0.52445,
-0.036091,
0.55068,
0.10017,
0.48095,
0.71104,
-0.053462,
0.22325,
0.30917,
-0.39926,
0.036634,
-0.35431,
-0.42795,
0.46444,
0.25586,
0.68257,
-0.20821,
0.38433,
0.055773,
-0.2539,
-0.20804,
0.52522,
-0.11399,
-0.3253,
-0.44104,
0.17528,
0.62255,
0.50237,
-0.7607,
-0.071786,
0.0080131,
-0.13286,
0.50097,
0.18824,
-0.54722,
-0.42664,
0.4292,
0.14877,
-0.0072514,
-0.16484,
-0.059798,
0.9895,
-0.61738,
0.054169,
0.48424,
-0.35084,
-0.27053,
0.37829,
0.11503,
-0.39613,
0.24266,
0.39147,
-0.075256,
0.65093,
-0.20822,
-0.17456,
0.53571,
-0.16537,
0.13582,
-0.56016,
0.016964,
0.1277,
0.94071,
-0.22608,
-0.021106,
]
request = match_service_pb2.MatchRequest()
request.deployed_index_id = DEPLOYED_INDEX_ID
for val in query:
request.float_val.append(val)
response = stub.Match(request)
response
###Output
_____no_output_____
###Markdown
Compute Recall Use the deployed brute-force index as the ground truth to calculate the recall of the ANN index:
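For each test query, recall@k is the fraction of the k ground-truth (brute-force) neighbors that the ANN index also returns, averaged over all |Q| queries: $\text{recall@}k = \frac{1}{|Q|\,k}\sum_{q \in Q}\lvert \mathrm{GT}_k(q) \cap \mathrm{ANN}_k(q)\rvert$. This is exactly what the cell below computes with k = 10.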
###Code
def get_neighbors(embedding, deployed_index_id):
request = match_service_pb2.MatchRequest(num_neighbors=k)
request.deployed_index_id = deployed_index_id
for val in embedding:
request.float_val.append(val)
response = stub.Match(request)
return [int(n.id) for n in response.neighbor]
# This will take 5-10 min
recall = sum(
[
len(
set(get_neighbors(test[i], DEPLOYED_BRUTE_FORCE_INDEX_ID)).intersection(
set(get_neighbors(test[i], DEPLOYED_INDEX_ID))
)
)
for i in range(len(test))
]
) / (1.0 * len(test) * k)
print("Recall: {}".format(recall))
###Output
_____no_output_____
###Markdown
Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloudproject](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.You can also manually delete resources that you created by running the following code.
###Code
index_client.delete_index(name=INDEX_RESOURCE_NAME)
index_client.delete_index(name=INDEX_BRUTE_FORCE_RESOURCE_NAME)
index_endpoint_client.delete_index_endpoint(name=INDEX_ENDPOINT_NAME)
###Output
_____no_output_____
###Markdown
Run in AI Platform Notebooks View on GitHub OverviewThis example demonstrates how to use the GCP ANN Service. It is a high scale, low latency solution, to find similar vectors (or more specifically "embeddings") for a large corpus. Moreover, it is a fully managed offering, further reducing operational overhead. It is built upon [Approximate Nearest Neighbor (ANN) technology](https://ai.googleblog.com/2020/07/announcing-scann-efficient-vector.html) developed by Google Research. DatasetThe dataset used for this tutorial is the [GloVe dataset](https://nlp.stanford.edu/projects/glove/). ObjectiveIn this notebook, you will learn how to create Approximate Nearest Neighbor (ANN) Index, query against indexes, and validate the performance of the index. The steps performed include:* Create ANN Index and Brute Force Index* Create an IndexEndpoint with VPC Network* Deploy ANN Index and Brute Force Index* Perform online query* Compute recall Costs This tutorial uses billable components of Google Cloud:* AI Platform (Unified)* Cloud StorageLearn about [AI Platform (Unified)pricing](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Before you begin* **Prepare a VPC network**. To reduce any network overhead that might lead to unnecessary increase in overhead latency, it is best to call the ANN endpoints from your VPC via a direct [VPC Peering](https://cloud.google.com/ai-platform-unified/docs/general/vpc-peering) connection. The following section describes how to setup a VPC Peering connection if you don't have one. This is a one-time initial setup task. You can also reuse existing VPC network and skip this section.* **WARNING:** The match service gRPC API (to create online queries against your deployed index) has to be executed in an AI Platform Notebook instance that is created with the following requirements: * **In the same region as where your ANN service is deployed** (for example, if you set `REGION = "us-central1"` as same as the tutorial, the notebook instance has to be in `us-central1`). * **Make sure you select the VPC network you created for ANN service** (instead of using the "default" one). * If you run it in the colab or an AI Platform Notebook instance in a different VPC network or region, the gRPC API will fail to peer the network (InactiveRPCError).
###Code
PROJECT_ID = "<your_project_id>" # @param {type:"string"}
NETWORK_NAME = "ucaip-haystack-vpc-network" # @param {type:"string"}
PEERING_RANGE_NAME = "ucaip-haystack-range"
# Create a VPC network
! gcloud compute networks create {NETWORK_NAME} --bgp-routing-mode=regional --subnet-mode=auto --project={PROJECT_ID}
# Add necessary firewall rules
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-icmp --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow icmp
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-internal --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow all --source-ranges 10.128.0.0/9
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-rdp --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow tcp:3389
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-ssh --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow tcp:22
# Reserve IP range
! gcloud compute addresses create {PEERING_RANGE_NAME} --global --prefix-length=16 --network={NETWORK_NAME} --purpose=VPC_PEERING --project={PROJECT_ID} --description="peering range for uCAIP Haystack."
# Set up peering with service networking
! gcloud services vpc-peerings connect --service=servicenetworking.googleapis.com --network={NETWORK_NAME} --ranges={PEERING_RANGE_NAME} --project={PROJECT_ID}
###Output
_____no_output_____
###Markdown
* Authentication: `$ gcloud auth login` rerun this in AI Platform Notebook terminal when you are logged out and need the credential again. InstallationDownload and install the latest (preview) version of the AI Platform(Unified) client library.
###Code
! pip install -U git+https://github.com/googleapis/python-aiplatform.git@main-test --user
###Output
_____no_output_____
###Markdown
Install the `h5py` to prepare sample dataset, and the `grpcio-tools` for querying against the index.
###Code
! pip install -U grpcio-tools --user
! pip install -U h5py --user
###Output
_____no_output_____
###Markdown
Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager).1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the AI Platform (Unified) API and Compute Engine API, and Service Networking API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,servicenetworking.googleapis.com).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
_____no_output_____
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "<your_project_id>" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**Set the name of your Cloud Storage bucket below. It must be unique across all Cloud Storage buckets. You may also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Make sure to [choose a region where AI Platform (Unified) services are available](https://cloud.google.com/ai-platform-unified/docs/general/locationsavailable_regions). You may not use a Multi-Regional Storage bucket for training with AI Platform.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Import libraries and define constants Import the AI Platform (unified) client library into your Python environment.
###Code
import time
import grpc
import h5py
from google.cloud import aiplatform_v1beta1
from google.protobuf import struct_pb2
REGION = "us-central1"
ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
NETWORK_NAME = "ucaip-haystack-vpc-network" # @param {type:"string"}
AUTH_TOKEN = !gcloud auth print-access-token
PROJECT_NUMBER = !gcloud projects list --filter='project_id:$PROJECT_ID' --format='value(PROJECT_NUMBER)'
PROJECT_NUMBER = str(PROJECT_NUMBER).strip('[').strip(']').strip("'")
PARENT = "projects/{}/locations/{}".format(PROJECT_ID, REGION)
print("ENDPOINT: {}".format(ENDPOINT))
print("PROJECT_ID: {}".format(PROJECT_ID))
print("REGION: {}".format(REGION))
!gcloud config set project {PROJECT_ID}
!gcloud config set ai_platform/region {REGION}
###Output
_____no_output_____
###Markdown
Prepare the DataThe GloVe dataset consists of a set of pre-trained embeddings. The embeddings are split into a "train" split, and a "test" split.We will create a vector search index from the "train" split, and use the embedding vectors in the "test" split as query vectors to test the vector search index.NOTE: While the data split uses the term "train", these are pre-trained embeddings and thus are ready to be indexed for search. The terms "train" and "test" split are used just to be consistent with usual machine learning terminology.Download the GloVe dataset.
###Code
! gsutil cp gs://cloud-samples-data/ai-platform-unified/matching_engine/glove-100-angular.hdf5 .
###Output
_____no_output_____
###Markdown
Read the data into memory.
###Code
# The number of nearest neighbors to be retrieved from database for each query.
k = 10
h5 = h5py.File("glove-100-angular.hdf5", "r")
train = h5["train"]
test = h5["test"]
train[0]
###Output
_____no_output_____
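###Markdown
As a quick sanity check (an added step, not part of the original flow), confirm the shapes of the two splits; each row is a 100-dimensional embedding.
###Code
# "train" holds the vectors to be indexed; "test" holds query vectors used later to measure recall.
print(train.shape, test.shape)
###Output
_____no_output_____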
###Markdown
Save the train split in JSONL format.
###Code
with open("glove100.json", "w") as f:
for i in range(len(train)):
f.write('{"id":"' + str(i) + '",')
f.write('"embedding":[' + ','.join(str(x) for x in train[i]) + ']}')
f.write('\n')
###Output
_____no_output_____
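###Markdown
Optionally, peek at the beginning of the generated file to confirm the `{"id": ..., "embedding": [...]}` record format (a small added check, not part of the original flow).
###Code
# Show the first 200 bytes of the JSONL file.
! head -c 200 glove100.json
###Output
_____no_output_____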
###Markdown
Upload the training data to GCS.
###Code
# NOTE: Everything in this GCS DIR will be DELETED before uploading the data.
! gsutil rm -rf {BUCKET_NAME}
! gsutil cp glove100.json {BUCKET_NAME}/glove100.json
! gsutil ls {BUCKET_NAME}
###Output
_____no_output_____
###Markdown
Create Indexes Create ANN Index (for Production Usage)
###Code
index_client = aiplatform_v1beta1.IndexServiceClient(client_options=dict(api_endpoint=ENDPOINT))
DIMENSIONS = 100
DISPLAY_NAME = "glove_100_1"
DISPLAY_NAME_BRUTE_FORCE = DISPLAY_NAME + "_brute_force"
###Output
_____no_output_____
###Markdown
Create the ANN index configuration: Please read the documentation to understand the various configuration parameters that can be used to tune the index.
###Code
treeAhConfig = struct_pb2.Struct(
fields={
"leafNodeEmbeddingCount": struct_pb2.Value(number_value=500),
"leafNodesToSearchPercent": struct_pb2.Value(number_value=7)})
algorithmConfig = struct_pb2.Struct(
fields={"treeAhConfig": struct_pb2.Value(struct_value=treeAhConfig)})
config = struct_pb2.Struct(
fields={
"dimensions": struct_pb2.Value(number_value=DIMENSIONS),
"approximateNeighborsCount": struct_pb2.Value(number_value=150),
"distanceMeasureType": struct_pb2.Value(string_value="DOT_PRODUCT_DISTANCE"),
"algorithmConfig": struct_pb2.Value(struct_value=algorithmConfig),
}
)
metadata = struct_pb2.Struct(
fields={"config": struct_pb2.Value(struct_value=config),
"contentsDeltaUri": struct_pb2.Value(string_value=BUCKET_NAME)}
)
ann_index = {
"display_name": DISPLAY_NAME,
"description": "Glove 100 ANN index",
"metadata": struct_pb2.Value(struct_value=metadata),
}
ann_index = index_client.create_index(parent=PARENT, index=ann_index)
# Poll the operation until it's done successfully.
# This will take ~45 min.
while True:
if ann_index.done():
break
print("Poll the operation to create index...")
time.sleep(60)
INDEX_RESOURCE_NAME = ann_index.result().name
INDEX_RESOURCE_NAME
###Output
_____no_output_____
###Markdown
Create Brute Force Index (for Ground Truth)The brute force index uses a naive brute force method to find the nearest neighbors. This method is not fast or efficient. Hence brute force indices are not recommended for production usage. They are to be used to find the "ground truth" set of neighbors, so that the "ground truth" set can be used to measure recall of the indices being tuned for production usage. To ensure an apples-to-apples comparison, the `distanceMeasureType`, `featureNormType`, and `dimensions` of the brute force index should match those of the production indices being tuned. Create the brute force index configuration:
###Code
algorithmConfig = struct_pb2.Struct(
    fields={"bruteForceConfig": struct_pb2.Value(struct_value=struct_pb2.Struct())})
config = struct_pb2.Struct(
fields={
"dimensions": struct_pb2.Value(number_value=DIMENSIONS),
"approximateNeighborsCount": struct_pb2.Value(number_value=150),
"distanceMeasureType": struct_pb2.Value(string_value="DOT_PRODUCT_DISTANCE"),
"algorithmConfig": struct_pb2.Value(struct_value=algorithmConfig),
}
)
metadata = struct_pb2.Struct(
fields={"config": struct_pb2.Value(struct_value=config),
"contentsDeltaUri": struct_pb2.Value(string_value=BUCKET_NAME)}
)
brute_force_index = {
"display_name": DISPLAY_NAME_BRUTE_FORCE,
"description": "Glove 100 index (brute force)",
"metadata": struct_pb2.Value(struct_value=metadata),
}
brute_force_index = index_client.create_index(parent=PARENT, index=brute_force_index)
# Poll the operation until it's done successfully.
# This will take ~45 min.
while True:
if brute_force_index.done():
break
print("Poll the operation to create index...")
time.sleep(60)
INDEX_BRUTE_FORCE_RESOURCE_NAME = brute_force_index.result().name
INDEX_BRUTE_FORCE_RESOURCE_NAME
###Output
_____no_output_____
###Markdown
Create an IndexEndpoint with VPC Network
###Code
index_endpoint_client = aiplatform_v1beta1.IndexEndpointServiceClient(client_options=dict(api_endpoint=ENDPOINT))
VPC_NETWORK_NAME = "projects/{}/global/networks/{}".format(PROJECT_NUMBER, NETWORK_NAME)
VPC_NETWORK_NAME
index_endpoint = {
"display_name": "index_endpoint_for_demo",
"network": VPC_NETWORK_NAME,
}
r = index_endpoint_client.create_index_endpoint(parent=PARENT, index_endpoint=index_endpoint)
r.result()
INDEX_ENDPOINT_NAME = r.result().name
INDEX_ENDPOINT_NAME
###Output
_____no_output_____
###Markdown
Deploy Indexes Deploy ANN Index
###Code
DEPLOYED_INDEX_ID = "ann_glove_deployed"
deploy_ann_index = {
"id": DEPLOYED_INDEX_ID,
"display_name": DEPLOYED_INDEX_ID,
"index": INDEX_RESOURCE_NAME,
}
r = index_endpoint_client.deploy_index(index_endpoint=INDEX_ENDPOINT_NAME, deployed_index=deploy_ann_index)
# Poll the operation until it's done successfully.
while True:
if r.done():
break
print("Poll the operation to deploy index...")
time.sleep(60)
r.result()
###Output
_____no_output_____
###Markdown
Deploy Brute Force Index
###Code
DEPLOYED_BRUTE_FORCE_INDEX_ID = "glove_brute_force_deployed"
deploy_brute_force_index = {
"id": DEPLOYED_BRUTE_FORCE_INDEX_ID,
"display_name": DEPLOYED_BRUTE_FORCE_INDEX_ID,
"index": INDEX_BRUTE_FORCE_RESOURCE_NAME,
}
r = index_endpoint_client.deploy_index(index_endpoint=INDEX_ENDPOINT_NAME, deployed_index=deploy_brute_force_index)
# Poll the operation until it's done successfully.
while True:
if r.done():
break
print("Poll the operation to deploy index...")
time.sleep(60)
r.result()
###Output
_____no_output_____
###Markdown
Create Online QueriesAfter you have built your indexes, you may query against the deployed index through the online querying gRPC API (Match service) from virtual machine instances in the same region (for example, 'us-central1' in this tutorial). A client uses this gRPC API by following these steps:* Write `match_service.proto` locally* Clone the repository that contains the dependencies of match_service.proto in the Terminal:`$ mkdir third_party && cd third_party``$ git clone https://github.com/googleapis/googleapis.git`* Compile the protocol buffer (see below)* Obtain the index endpoint* Use a code-generated stub to make the call, passing the parameter values
###Code
%%writefile match_service.proto
syntax = "proto3";
package google.cloud.aiplatform.container.v1beta1;
import "google/rpc/status.proto";
// MatchService is a Google managed service for efficient vector similarity
// search at scale.
service MatchService {
// Returns the nearest neighbors for the query. If it is a sharded
// deployment, calls the other shards and aggregates the responses.
rpc Match(MatchRequest) returns (MatchResponse) {}
// Returns the nearest neighbors for batch queries. If it is a sharded
// deployment, calls the other shards and aggregates the responses.
rpc BatchMatch(BatchMatchRequest) returns (BatchMatchResponse) {}
}
// Parameters for a match query.
message MatchRequest {
// The ID of the DeployedIndex that will serve the request.
// This MatchRequest is sent to a specific IndexEndpoint of the Control API,
// as per the IndexEndpoint.network. That IndexEndpoint also has
// IndexEndpoint.deployed_indexes, and each such index has a
// DeployedIndex.id field.
// The value of the field below must equal one of the DeployedIndex.id
// fields of the IndexEndpoint that is being called for this request.
string deployed_index_id = 1;
// The embedding values.
repeated float float_val = 2;
// The number of nearest neighbors to be retrieved from database for
// each query. If not set, will use the default from
// the service configuration.
int32 num_neighbors = 3;
// The list of restricts.
repeated Namespace restricts = 4;
// Crowding is a constraint on a neighbor list produced by nearest neighbor
// search requiring that no more than some value k' of the k neighbors
// returned have the same value of crowding_attribute.
// It's used for improving result diversity.
// This field is the maximum number of matches with the same crowding tag.
int32 per_crowding_attribute_num_neighbors = 5;
// The number of neighbors to find via approximate search before
// exact reordering is performed. If not set, the default value from scam
// config is used; if set, this value must be > 0.
int32 approx_num_neighbors = 6;
// The fraction of the number of leaves to search, set at query time, allows
// the user to tune search performance. Increasing this value results in both
// higher search accuracy and higher latency. The value should be between 0.0 and 1.0. If
// not set or set to 0.0, query uses the default value specified in
// NearestNeighborSearchConfig.TreeAHConfig.leaf_nodes_to_search_percent.
int32 leaf_nodes_to_search_percent_override = 7;
}
// Response of a match query.
message MatchResponse {
message Neighbor {
// The ids of the matches.
string id = 1;
// The distances of the matches.
double distance = 2;
}
// All its neighbors.
repeated Neighbor neighbor = 1;
}
// Parameters for a batch match query.
message BatchMatchRequest {
// Batched requests against one index.
message BatchMatchRequestPerIndex {
// The ID of the DeployedIndex that will serve the request.
string deployed_index_id = 1;
// The requests against the index identified by the above deployed_index_id.
repeated MatchRequest requests = 2;
// Selects the optimal batch size to use for low-level batching. Queries
// within each low level batch are executed sequentially while low level
// batches are executed in parallel.
// This field is optional, defaults to 0 if not set. A non-positive number
// disables low level batching, i.e. all queries are executed sequentially.
int32 low_level_batch_size = 3;
}
// The batch requests grouped by indexes.
repeated BatchMatchRequestPerIndex requests = 1;
}
// Response of a batch match query.
message BatchMatchResponse {
// Batched responses for one index.
message BatchMatchResponsePerIndex {
// The ID of the DeployedIndex that produced the responses.
string deployed_index_id = 1;
// The match responses produced by the index identified by the above
// deployed_index_id. This field is set only when the query against that
// index succeeds.
repeated MatchResponse responses = 2;
// The status of response for the batch query identified by the above
// deployed_index_id.
google.rpc.Status status = 3;
}
// The batched responses grouped by indexes.
repeated BatchMatchResponsePerIndex responses = 1;
}
// Namespace specifies the rules for determining the datapoints that are
// eligible for each matching query, overall query is an AND across namespaces.
message Namespace {
// The string name of the namespace that this proto is specifying,
// such as "color", "shape", "geo", or "tags".
string name = 1;
// The allowed tokens in the namespace.
repeated string allow_tokens = 2;
// The denied tokens in the namespace.
// The denied tokens have exactly the same format as the token fields, but
// represents a negation. When a token is denied, then matches will be
// excluded whenever the other datapoint has that token.
//
// For example, if a query specifies {color: red, blue, !purple}, then that
// query will match datapoints that are red or blue, but if those points are
// also purple, then they will be excluded even if they are red/blue.
repeated string deny_tokens = 3;
}
###Output
_____no_output_____
###Markdown
Compile the protocol buffer, and then `match_service_pb2.py` and `match_service_pb2_grpc.py` are generated.
###Code
! python -m grpc_tools.protoc -I=. --proto_path=third_party/googleapis --python_out=. --grpc_python_out=. match_service.proto
###Output
_____no_output_____
###Markdown
Obtain the Private Endpoint:
###Code
index_endpoints = list(index_endpoint_client.list_index_endpoints(parent=PARENT))
DEPLOYED_INDEX_SERVER_IP = index_endpoints[0].deployed_indexes[0].private_endpoints.match_grpc_address
DEPLOYED_INDEX_SERVER_IP
###Output
_____no_output_____
###Markdown
Test your query:
###Code
import match_service_pb2
import match_service_pb2_grpc
channel = grpc.insecure_channel("{}:10000".format(DEPLOYED_INDEX_SERVER_IP))
stub = match_service_pb2_grpc.MatchServiceStub(channel)
# Test query
query = [-0.11333, 0.48402, 0.090771, -0.22439, 0.034206, -0.55831, 0.041849, -0.53573, 0.18809, -0.58722, 0.015313, -0.014555, 0.80842, -0.038519, 0.75348, 0.70502, -0.17863, 0.3222, 0.67575, 0.67198, 0.26044, 0.4187, -0.34122, 0.2286, -0.53529, 1.2582, -0.091543, 0.19716, -0.037454, -0.3336, 0.31399, 0.36488, 0.71263, 0.1307, -0.24654, -0.52445, -0.036091, 0.55068, 0.10017, 0.48095, 0.71104, -0.053462, 0.22325, 0.30917, -0.39926, 0.036634, -0.35431, -0.42795, 0.46444, 0.25586, 0.68257, -0.20821, 0.38433, 0.055773, -0.2539, -0.20804, 0.52522, -0.11399, -0.3253, -0.44104, 0.17528, 0.62255, 0.50237, -0.7607, -0.071786, 0.0080131, -0.13286, 0.50097, 0.18824, -0.54722, -0.42664, 0.4292, 0.14877, -0.0072514, -0.16484, -0.059798, 0.9895, -0.61738, 0.054169, 0.48424, -0.35084, -0.27053, 0.37829, 0.11503, -0.39613, 0.24266, 0.39147, -0.075256, 0.65093, -0.20822, -0.17456, 0.53571, -0.16537, 0.13582, -0.56016, 0.016964, 0.1277, 0.94071, -0.22608, -0.021106]
request = match_service_pb2.MatchRequest()
request.deployed_index_id = DEPLOYED_INDEX_ID
for val in query:
request.float_val.append(val)
response = stub.Match(request)
response
###Output
_____no_output_____
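###Markdown
The proto above also defines a `BatchMatch` RPC for sending several queries in a single call. The cell below is a minimal sketch of how the generated stub could be used for it; it assumes the same compiled stub and deployed index as the single-query test, and simply reuses two vectors from the "test" split as example queries.
###Code
# Hedged sketch: batch two queries against the deployed ANN index.
# Queries are grouped per deployed index, as BatchMatchRequest /
# BatchMatchRequestPerIndex in the proto above describe.
batch_request = match_service_pb2.BatchMatchRequest()
per_index_requests = batch_request.requests.add()  # BatchMatchRequestPerIndex
per_index_requests.deployed_index_id = DEPLOYED_INDEX_ID
for embedding in (test[0], test[1]):  # two example query vectors
    single_request = per_index_requests.requests.add()  # MatchRequest
    single_request.deployed_index_id = DEPLOYED_INDEX_ID
    single_request.num_neighbors = k
    single_request.float_val.extend(float(x) for x in embedding)
batch_response = stub.BatchMatch(batch_request)
batch_response
###Output
_____no_output_____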
###Markdown
Compute RecallUse the deployed brute force index as the ground truth to calculate the recall of the ANN index:
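Concretely, with $Q$ the set of "test" queries and $\mathrm{ANN}_k(q)$, $\mathrm{BF}_k(q)$ the neighbor ID sets returned for query $q$ by the ANN and brute force indexes, the cell below computes $\text{recall@}k = \frac{1}{|Q| \cdot k} \sum_{q \in Q} \left|\mathrm{ANN}_k(q) \cap \mathrm{BF}_k(q)\right|$.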
###Code
def get_neighbors(embedding, deployed_index_id):
request = match_service_pb2.MatchRequest(num_neighbors=k)
request.deployed_index_id = deployed_index_id
for val in embedding:
request.float_val.append(val)
response = stub.Match(request)
return [int(n.id) for n in response.neighbor]
# This will take 5-10 min
recall = sum([len(set(get_neighbors(test[i], DEPLOYED_BRUTE_FORCE_INDEX_ID)).intersection(set(get_neighbors(test[i], DEPLOYED_INDEX_ID)))) for i in range(len(test))]) / (1.0 * len(test) * k)
print("Recall: {}".format(recall))
###Output
_____no_output_____
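###Markdown
Before deleting, note that a deployed index typically has to be undeployed from its IndexEndpoint first. The cell below is a minimal sketch of that step using the same `aiplatform_v1beta1` client (an added step, not part of the original cleanup cell); adjust it to your own resources.
###Code
# Hedged sketch: undeploy both indexes from the endpoint prior to deletion.
r = index_endpoint_client.undeploy_index(
    index_endpoint=INDEX_ENDPOINT_NAME, deployed_index_id=DEPLOYED_INDEX_ID
)
r.result()
r = index_endpoint_client.undeploy_index(
    index_endpoint=INDEX_ENDPOINT_NAME, deployed_index_id=DEPLOYED_BRUTE_FORCE_INDEX_ID
)
r.result()
###Output
_____no_output_____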
###Markdown
Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial. You can also manually delete resources that you created by running the following code.
###Code
index_client.delete_index(name=INDEX_RESOURCE_NAME)
index_client.delete_index(name=INDEX_BRUTE_FORCE_RESOURCE_NAME)
index_endpoint_client.delete_index_endpoint(name=INDEX_ENDPOINT_NAME)
###Output
_____no_output_____
###Markdown
Run in Google Cloud Notebooks View on GitHub OverviewThis example demonstrates how to use the GCP ANN Service. It is a high-scale, low-latency solution for finding similar vectors (or more specifically "embeddings") for a large corpus. Moreover, it is a fully managed offering, further reducing operational overhead. It is built upon [Approximate Nearest Neighbor (ANN) technology](https://ai.googleblog.com/2020/07/announcing-scann-efficient-vector.html) developed by Google Research. DatasetThe dataset used for this tutorial is the [GloVe dataset](https://nlp.stanford.edu/projects/glove/). ObjectiveIn this notebook, you will learn how to create an Approximate Nearest Neighbor (ANN) index, query against indexes, and validate the performance of the index. The steps performed include:* Create ANN Index and Brute Force Index* Create an IndexEndpoint with VPC Network* Deploy ANN Index and Brute Force Index* Perform online query* Compute recall Costs This tutorial uses billable components of Google Cloud:* Vertex AI* Cloud Storage Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage. Before you begin* **Prepare a VPC network**. To reduce any network overhead that might lead to an unnecessary increase in latency, it is best to call the ANN endpoints from your VPC via a direct [VPC Peering](https://cloud.google.com/vertex-ai/docs/general/vpc-peering) connection. The following section describes how to set up a VPC Peering connection if you don't have one. This is a one-time initial setup task. You can also reuse an existing VPC network and skip this section.* **WARNING:** The match service gRPC API (to create online queries against your deployed index) has to be executed in a Google Cloud Notebook instance that is created with the following requirements: * **In the same region as where your ANN service is deployed** (for example, if you set `REGION = "us-central1"` the same as in this tutorial, the notebook instance has to be in `us-central1`). * **Make sure you select the VPC network you created for the ANN service** (instead of using the "default" one). That is, you will have to create the VPC network below and then create a new notebook instance that uses that VPC. * If you run it in Colab or a Google Cloud Notebook instance in a different VPC network or region, the gRPC API will fail to peer with the network (InactiveRPCError).
###Code
PROJECT_ID = "<your_project_id>" # @param {type:"string"}
NETWORK_NAME = "ucaip-haystack-vpc-network" # @param {type:"string"}
PEERING_RANGE_NAME = "ucaip-haystack-range"
# Create a VPC network
! gcloud compute networks create {NETWORK_NAME} --bgp-routing-mode=regional --subnet-mode=auto --project={PROJECT_ID}
# Add necessary firewall rules
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-icmp --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow icmp
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-internal --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow all --source-ranges 10.128.0.0/9
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-rdp --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow tcp:3389
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-ssh --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow tcp:22
# Reserve IP range
! gcloud compute addresses create {PEERING_RANGE_NAME} --global --prefix-length=16 --network={NETWORK_NAME} --purpose=VPC_PEERING --project={PROJECT_ID} --description="peering range for uCAIP Haystack."
# Set up peering with service networking
! gcloud services vpc-peerings connect --service=servicenetworking.googleapis.com --network={NETWORK_NAME} --ranges={PEERING_RANGE_NAME} --project={PROJECT_ID}
###Output
_____no_output_____
###Markdown
* Authentication: rerun `$ gcloud auth login` in the Google Cloud Notebooks terminal when you are logged out and need the credentials again. InstallationDownload and install the latest (preview) version of the Vertex SDK for Python.
###Code
! pip install -U git+https://github.com/googleapis/python-aiplatform.git@main-test --user
###Output
_____no_output_____
###Markdown
Install `h5py` to prepare the sample dataset, and `grpcio-tools` for querying against the index.
###Code
! pip install -U grpcio-tools --user
! pip install -U h5py --user
###Output
_____no_output_____
###Markdown
Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager).1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the Vertex AI API and Compute Engine API, and Service Networking API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,servicenetworking.googleapis.com).1. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
_____no_output_____
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "<your_project_id>" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**Set the name of your Cloud Storage bucket below. It must be unique across all Cloud Storage buckets. You may also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are available](https://cloud.google.com/vertex-ai/docs/general/locationsavailable_regions). You may not use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Import libraries and define constants Import the Vertex AI (unified) client library into your Python environment.
###Code
import time
import grpc
import h5py
from google.cloud import aiplatform_v1beta1
from google.protobuf import struct_pb2
REGION = "us-central1"
ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
NETWORK_NAME = "ucaip-haystack-vpc-network" # @param {type:"string"}
AUTH_TOKEN = !gcloud auth print-access-token
PROJECT_NUMBER = !gcloud projects list --filter="PROJECT_ID:'{PROJECT_ID}'" --format='value(PROJECT_NUMBER)'
PROJECT_NUMBER = PROJECT_NUMBER[0]
PARENT = "projects/{}/locations/{}".format(PROJECT_ID, REGION)
print("ENDPOINT: {}".format(ENDPOINT))
print("PROJECT_ID: {}".format(PROJECT_ID))
print("REGION: {}".format(REGION))
!gcloud config set project {PROJECT_ID}
!gcloud config set ai_platform/region {REGION}
###Output
_____no_output_____
###Markdown
Prepare the DataThe GloVe dataset consists of a set of pre-trained embeddings. The embeddings are split into a "train" split, and a "test" split.We will create a vector search index from the "train" split, and use the embedding vectors in the "test" split as query vectors to test the vector search index.NOTE: While the data split uses the term "train", these are pre-trained embeddings and thus are ready to be indexed for search. The terms "train" and "test" split are used just to be consistent with usual machine learning terminology.Download the GloVe dataset.
###Code
! gsutil cp gs://cloud-samples-data/ai-platform-unified/matching_engine/glove-100-angular.hdf5 .
###Output
_____no_output_____
###Markdown
Read the data into memory.
###Code
# The number of nearest neighbors to be retrieved from database for each query.
k = 10
h5 = h5py.File("glove-100-angular.hdf5", "r")
train = h5["train"]
test = h5["test"]
train[0]
###Output
_____no_output_____
###Markdown
Save the train split in JSONL format.
###Code
with open("glove100.json", "w") as f:
for i in range(len(train)):
f.write('{"id":"' + str(i) + '",')
f.write('"embedding":[' + ",".join(str(x) for x in train[i]) + "]}")
f.write("\n")
###Output
_____no_output_____
###Markdown
Upload the training data to GCS.
###Code
# NOTE: Everything in this GCS DIR will be DELETED before uploading the data.
! gsutil rm -rf {BUCKET_NAME}
! gsutil cp glove100.json {BUCKET_NAME}/glove100.json
! gsutil ls {BUCKET_NAME}
###Output
_____no_output_____
###Markdown
Create Indexes Create ANN Index (for Production Usage)
###Code
index_client = aiplatform_v1beta1.IndexServiceClient(
client_options=dict(api_endpoint=ENDPOINT)
)
DIMENSIONS = 100
DISPLAY_NAME = "glove_100_1"
DISPLAY_NAME_BRUTE_FORCE = DISPLAY_NAME + "_brute_force"
###Output
_____no_output_____
###Markdown
Create the ANN index configuration: Please read the documentation to understand the various configuration parameters that can be used to tune the index.
###Code
treeAhConfig = struct_pb2.Struct(
fields={
"leafNodeEmbeddingCount": struct_pb2.Value(number_value=500),
"leafNodesToSearchPercent": struct_pb2.Value(number_value=7),
}
)
algorithmConfig = struct_pb2.Struct(
fields={"treeAhConfig": struct_pb2.Value(struct_value=treeAhConfig)}
)
config = struct_pb2.Struct(
fields={
"dimensions": struct_pb2.Value(number_value=DIMENSIONS),
"approximateNeighborsCount": struct_pb2.Value(number_value=150),
"distanceMeasureType": struct_pb2.Value(string_value="DOT_PRODUCT_DISTANCE"),
"algorithmConfig": struct_pb2.Value(struct_value=algorithmConfig),
}
)
metadata = struct_pb2.Struct(
fields={
"config": struct_pb2.Value(struct_value=config),
"contentsDeltaUri": struct_pb2.Value(string_value=BUCKET_NAME),
}
)
ann_index = {
"display_name": DISPLAY_NAME,
"description": "Glove 100 ANN index",
"metadata": struct_pb2.Value(struct_value=metadata),
}
ann_index = index_client.create_index(parent=PARENT, index=ann_index)
# Poll the operation until it's done successfully.
# This will take ~45 min.
while True:
if ann_index.done():
break
print("Poll the operation to create index...")
time.sleep(60)
INDEX_RESOURCE_NAME = ann_index.result().name
INDEX_RESOURCE_NAME
###Output
_____no_output_____
###Markdown
Create Brute Force Index (for Ground Truth)The brute force index uses a naive brute force method to find the nearest neighbors. This method is not fast or efficient. Hence brute force indices are not recommended for production usage. They are to be used to find the "ground truth" set of neighbors, so that the "ground truth" set can be used to measure recall of the indices being tuned for production usage. To ensure an apples-to-apples comparison, the `distanceMeasureType`, `featureNormType`, and `dimensions` of the brute force index should match those of the production indices being tuned. Create the brute force index configuration:
###Code
from google.protobuf import *
algorithmConfig = struct_pb2.Struct(
fields={"bruteForceConfig": struct_pb2.Value(struct_value=struct_pb2.Struct())}
)
config = struct_pb2.Struct(
fields={
"dimensions": struct_pb2.Value(number_value=DIMENSIONS),
"approximateNeighborsCount": struct_pb2.Value(number_value=150),
"distanceMeasureType": struct_pb2.Value(string_value="DOT_PRODUCT_DISTANCE"),
"algorithmConfig": struct_pb2.Value(struct_value=algorithmConfig),
}
)
metadata = struct_pb2.Struct(
fields={
"config": struct_pb2.Value(struct_value=config),
"contentsDeltaUri": struct_pb2.Value(string_value=BUCKET_NAME),
}
)
brute_force_index = {
"display_name": DISPLAY_NAME_BRUTE_FORCE,
"description": "Glove 100 index (brute force)",
"metadata": struct_pb2.Value(struct_value=metadata),
}
brute_force_index = index_client.create_index(parent=PARENT, index=brute_force_index)
# Poll the operation until it's done successfully.
# This will take ~45 min.
while True:
if brute_force_index.done():
break
print("Poll the operation to create index...")
time.sleep(60)
INDEX_BRUTE_FORCE_RESOURCE_NAME = brute_force_index.result().name
INDEX_BRUTE_FORCE_RESOURCE_NAME
###Output
_____no_output_____
###Markdown
Update IndexesCreate incremental data file.
###Code
with open("glove100_incremental.json", "w") as f:
f.write(
'{"id":"0","embedding":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}\n'
)
###Output
_____no_output_____
###Markdown
Copy the incremental data file to a new subdirectory.
###Code
! gsutil cp glove100_incremental.json {BUCKET_NAME}/incremental/glove100.json
###Output
_____no_output_____
###Markdown
Create update index request
###Code
metadata = struct_pb2.Struct(
fields={
"contentsDeltaUri": struct_pb2.Value(string_value=BUCKET_NAME + "/incremental"),
}
)
ann_index = {
"name": INDEX_RESOURCE_NAME,
"display_name": DISPLAY_NAME,
"description": "Glove 100 ANN index",
"metadata": struct_pb2.Value(struct_value=metadata),
}
ann_index = index_client.update_index(index=ann_index)
# Poll the operation until it's done successfully.
# This will take ~45 min.
while True:
if ann_index.done():
break
print("Poll the operation to update index...")
time.sleep(60)
INDEX_RESOURCE_NAME = ann_index.result().name
INDEX_RESOURCE_NAME
###Output
_____no_output_____
###Markdown
Create an IndexEndpoint with VPC Network
###Code
index_endpoint_client = aiplatform_v1beta1.IndexEndpointServiceClient(
client_options=dict(api_endpoint=ENDPOINT)
)
VPC_NETWORK_NAME = "projects/{}/global/networks/{}".format(PROJECT_NUMBER, NETWORK_NAME)
VPC_NETWORK_NAME
index_endpoint = {
"display_name": "index_endpoint_for_demo",
"network": VPC_NETWORK_NAME,
}
r = index_endpoint_client.create_index_endpoint(
parent=PARENT, index_endpoint=index_endpoint
)
r.result()
INDEX_ENDPOINT_NAME = r.result().name
INDEX_ENDPOINT_NAME
###Output
_____no_output_____
###Markdown
Deploy Indexes Deploy ANN Index
###Code
DEPLOYED_INDEX_ID = "ann_glove_deployed"
deploy_ann_index = {
"id": DEPLOYED_INDEX_ID,
"display_name": DEPLOYED_INDEX_ID,
"index": INDEX_RESOURCE_NAME,
}
r = index_endpoint_client.deploy_index(
index_endpoint=INDEX_ENDPOINT_NAME, deployed_index=deploy_ann_index
)
# Poll the operation until it's done successfully.
while True:
if r.done():
break
print("Poll the operation to deploy index...")
time.sleep(60)
r.result()
###Output
_____no_output_____
###Markdown
Deploy Brute Force Index
###Code
DEPLOYED_BRUTE_FORCE_INDEX_ID = "glove_brute_force_deployed"
deploy_brute_force_index = {
"id": DEPLOYED_BRUTE_FORCE_INDEX_ID,
"display_name": DEPLOYED_BRUTE_FORCE_INDEX_ID,
"index": INDEX_BRUTE_FORCE_RESOURCE_NAME,
}
r = index_endpoint_client.deploy_index(
index_endpoint=INDEX_ENDPOINT_NAME, deployed_index=deploy_brute_force_index
)
# Poll the operation until it's done successfully.
while True:
if r.done():
break
print("Poll the operation to deploy index...")
time.sleep(60)
r.result()
###Output
_____no_output_____
###Markdown
Create Online QueriesAfter you have built your indexes, you may query against the deployed index through the online querying gRPC API (Match service) within the virtual machine instances from the same region (for example 'us-central1' in this tutorial). The way a client uses this gRPC API is by following these steps:* Write `match_service.proto` locally* Clone the repository that contains the dependencies of match_service.proto in the Terminal:`$ mkdir third_party && cd third_party``$ git clone https://github.com/googleapis/googleapis.git`* Compile the protocol buffer (see below)* Obtain the index endpoint* Use a code-generated stub to make the call, passing the parameter values
###Code
%%writefile match_service.proto
syntax = "proto3";
package google.cloud.aiplatform.container.v1beta1;
import "google/rpc/status.proto";
// MatchService is a Google managed service for efficient vector similarity
// search at scale.
service MatchService {
// Returns the nearest neighbors for the query. If it is a sharded
// deployment, calls the other shards and aggregates the responses.
rpc Match(MatchRequest) returns (MatchResponse) {}
// Returns the nearest neighbors for batch queries. If it is a sharded
// deployment, calls the other shards and aggregates the responses.
rpc BatchMatch(BatchMatchRequest) returns (BatchMatchResponse) {}
}
// Parameters for a match query.
message MatchRequest {
// The ID of the DeploydIndex that will serve the request.
// This MatchRequest is sent to a specific IndexEndpoint of the Control API,
// as per the IndexEndpoint.network. That IndexEndpoint also has
// IndexEndpoint.deployed_indexes, and each such index has an
// DeployedIndex.id field.
// The value of the field below must equal one of the DeployedIndex.id
// fields of the IndexEndpoint that is being called for this request.
string deployed_index_id = 1;
// The embedding values.
repeated float float_val = 2;
// The number of nearest neighbors to be retrieved from database for
// each query. If not set, will use the default from
// the service configuration.
int32 num_neighbors = 3;
// The list of restricts.
repeated Namespace restricts = 4;
// Crowding is a constraint on a neighbor list produced by nearest neighbor
// search requiring that no more than some value k' of the k neighbors
// returned have the same value of crowding_attribute.
// It's used for improving result diversity.
// This field is the maximum number of matches with the same crowding tag.
int32 per_crowding_attribute_num_neighbors = 5;
// The number of neighbors to find via approximate search before
// exact reordering is performed. If not set, the default value from scam
// config is used; if set, this value must be > 0.
int32 approx_num_neighbors = 6;
// The fraction of the number of leaves to search, set at query time allows
// user to tune search performance. This value increase result in both search
// accuracy and latency increase. The value should be between 0.0 and 1.0. If
// not set or set to 0.0, query uses the default value specified in
// NearestNeighborSearchConfig.TreeAHConfig.leaf_nodes_to_search_percent.
int32 leaf_nodes_to_search_percent_override = 7;
}
// Response of a match query.
message MatchResponse {
message Neighbor {
// The ids of the matches.
string id = 1;
// The distances of the matches.
double distance = 2;
}
// All its neighbors.
repeated Neighbor neighbor = 1;
}
// Parameters for a batch match query.
message BatchMatchRequest {
// Batched requests against one index.
message BatchMatchRequestPerIndex {
// The ID of the DeploydIndex that will serve the request.
string deployed_index_id = 1;
// The requests against the index identified by the above deployed_index_id.
repeated MatchRequest requests = 2;
// Selects the optimal batch size to use for low-level batching. Queries
// within each low level batch are executed sequentially while low level
// batches are executed in parallel.
// This field is optional, defaults to 0 if not set. A non-positive number
// disables low level batching, i.e. all queries are executed sequentially.
int32 low_level_batch_size = 3;
}
// The batch requests grouped by indexes.
repeated BatchMatchRequestPerIndex requests = 1;
}
// Response of a batch match query.
message BatchMatchResponse {
// Batched responses for one index.
message BatchMatchResponsePerIndex {
// The ID of the DeployedIndex that produced the responses.
string deployed_index_id = 1;
// The match responses produced by the index identified by the above
// deployed_index_id. This field is set only when the query against that
// index succeed.
repeated MatchResponse responses = 2;
// The status of response for the batch query identified by the above
// deployed_index_id.
google.rpc.Status status = 3;
}
// The batched responses grouped by indexes.
repeated BatchMatchResponsePerIndex responses = 1;
}
// Namespace specifies the rules for determining the datapoints that are
// eligible for each matching query, overall query is an AND across namespaces.
message Namespace {
// The string name of the namespace that this proto is specifying,
// such as "color", "shape", "geo", or "tags".
string name = 1;
// The allowed tokens in the namespace.
repeated string allow_tokens = 2;
// The denied tokens in the namespace.
// The denied tokens have exactly the same format as the token fields, but
// represents a negation. When a token is denied, then matches will be
// excluded whenever the other datapoint has that token.
//
// For example, if a query specifies {color: red, blue, !purple}, then that
// query will match datapoints that are red or blue, but if those points are
// also purple, then they will be excluded even if they are red/blue.
repeated string deny_tokens = 3;
}
###Output
_____no_output_____
###Markdown
Compile the protocol buffer, and then `match_service_pb2.py` and `match_service_pb2_grpc.py` are generated.
###Code
! python -m grpc_tools.protoc -I=. --proto_path=third_party/googleapis --python_out=. --grpc_python_out=. match_service.proto
###Output
_____no_output_____
###Markdown
Obtain the Private Endpoint:
###Code
DEPLOYED_INDEX_SERVER_IP = (
list(index_endpoint_client.list_index_endpoints(parent=PARENT))[0]
.deployed_indexes[0]
.private_endpoints.match_grpc_address
)
DEPLOYED_INDEX_SERVER_IP
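# Note: the lookup above simply takes the first IndexEndpoint in the project and
# its first DeployedIndex. If the project has several, pick the entry matching
# INDEX_ENDPOINT_NAME and DEPLOYED_INDEX_ID instead of indexing with [0].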
###Output
_____no_output_____
###Markdown
Test your query:
###Code
import match_service_pb2
import match_service_pb2_grpc
channel = grpc.insecure_channel("{}:10000".format(DEPLOYED_INDEX_SERVER_IP))
stub = match_service_pb2_grpc.MatchServiceStub(channel)
# Test query
query = [
-0.11333,
0.48402,
0.090771,
-0.22439,
0.034206,
-0.55831,
0.041849,
-0.53573,
0.18809,
-0.58722,
0.015313,
-0.014555,
0.80842,
-0.038519,
0.75348,
0.70502,
-0.17863,
0.3222,
0.67575,
0.67198,
0.26044,
0.4187,
-0.34122,
0.2286,
-0.53529,
1.2582,
-0.091543,
0.19716,
-0.037454,
-0.3336,
0.31399,
0.36488,
0.71263,
0.1307,
-0.24654,
-0.52445,
-0.036091,
0.55068,
0.10017,
0.48095,
0.71104,
-0.053462,
0.22325,
0.30917,
-0.39926,
0.036634,
-0.35431,
-0.42795,
0.46444,
0.25586,
0.68257,
-0.20821,
0.38433,
0.055773,
-0.2539,
-0.20804,
0.52522,
-0.11399,
-0.3253,
-0.44104,
0.17528,
0.62255,
0.50237,
-0.7607,
-0.071786,
0.0080131,
-0.13286,
0.50097,
0.18824,
-0.54722,
-0.42664,
0.4292,
0.14877,
-0.0072514,
-0.16484,
-0.059798,
0.9895,
-0.61738,
0.054169,
0.48424,
-0.35084,
-0.27053,
0.37829,
0.11503,
-0.39613,
0.24266,
0.39147,
-0.075256,
0.65093,
-0.20822,
-0.17456,
0.53571,
-0.16537,
0.13582,
-0.56016,
0.016964,
0.1277,
0.94071,
-0.22608,
-0.021106,
]
request = match_service_pb2.MatchRequest()
request.deployed_index_id = DEPLOYED_INDEX_ID
for val in query:
request.float_val.append(val)
response = stub.Match(request)
response
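# The MatchResponse message defined in match_service.proto above carries
# repeated Neighbor entries; a minimal way to view them as (id, distance) pairs:
print([(n.id, n.distance) for n in response.neighbor])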
###Output
_____no_output_____
###Markdown
Batch QueryYou can run multiple queries in a single RPC call using the BatchMatch API:
###Code
def get_request(embedding, deployed_index_id):
request = match_service_pb2.MatchRequest(num_neighbors=k)
request.deployed_index_id = deployed_index_id
for val in embedding:
request.float_val.append(val)
return request
# Test query
queries = [
[
-0.11333,
0.48402,
0.090771,
-0.22439,
0.034206,
-0.55831,
0.041849,
-0.53573,
0.18809,
-0.58722,
0.015313,
-0.014555,
0.80842,
-0.038519,
0.75348,
0.70502,
-0.17863,
0.3222,
0.67575,
0.67198,
0.26044,
0.4187,
-0.34122,
0.2286,
-0.53529,
1.2582,
-0.091543,
0.19716,
-0.037454,
-0.3336,
0.31399,
0.36488,
0.71263,
0.1307,
-0.24654,
-0.52445,
-0.036091,
0.55068,
0.10017,
0.48095,
0.71104,
-0.053462,
0.22325,
0.30917,
-0.39926,
0.036634,
-0.35431,
-0.42795,
0.46444,
0.25586,
0.68257,
-0.20821,
0.38433,
0.055773,
-0.2539,
-0.20804,
0.52522,
-0.11399,
-0.3253,
-0.44104,
0.17528,
0.62255,
0.50237,
-0.7607,
-0.071786,
0.0080131,
-0.13286,
0.50097,
0.18824,
-0.54722,
-0.42664,
0.4292,
0.14877,
-0.0072514,
-0.16484,
-0.059798,
0.9895,
-0.61738,
0.054169,
0.48424,
-0.35084,
-0.27053,
0.37829,
0.11503,
-0.39613,
0.24266,
0.39147,
-0.075256,
0.65093,
-0.20822,
-0.17456,
0.53571,
-0.16537,
0.13582,
-0.56016,
0.016964,
0.1277,
0.94071,
-0.22608,
-0.021106,
],
[
-0.99544,
-2.3651,
-0.24332,
-1.0321,
0.42052,
-1.1817,
-0.16451,
-1.683,
0.49673,
-0.27258,
-0.025397,
0.34188,
1.5523,
1.3532,
0.33297,
-0.0056677,
-0.76525,
0.49587,
1.2211,
0.83394,
-0.20031,
-0.59657,
0.38485,
-0.23487,
-1.0725,
0.95856,
0.16161,
-1.2496,
1.6751,
0.73899,
0.051347,
-0.42702,
0.16257,
-0.16772,
0.40146,
0.29837,
0.96204,
-0.36232,
-0.47848,
0.78278,
0.14834,
1.3407,
0.47834,
-0.39083,
-1.037,
-0.24643,
-0.75841,
0.7669,
-0.37363,
0.52741,
0.018563,
-0.51301,
0.97674,
0.55232,
1.1584,
0.73715,
1.3055,
-0.44743,
-0.15961,
0.85006,
-0.34092,
-0.67667,
0.2317,
1.5582,
1.2308,
-0.62213,
-0.032801,
0.1206,
-0.25899,
-0.02756,
-0.52814,
-0.93523,
0.58434,
-0.24799,
0.37692,
0.86527,
0.069626,
1.3096,
0.29975,
-1.3651,
-0.32048,
-0.13741,
0.33329,
-1.9113,
-0.60222,
-0.23921,
0.12664,
-0.47961,
-0.89531,
0.62054,
0.40869,
-0.08503,
0.6413,
-0.84044,
-0.74325,
-0.19426,
0.098722,
0.32648,
-0.67621,
-0.62692,
],
]
batch_request = match_service_pb2.BatchMatchRequest()
batch_request_ann = match_service_pb2.BatchMatchRequest.BatchMatchRequestPerIndex()
batch_request_brute_force = (
match_service_pb2.BatchMatchRequest.BatchMatchRequestPerIndex()
)
batch_request_ann.deployed_index_id = DEPLOYED_INDEX_ID
batch_request_brute_force.deployed_index_id = DEPLOYED_BRUTE_FORCE_INDEX_ID
for query in queries:
batch_request_ann.requests.append(get_request(query, DEPLOYED_INDEX_ID))
batch_request_brute_force.requests.append(
get_request(query, DEPLOYED_BRUTE_FORCE_INDEX_ID)
)
batch_request.requests.append(batch_request_ann)
batch_request.requests.append(batch_request_brute_force)
response = stub.BatchMatch(batch_request)
response
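# A small sketch of unpacking the BatchMatchResponse: results come back grouped
# per deployed index (see BatchMatchResponsePerIndex in the proto above).
for per_index in response.responses:
    print(per_index.deployed_index_id)
    for match in per_index.responses:
        print([(n.id, round(n.distance, 4)) for n in match.neighbor])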
###Output
_____no_output_____
###Markdown
Compute RecallUse deployed brute force Index as the ground truth to calculate the recall of ANN Index:
###Code
def get_neighbors(embedding, deployed_index_id):
request = match_service_pb2.MatchRequest(num_neighbors=k)
request.deployed_index_id = deployed_index_id
for val in embedding:
request.float_val.append(val)
response = stub.Match(request)
return [int(n.id) for n in response.neighbor]
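# Optional spot check on a single test vector before the full loop; the returned
# IDs refer to row indices of the "train" split.
print(get_neighbors(test[0], DEPLOYED_INDEX_ID))
print(get_neighbors(test[0], DEPLOYED_BRUTE_FORCE_INDEX_ID))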
# This will take 5-10 min
recall = sum(
[
len(
set(get_neighbors(test[i], DEPLOYED_BRUTE_FORCE_INDEX_ID)).intersection(
set(get_neighbors(test[i], DEPLOYED_INDEX_ID))
)
)
for i in range(len(test))
]
) / (1.0 * len(test) * k)
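# Equivalently: recall = (number of brute-force "ground truth" neighbors that
# also appear in the ANN results) / (len(test) * k), i.e. the average fraction
# of the true top-k neighbors that the ANN index recovers per query.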
print("Recall: {}".format(recall))
###Output
_____no_output_____
###Markdown
Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. You can also manually delete resources that you created by running the following code.
###Code
index_client.delete_index(name=INDEX_RESOURCE_NAME)
index_client.delete_index(name=INDEX_BRUTE_FORCE_RESOURCE_NAME)
index_endpoint_client.delete_index_endpoint(name=INDEX_ENDPOINT_NAME)
###Output
_____no_output_____
###Markdown
OverviewThis example demonstrates how to use the GCP ANN Service. It is a high-scale, low-latency solution for finding similar vectors (or more specifically "embeddings") for a large corpus. Moreover, it is a fully managed offering, further reducing operational overhead. It is built upon [Approximate Nearest Neighbor (ANN) technology](https://ai.googleblog.com/2020/07/announcing-scann-efficient-vector.html) developed by Google Research. DatasetThe dataset used for this tutorial is the [GloVe dataset](https://nlp.stanford.edu/projects/glove/). ObjectiveIn this notebook, you will learn how to create an Approximate Nearest Neighbor (ANN) Index, query against indexes, and validate the performance of the index. The steps performed include:* Create ANN Index and Brute Force Index* Create an IndexEndpoint with VPC Network* Deploy ANN Index and Brute Force Index* Perform online query* Compute recall Costs This tutorial uses billable components of Google Cloud:* Vertex AI* Cloud StorageLearn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage. Before you begin* **Prepare a VPC network**. To reduce any network overhead that might lead to an unnecessary increase in latency, it is best to call the ANN endpoints from your VPC via a direct [VPC Peering](https://cloud.google.com/vertex-ai/docs/general/vpc-peering) connection. The following section describes how to set up a VPC Peering connection if you don't have one. This is a one-time initial setup task. You can also reuse an existing VPC network and skip this section.* **WARNING:** The match service gRPC API (to create online queries against your deployed index) has to be executed in a Google Cloud Notebook instance that is created with the following requirements: * **In the same region as where your ANN service is deployed** (for example, if you set `REGION = "us-central1"` as in this tutorial, the notebook instance has to be in `us-central1`). * **Make sure you select the VPC network you created for the ANN service** (instead of using the "default" one). That is, you will have to create the VPC network below and then create a new notebook instance that uses that VPC. * If you run it in Colab or a Google Cloud Notebook instance in a different VPC network or region, the gRPC API will fail to peer the network (InactiveRPCError).
###Code
PROJECT_ID = "<your_project_id>" # @param {type:"string"}
NETWORK_NAME = "ucaip-haystack-vpc-network" # @param {type:"string"}
PEERING_RANGE_NAME = "ucaip-haystack-range"
# Create a VPC network
! gcloud compute networks create {NETWORK_NAME} --bgp-routing-mode=regional --subnet-mode=auto --project={PROJECT_ID}
# Add necessary firewall rules
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-icmp --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow icmp
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-internal --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow all --source-ranges 10.128.0.0/9
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-rdp --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow tcp:3389
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-ssh --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow tcp:22
# Reserve IP range
! gcloud compute addresses create {PEERING_RANGE_NAME} --global --prefix-length=16 --network={NETWORK_NAME} --purpose=VPC_PEERING --project={PROJECT_ID} --description="peering range for uCAIP Haystack."
# Set up peering with service networking
! gcloud services vpc-peerings connect --service=servicenetworking.googleapis.com --network={NETWORK_NAME} --ranges={PEERING_RANGE_NAME} --project={PROJECT_ID}
###Output
_____no_output_____
###Markdown
* Authentication: `$ gcloud auth login` rerun this in Google Cloud Notebook terminal when you are logged out and need the credential again. InstallationDownload and install the latest (preview) version of the Vertex SDK for Python.
###Code
! pip install -U git+https://github.com/googleapis/python-aiplatform.git@main-test --user
###Output
_____no_output_____
###Markdown
Install the `h5py` to prepare sample dataset, and the `grpcio-tools` for querying against the index.
###Code
! pip install -U grpcio-tools --user
! pip install -U h5py --user
###Output
_____no_output_____
###Markdown
Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager).1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the Vertex AI API and Compute Engine API, and Service Networking API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,servicenetworking.googleapis.com).1. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
_____no_output_____
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "<your_project_id>" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**Set the name of your Cloud Storage bucket below. It must be unique across all Cloud Storage buckets. You may also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are available](https://cloud.google.com/vertex-ai/docs/general/locations#available_regions). You may not use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Import libraries and define constants Import the Vertex AI (unified) client library into your Python environment.
###Code
import time
import grpc
import h5py
from google.cloud import aiplatform_v1beta1
from google.protobuf import struct_pb2
REGION = "us-central1"
ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
NETWORK_NAME = "ucaip-haystack-vpc-network" # @param {type:"string"}
AUTH_TOKEN = !gcloud auth print-access-token
PROJECT_NUMBER = !gcloud projects list --filter="PROJECT_ID:'{PROJECT_ID}'" --format='value(PROJECT_NUMBER)'
PROJECT_NUMBER = PROJECT_NUMBER[0]
PARENT = "projects/{}/locations/{}".format(PROJECT_ID, REGION)
print("ENDPOINT: {}".format(ENDPOINT))
print("PROJECT_ID: {}".format(PROJECT_ID))
print("REGION: {}".format(REGION))
!gcloud config set project {PROJECT_ID}
!gcloud config set ai_platform/region {REGION}
###Output
_____no_output_____
###Markdown
Prepare the DataThe GloVe dataset consists of a set of pre-trained embeddings. The embeddings are split into a "train" split, and a "test" split.We will create a vector search index from the "train" split, and use the embedding vectors in the "test" split as query vectors to test the vector search index.NOTE: While the data split uses the term "train", these are pre-trained embeddings and thus are ready to be indexed for search. The terms "train" and "test" split are used just to be consistent with usual machine learning terminology.Download the GloVe dataset.
###Code
! gsutil cp gs://cloud-samples-data/ai-platform-unified/matching_engine/glove-100-angular.hdf5 .
###Output
_____no_output_____
###Markdown
Read the data into memory.
###Code
# The number of nearest neighbors to be retrieved from database for each query.
k = 10
h5 = h5py.File("glove-100-angular.hdf5", "r")
train = h5["train"]
test = h5["test"]
train[0]
###Output
_____no_output_____
###Markdown
Save the train split in JSONL format.
###Code
with open("glove100.json", "w") as f:
for i in range(len(train)):
f.write('{"id":"' + str(i) + '",')
f.write('"embedding":[' + ",".join(str(x) for x in train[i]) + "]}")
f.write("\n")
###Output
_____no_output_____
###Markdown
Upload the training data to GCS.
###Code
# NOTE: Everything in this GCS DIR will be DELETED before uploading the data.
! gsutil rm -rf {BUCKET_NAME}
! gsutil cp glove100.json {BUCKET_NAME}/glove100.json
! gsutil ls {BUCKET_NAME}
###Output
_____no_output_____
###Markdown
Create Indexes Create ANN Index (for Production Usage)
###Code
index_client = aiplatform_v1beta1.IndexServiceClient(
client_options=dict(api_endpoint=ENDPOINT)
)
DIMENSIONS = 100
DISPLAY_NAME = "glove_100_1"
DISPLAY_NAME_BRUTE_FORCE = DISPLAY_NAME + "_brute_force"
###Output
_____no_output_____
###Markdown
Create the ANN index configuration:Please read the documentation to understand the various configuration parameters that can be used to tune the index
###Code
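# A short, non-authoritative gloss on the tree-AH knobs set below (see the
# official documentation for the precise definitions):
# - leafNodeEmbeddingCount: roughly how many embeddings are stored in each leaf
#   node of the partitioning tree.
# - leafNodesToSearchPercent: what percentage of leaf nodes is probed per query;
#   larger values generally trade higher latency for higher recall.
# - approximateNeighborsCount: how many candidates the approximate stage returns
#   before exact reordering.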
treeAhConfig = struct_pb2.Struct(
fields={
"leafNodeEmbeddingCount": struct_pb2.Value(number_value=500),
"leafNodesToSearchPercent": struct_pb2.Value(number_value=7),
}
)
algorithmConfig = struct_pb2.Struct(
fields={"treeAhConfig": struct_pb2.Value(struct_value=treeAhConfig)}
)
config = struct_pb2.Struct(
fields={
"dimensions": struct_pb2.Value(number_value=DIMENSIONS),
"approximateNeighborsCount": struct_pb2.Value(number_value=150),
"distanceMeasureType": struct_pb2.Value(string_value="DOT_PRODUCT_DISTANCE"),
"algorithmConfig": struct_pb2.Value(struct_value=algorithmConfig),
}
)
metadata = struct_pb2.Struct(
fields={
"config": struct_pb2.Value(struct_value=config),
"contentsDeltaUri": struct_pb2.Value(string_value=BUCKET_NAME),
}
)
ann_index = {
"display_name": DISPLAY_NAME,
"description": "Glove 100 ANN index",
"metadata": struct_pb2.Value(struct_value=metadata),
}
ann_index = index_client.create_index(parent=PARENT, index=ann_index)
# Poll the operation until it's done successfully.
# This will take ~45 min.
while True:
if ann_index.done():
break
print("Poll the operation to create index...")
time.sleep(60)
INDEX_RESOURCE_NAME = ann_index.result().name
INDEX_RESOURCE_NAME
###Output
_____no_output_____
###Markdown
Create Brute Force Index (for Ground Truth)The brute force index uses a naive brute force method to find the nearest neighbors. This method is not fast or efficient. Hence, brute force indices are not recommended for production usage. They are to be used to find the "ground truth" set of neighbors, so that the "ground truth" set can be used to measure recall of the indices being tuned for production usage. To ensure an apples-to-apples comparison, the `distanceMeasureType`, `featureNormType`, and `dimensions` of the brute force index should match those of the production indices being tuned.Create the brute force index configuration:
###Code
from google.protobuf import *
algorithmConfig = struct_pb2.Struct(
fields={"bruteForceConfig": struct_pb2.Value(struct_value=struct_pb2.Struct())}
)
config = struct_pb2.Struct(
fields={
"dimensions": struct_pb2.Value(number_value=DIMENSIONS),
"approximateNeighborsCount": struct_pb2.Value(number_value=150),
"distanceMeasureType": struct_pb2.Value(string_value="DOT_PRODUCT_DISTANCE"),
"algorithmConfig": struct_pb2.Value(struct_value=algorithmConfig),
}
)
metadata = struct_pb2.Struct(
fields={
"config": struct_pb2.Value(struct_value=config),
"contentsDeltaUri": struct_pb2.Value(string_value=BUCKET_NAME),
}
)
brute_force_index = {
"display_name": DISPLAY_NAME_BRUTE_FORCE,
"description": "Glove 100 index (brute force)",
"metadata": struct_pb2.Value(struct_value=metadata),
}
brute_force_index = index_client.create_index(parent=PARENT, index=brute_force_index)
# Poll the operation until it's done successfully.
# This will take ~45 min.
while True:
if brute_force_index.done():
break
print("Poll the operation to create index...")
time.sleep(60)
INDEX_BRUTE_FORCE_RESOURCE_NAME = brute_force_index.result().name
INDEX_BRUTE_FORCE_RESOURCE_NAME
###Output
_____no_output_____
###Markdown
Create an IndexEndpoint with VPC Network
###Code
index_endpoint_client = aiplatform_v1beta1.IndexEndpointServiceClient(
client_options=dict(api_endpoint=ENDPOINT)
)
VPC_NETWORK_NAME = "projects/{}/global/networks/{}".format(PROJECT_NUMBER, NETWORK_NAME)
VPC_NETWORK_NAME
index_endpoint = {
"display_name": "index_endpoint_for_demo",
"network": VPC_NETWORK_NAME,
}
r = index_endpoint_client.create_index_endpoint(
parent=PARENT, index_endpoint=index_endpoint
)
r.result()
INDEX_ENDPOINT_NAME = r.result().name
INDEX_ENDPOINT_NAME
###Output
_____no_output_____
###Markdown
Deploy Indexes Deploy ANN Index
###Code
DEPLOYED_INDEX_ID = "ann_glove_deployed"
deploy_ann_index = {
"id": DEPLOYED_INDEX_ID,
"display_name": DEPLOYED_INDEX_ID,
"index": INDEX_RESOURCE_NAME,
}
r = index_endpoint_client.deploy_index(
index_endpoint=INDEX_ENDPOINT_NAME, deployed_index=deploy_ann_index
)
# Poll the operation until it's done successfully.
while True:
if r.done():
break
print("Poll the operation to deploy index...")
time.sleep(60)
r.result()
###Output
_____no_output_____
###Markdown
Deploy Brute Force Index
###Code
DEPLOYED_BRUTE_FORCE_INDEX_ID = "glove_brute_force_deployed"
deploy_brute_force_index = {
"id": DEPLOYED_BRUTE_FORCE_INDEX_ID,
"display_name": DEPLOYED_BRUTE_FORCE_INDEX_ID,
"index": INDEX_BRUTE_FORCE_RESOURCE_NAME,
}
r = index_endpoint_client.deploy_index(
index_endpoint=INDEX_ENDPOINT_NAME, deployed_index=deploy_brute_force_index
)
# Poll the operation until it's done successfully.
while True:
if r.done():
break
print("Poll the operation to deploy index...")
time.sleep(60)
r.result()
###Output
_____no_output_____
###Markdown
Create Online QueriesAfter you have built your indexes, you may query against the deployed index through the online querying gRPC API (Match service) within the virtual machine instances from the same region (for example 'us-central1' in this tutorial). The way a client uses this gRPC API is by following these steps:* Write `match_service.proto` locally* Clone the repository that contains the dependencies of match_service.proto in the Terminal:`$ mkdir third_party && cd third_party``$ git clone https://github.com/googleapis/googleapis.git`* Compile the protocol buffer (see below)* Obtain the index endpoint* Use a code-generated stub to make the call, passing the parameter values
###Code
%%writefile match_service.proto
syntax = "proto3";
package google.cloud.aiplatform.container.v1beta1;
import "google/rpc/status.proto";
// MatchService is a Google managed service for efficient vector similarity
// search at scale.
service MatchService {
// Returns the nearest neighbors for the query. If it is a sharded
// deployment, calls the other shards and aggregates the responses.
rpc Match(MatchRequest) returns (MatchResponse) {}
// Returns the nearest neighbors for batch queries. If it is a sharded
// deployment, calls the other shards and aggregates the responses.
rpc BatchMatch(BatchMatchRequest) returns (BatchMatchResponse) {}
}
// Parameters for a match query.
message MatchRequest {
// The ID of the DeploydIndex that will serve the request.
// This MatchRequest is sent to a specific IndexEndpoint of the Control API,
// as per the IndexEndpoint.network. That IndexEndpoint also has
// IndexEndpoint.deployed_indexes, and each such index has an
// DeployedIndex.id field.
// The value of the field below must equal one of the DeployedIndex.id
// fields of the IndexEndpoint that is being called for this request.
string deployed_index_id = 1;
// The embedding values.
repeated float float_val = 2;
// The number of nearest neighbors to be retrieved from database for
// each query. If not set, will use the default from
// the service configuration.
int32 num_neighbors = 3;
// The list of restricts.
repeated Namespace restricts = 4;
// Crowding is a constraint on a neighbor list produced by nearest neighbor
// search requiring that no more than some value k' of the k neighbors
// returned have the same value of crowding_attribute.
// It's used for improving result diversity.
// This field is the maximum number of matches with the same crowding tag.
int32 per_crowding_attribute_num_neighbors = 5;
// The number of neighbors to find via approximate search before
// exact reordering is performed. If not set, the default value from scam
// config is used; if set, this value must be > 0.
int32 approx_num_neighbors = 6;
// The fraction of the number of leaves to search, set at query time allows
// user to tune search performance. This value increase result in both search
// accuracy and latency increase. The value should be between 0.0 and 1.0. If
// not set or set to 0.0, query uses the default value specified in
// NearestNeighborSearchConfig.TreeAHConfig.leaf_nodes_to_search_percent.
int32 leaf_nodes_to_search_percent_override = 7;
}
// Response of a match query.
message MatchResponse {
message Neighbor {
// The ids of the matches.
string id = 1;
// The distances of the matches.
double distance = 2;
}
// All its neighbors.
repeated Neighbor neighbor = 1;
}
// Parameters for a batch match query.
message BatchMatchRequest {
// Batched requests against one index.
message BatchMatchRequestPerIndex {
// The ID of the DeploydIndex that will serve the request.
string deployed_index_id = 1;
// The requests against the index identified by the above deployed_index_id.
repeated MatchRequest requests = 2;
// Selects the optimal batch size to use for low-level batching. Queries
// within each low level batch are executed sequentially while low level
// batches are executed in parallel.
// This field is optional, defaults to 0 if not set. A non-positive number
// disables low level batching, i.e. all queries are executed sequentially.
int32 low_level_batch_size = 3;
}
// The batch requests grouped by indexes.
repeated BatchMatchRequestPerIndex requests = 1;
}
// Response of a batch match query.
message BatchMatchResponse {
// Batched responses for one index.
message BatchMatchResponsePerIndex {
// The ID of the DeployedIndex that produced the responses.
string deployed_index_id = 1;
// The match responses produced by the index identified by the above
// deployed_index_id. This field is set only when the query against that
// index succeed.
repeated MatchResponse responses = 2;
// The status of response for the batch query identified by the above
// deployed_index_id.
google.rpc.Status status = 3;
}
// The batched responses grouped by indexes.
repeated BatchMatchResponsePerIndex responses = 1;
}
// Namespace specifies the rules for determining the datapoints that are
// eligible for each matching query, overall query is an AND across namespaces.
message Namespace {
// The string name of the namespace that this proto is specifying,
// such as "color", "shape", "geo", or "tags".
string name = 1;
// The allowed tokens in the namespace.
repeated string allow_tokens = 2;
// The denied tokens in the namespace.
// The denied tokens have exactly the same format as the token fields, but
// represents a negation. When a token is denied, then matches will be
// excluded whenever the other datapoint has that token.
//
// For example, if a query specifies {color: red, blue, !purple}, then that
// query will match datapoints that are red or blue, but if those points are
// also purple, then they will be excluded even if they are red/blue.
repeated string deny_tokens = 3;
}
###Output
_____no_output_____
###Markdown
Compile the protocol buffer, and then `match_service_pb2.py` and `match_service_pb2_grpc.py` are generated.
###Code
! python -m grpc_tools.protoc -I=. --proto_path=third_party/googleapis --python_out=. --grpc_python_out=. match_service.proto
###Output
_____no_output_____
###Markdown
Obtain the Private Endpoint:
###Code
DEPLOYED_INDEX_SERVER_IP = (
list(index_endpoint_client.list_index_endpoints(parent=PARENT))[0]
.deployed_indexes[0]
.private_endpoints.match_grpc_address
)
DEPLOYED_INDEX_SERVER_IP
###Output
_____no_output_____
###Markdown
Test your query:
###Code
import match_service_pb2
import match_service_pb2_grpc
channel = grpc.insecure_channel("{}:10000".format(DEPLOYED_INDEX_SERVER_IP))
stub = match_service_pb2_grpc.MatchServiceStub(channel)
# Test query
query = [
-0.11333,
0.48402,
0.090771,
-0.22439,
0.034206,
-0.55831,
0.041849,
-0.53573,
0.18809,
-0.58722,
0.015313,
-0.014555,
0.80842,
-0.038519,
0.75348,
0.70502,
-0.17863,
0.3222,
0.67575,
0.67198,
0.26044,
0.4187,
-0.34122,
0.2286,
-0.53529,
1.2582,
-0.091543,
0.19716,
-0.037454,
-0.3336,
0.31399,
0.36488,
0.71263,
0.1307,
-0.24654,
-0.52445,
-0.036091,
0.55068,
0.10017,
0.48095,
0.71104,
-0.053462,
0.22325,
0.30917,
-0.39926,
0.036634,
-0.35431,
-0.42795,
0.46444,
0.25586,
0.68257,
-0.20821,
0.38433,
0.055773,
-0.2539,
-0.20804,
0.52522,
-0.11399,
-0.3253,
-0.44104,
0.17528,
0.62255,
0.50237,
-0.7607,
-0.071786,
0.0080131,
-0.13286,
0.50097,
0.18824,
-0.54722,
-0.42664,
0.4292,
0.14877,
-0.0072514,
-0.16484,
-0.059798,
0.9895,
-0.61738,
0.054169,
0.48424,
-0.35084,
-0.27053,
0.37829,
0.11503,
-0.39613,
0.24266,
0.39147,
-0.075256,
0.65093,
-0.20822,
-0.17456,
0.53571,
-0.16537,
0.13582,
-0.56016,
0.016964,
0.1277,
0.94071,
-0.22608,
-0.021106,
]
request = match_service_pb2.MatchRequest()
request.deployed_index_id = DEPLOYED_INDEX_ID
for val in query:
request.float_val.append(val)
response = stub.Match(request)
response
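# The request above leaves num_neighbors unset, so the service-side default
# applies; a minimal variant that explicitly asks for the top k matches and
# prints them as (id, distance) pairs:
request_k = match_service_pb2.MatchRequest(num_neighbors=k)
request_k.deployed_index_id = DEPLOYED_INDEX_ID
for val in query:
    request_k.float_val.append(val)
print([(n.id, n.distance) for n in stub.Match(request_k).neighbor])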
###Output
_____no_output_____
###Markdown
Compute RecallUse deployed brute force Index as the ground truth to calculate the recall of ANN Index:
###Code
def get_neighbors(embedding, deployed_index_id):
request = match_service_pb2.MatchRequest(num_neighbors=k)
request.deployed_index_id = deployed_index_id
for val in embedding:
request.float_val.append(val)
response = stub.Match(request)
return [int(n.id) for n in response.neighbor]
# This will take 5-10 min
recall = sum(
[
len(
set(get_neighbors(test[i], DEPLOYED_BRUTE_FORCE_INDEX_ID)).intersection(
set(get_neighbors(test[i], DEPLOYED_INDEX_ID))
)
)
for i in range(len(test))
]
) / (1.0 * len(test) * k)
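# Equivalently: recall = (number of brute-force "ground truth" neighbors that
# also appear in the ANN results) / (len(test) * k), i.e. the average fraction
# of the true top-k neighbors that the ANN index recovers per query.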
print("Recall: {}".format(recall))
###Output
_____no_output_____
###Markdown
Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. You can also manually delete resources that you created by running the following code.
###Code
index_client.delete_index(name=INDEX_RESOURCE_NAME)
index_client.delete_index(name=INDEX_BRUTE_FORCE_RESOURCE_NAME)
index_endpoint_client.delete_index_endpoint(name=INDEX_ENDPOINT_NAME)
###Output
_____no_output_____
###Markdown
OverviewThis example demonstrates how to use the GCP ANN Service. It is a high-scale, low-latency solution for finding similar vectors (or more specifically "embeddings") for a large corpus. Moreover, it is a fully managed offering, further reducing operational overhead. It is built upon [Approximate Nearest Neighbor (ANN) technology](https://ai.googleblog.com/2020/07/announcing-scann-efficient-vector.html) developed by Google Research. DatasetThe dataset used for this tutorial is the [GloVe dataset](https://nlp.stanford.edu/projects/glove/). ObjectiveIn this notebook, you will learn how to create an Approximate Nearest Neighbor (ANN) Index, query against indexes, and validate the performance of the index. The steps performed include:* Create ANN Index and Brute Force Index* Create an IndexEndpoint with VPC Network* Deploy ANN Index and Brute Force Index* Perform online query* Compute recall Costs This tutorial uses billable components of Google Cloud:* Vertex AI* Cloud StorageLearn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage. Before you begin* **Prepare a VPC network**. To reduce any network overhead that might lead to an unnecessary increase in latency, it is best to call the ANN endpoints from your VPC via a direct [VPC Peering](https://cloud.google.com/vertex-ai/docs/general/vpc-peering) connection. The following section describes how to set up a VPC Peering connection if you don't have one. This is a one-time initial setup task. You can also reuse an existing VPC network and skip this section.* **WARNING:** The match service gRPC API (to create online queries against your deployed index) has to be executed in a Google Cloud Notebook instance that is created with the following requirements: * **In the same region as where your ANN service is deployed** (for example, if you set `REGION = "us-central1"` as in this tutorial, the notebook instance has to be in `us-central1`). * **Make sure you select the VPC network you created for the ANN service** (instead of using the "default" one). * If you run it in Colab or a Google Cloud Notebook instance in a different VPC network or region, the gRPC API will fail to peer the network (InactiveRPCError).
###Code
PROJECT_ID = "<your_project_id>" # @param {type:"string"}
NETWORK_NAME = "ucaip-haystack-vpc-network" # @param {type:"string"}
PEERING_RANGE_NAME = "ucaip-haystack-range"
# Create a VPC network
! gcloud compute networks create {NETWORK_NAME} --bgp-routing-mode=regional --subnet-mode=auto --project={PROJECT_ID}
# Add necessary firewall rules
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-icmp --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow icmp
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-internal --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow all --source-ranges 10.128.0.0/9
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-rdp --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow tcp:3389
! gcloud compute firewall-rules create {NETWORK_NAME}-allow-ssh --network {NETWORK_NAME} --priority 65534 --project {PROJECT_ID} --allow tcp:22
# Reserve IP range
! gcloud compute addresses create {PEERING_RANGE_NAME} --global --prefix-length=16 --network={NETWORK_NAME} --purpose=VPC_PEERING --project={PROJECT_ID} --description="peering range for uCAIP Haystack."
# Set up peering with service networking
! gcloud services vpc-peerings connect --service=servicenetworking.googleapis.com --network={NETWORK_NAME} --ranges={PEERING_RANGE_NAME} --project={PROJECT_ID}
###Output
_____no_output_____
###Markdown
* Authentication: `$ gcloud auth login` rerun this in Google Cloud Notebook terminal when you are logged out and need the credential again. InstallationDownload and install the latest (preview) version of the Vertex SDK for Python.
###Code
! pip install -U git+https://github.com/googleapis/python-aiplatform.git@main-test --user
###Output
_____no_output_____
###Markdown
Install the `h5py` to prepare sample dataset, and the `grpcio-tools` for querying against the index.
###Code
! pip install -U grpcio-tools --user
! pip install -U h5py --user
###Output
_____no_output_____
###Markdown
Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager).1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the Vertex AI API and Compute Engine API, and Service Networking API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,servicenetworking.googleapis.com).1. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
###Output
_____no_output_____
###Markdown
Otherwise, set your project ID here.
###Code
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "<your_project_id>" # @param {type:"string"}
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**Set the name of your Cloud Storage bucket below. It must be unique across all Cloud Storage buckets. You may also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are available](https://cloud.google.com/vertex-ai/docs/general/locations#available_regions). You may not use a Multi-Regional Storage bucket for training with Vertex AI.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Import libraries and define constants Import the Vertex AI (unified) client library into your Python environment.
###Code
import time
import grpc
import h5py
from google.cloud import aiplatform_v1beta1
from google.protobuf import struct_pb2
REGION = "us-central1"
ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
NETWORK_NAME = "ucaip-haystack-vpc-network" # @param {type:"string"}
AUTH_TOKEN = !gcloud auth print-access-token
PROJECT_NUMBER = !gcloud projects list --filter='project_id:$PROJECT_ID' --format='value(PROJECT_NUMBER)'
PROJECT_NUMBER = str(PROJECT_NUMBER).strip("[").strip("]").strip("'")
PARENT = "projects/{}/locations/{}".format(PROJECT_ID, REGION)
print("ENDPOINT: {}".format(ENDPOINT))
print("PROJECT_ID: {}".format(PROJECT_ID))
print("REGION: {}".format(REGION))
!gcloud config set project {PROJECT_ID}
!gcloud config set ai_platform/region {REGION}
###Output
_____no_output_____
###Markdown
Prepare the DataThe GloVe dataset consists of a set of pre-trained embeddings. The embeddings are split into a "train" split, and a "test" split.We will create a vector search index from the "train" split, and use the embedding vectors in the "test" split as query vectors to test the vector search index.NOTE: While the data split uses the term "train", these are pre-trained embeddings and thus are ready to be indexed for search. The terms "train" and "test" split are used just to be consistent with usual machine learning terminology.Download the GloVe dataset.
###Code
! gsutil cp gs://cloud-samples-data/ai-platform-unified/matching_engine/glove-100-angular.hdf5 .
###Output
_____no_output_____
###Markdown
Read the data into memory.
###Code
# The number of nearest neighbors to be retrieved from database for each query.
k = 10
h5 = h5py.File("glove-100-angular.hdf5", "r")
train = h5["train"]
test = h5["test"]
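# Optional: the h5py datasets report their shapes directly, confirming the
# corpus size and the 100-dimensional embedding width.
print("train:", train.shape, "test:", test.shape)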
train[0]
###Output
_____no_output_____
###Markdown
Save the train split in JSONL format.
###Code
with open("glove100.json", "w") as f:
for i in range(len(train)):
f.write('{"id":"' + str(i) + '",')
f.write('"embedding":[' + ",".join(str(x) for x in train[i]) + "]}")
f.write("\n")
###Output
_____no_output_____
###Markdown
Upload the training data to GCS.
###Code
# NOTE: Everything in this GCS DIR will be DELETED before uploading the data.
! gsutil rm -rf {BUCKET_NAME}
! gsutil cp glove100.json {BUCKET_NAME}/glove100.json
! gsutil ls {BUCKET_NAME}
###Output
_____no_output_____
###Markdown
Create Indexes Create ANN Index (for Production Usage)
###Code
index_client = aiplatform_v1beta1.IndexServiceClient(
client_options=dict(api_endpoint=ENDPOINT)
)
DIMENSIONS = 100
DISPLAY_NAME = "glove_100_1"
DISPLAY_NAME_BRUTE_FORCE = DISPLAY_NAME + "_brute_force"
###Output
_____no_output_____
###Markdown
Create the ANN index configuration:Please read the documentation to understand the various configuration parameters that can be used to tune the index
###Code
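# A short, non-authoritative gloss on the tree-AH knobs set below (see the
# official documentation for the precise definitions):
# - leafNodeEmbeddingCount: roughly how many embeddings are stored in each leaf
#   node of the partitioning tree.
# - leafNodesToSearchPercent: what percentage of leaf nodes is probed per query;
#   larger values generally trade higher latency for higher recall.
# - approximateNeighborsCount: how many candidates the approximate stage returns
#   before exact reordering.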
treeAhConfig = struct_pb2.Struct(
fields={
"leafNodeEmbeddingCount": struct_pb2.Value(number_value=500),
"leafNodesToSearchPercent": struct_pb2.Value(number_value=7),
}
)
algorithmConfig = struct_pb2.Struct(
fields={"treeAhConfig": struct_pb2.Value(struct_value=treeAhConfig)}
)
config = struct_pb2.Struct(
fields={
"dimensions": struct_pb2.Value(number_value=DIMENSIONS),
"approximateNeighborsCount": struct_pb2.Value(number_value=150),
"distanceMeasureType": struct_pb2.Value(string_value="DOT_PRODUCT_DISTANCE"),
"algorithmConfig": struct_pb2.Value(struct_value=algorithmConfig),
}
)
metadata = struct_pb2.Struct(
fields={
"config": struct_pb2.Value(struct_value=config),
"contentsDeltaUri": struct_pb2.Value(string_value=BUCKET_NAME),
}
)
ann_index = {
"display_name": DISPLAY_NAME,
"description": "Glove 100 ANN index",
"metadata": struct_pb2.Value(struct_value=metadata),
}
ann_index = index_client.create_index(parent=PARENT, index=ann_index)
# Poll the operation until it's done successfully.
# This will take ~45 min.
while True:
if ann_index.done():
break
print("Poll the operation to create index...")
time.sleep(60)
INDEX_RESOURCE_NAME = ann_index.result().name
INDEX_RESOURCE_NAME
###Output
_____no_output_____
###Markdown
Create Brute Force Index (for Ground Truth)The brute force index uses a naive brute force method to find the nearest neighbors. This method is not fast or efficient. Hence, brute force indices are not recommended for production usage. They are to be used to find the "ground truth" set of neighbors, so that the "ground truth" set can be used to measure recall of the indices being tuned for production usage. To ensure an apples-to-apples comparison, the `distanceMeasureType`, `featureNormType`, and `dimensions` of the brute force index should match those of the production indices being tuned.Create the brute force index configuration:
###Code
algorithmConfig = struct_pb2.Struct(
fields={"bruteForceConfig": struct_pb2.Value(struct_value=Struct())}
)
config = struct_pb2.Struct(
fields={
"dimensions": struct_pb2.Value(number_value=DIMENSIONS),
"approximateNeighborsCount": struct_pb2.Value(number_value=150),
"distanceMeasureType": struct_pb2.Value(string_value="DOT_PRODUCT_DISTANCE"),
"algorithmConfig": struct_pb2.Value(struct_value=algorithmConfig),
}
)
metadata = struct_pb2.Struct(
fields={
"config": struct_pb2.Value(struct_value=config),
"contentsDeltaUri": struct_pb2.Value(string_value=BUCKET_NAME),
}
)
brute_force_index = {
"display_name": DISPLAY_NAME_BRUTE_FORCE,
"description": "Glove 100 index (brute force)",
"metadata": struct_pb2.Value(struct_value=metadata),
}
brute_force_index = index_client.create_index(parent=PARENT, index=brute_force_index)
# Poll the operation until it's done successfully.
# This will take ~45 min.
while True:
if brute_force_index.done():
break
print("Poll the operation to create index...")
time.sleep(60)
INDEX_BRUTE_FORCE_RESOURCE_NAME = brute_force_index.result().name
INDEX_BRUTE_FORCE_RESOURCE_NAME
###Output
_____no_output_____
###Markdown
Create an IndexEndpoint with VPC Network
###Code
index_endpoint_client = aiplatform_v1beta1.IndexEndpointServiceClient(
client_options=dict(api_endpoint=ENDPOINT)
)
VPC_NETWORK_NAME = "projects/{}/global/networks/{}".format(PROJECT_NUMBER, NETWORK_NAME)
VPC_NETWORK_NAME
index_endpoint = {
"display_name": "index_endpoint_for_demo",
"network": VPC_NETWORK_NAME,
}
r = index_endpoint_client.create_index_endpoint(
parent=PARENT, index_endpoint=index_endpoint
)
r.result()
INDEX_ENDPOINT_NAME = r.result().name
INDEX_ENDPOINT_NAME
###Output
_____no_output_____
###Markdown
Deploy Indexes Deploy ANN Index
###Code
DEPLOYED_INDEX_ID = "ann_glove_deployed"
deploy_ann_index = {
"id": DEPLOYED_INDEX_ID,
"display_name": DEPLOYED_INDEX_ID,
"index": INDEX_RESOURCE_NAME,
}
r = index_endpoint_client.deploy_index(
index_endpoint=INDEX_ENDPOINT_NAME, deployed_index=deploy_ann_index
)
# Poll the operation until it's done successfully.
while True:
if r.done():
break
print("Poll the operation to deploy index...")
time.sleep(60)
r.result()
###Output
_____no_output_____
###Markdown
Deploy Brute Force Index
###Code
DEPLOYED_BRUTE_FORCE_INDEX_ID = "glove_brute_force_deployed"
deploy_brute_force_index = {
"id": DEPLOYED_BRUTE_FORCE_INDEX_ID,
"display_name": DEPLOYED_BRUTE_FORCE_INDEX_ID,
"index": INDEX_BRUTE_FORCE_RESOURCE_NAME,
}
r = index_endpoint_client.deploy_index(
index_endpoint=INDEX_ENDPOINT_NAME, deployed_index=deploy_brute_force_index
)
# Poll the operation until it's done successfully.
while True:
if r.done():
break
print("Poll the operation to deploy index...")
time.sleep(60)
r.result()
###Output
_____no_output_____
###Markdown
Create Online QueriesAfter you have built your indexes, you may query against the deployed index through the online querying gRPC API (Match service) within the virtual machine instances from the same region (for example 'us-central1' in this tutorial). The way a client uses this gRPC API is by following these steps:* Write `match_service.proto` locally* Clone the repository that contains the dependencies of match_service.proto in the Terminal:`$ mkdir third_party && cd third_party``$ git clone https://github.com/googleapis/googleapis.git`* Compile the protocol buffer (see below)* Obtain the index endpoint* Use a code-generated stub to make the call, passing the parameter values
###Code
%%writefile match_service.proto
syntax = "proto3";
package google.cloud.aiplatform.container.v1beta1;
import "google/rpc/status.proto";
// MatchService is a Google managed service for efficient vector similarity
// search at scale.
service MatchService {
// Returns the nearest neighbors for the query. If it is a sharded
// deployment, calls the other shards and aggregates the responses.
rpc Match(MatchRequest) returns (MatchResponse) {}
// Returns the nearest neighbors for batch queries. If it is a sharded
// deployment, calls the other shards and aggregates the responses.
rpc BatchMatch(BatchMatchRequest) returns (BatchMatchResponse) {}
}
// Parameters for a match query.
message MatchRequest {
// The ID of the DeploydIndex that will serve the request.
// This MatchRequest is sent to a specific IndexEndpoint of the Control API,
// as per the IndexEndpoint.network. That IndexEndpoint also has
// IndexEndpoint.deployed_indexes, and each such index has an
// DeployedIndex.id field.
// The value of the field below must equal one of the DeployedIndex.id
// fields of the IndexEndpoint that is being called for this request.
string deployed_index_id = 1;
// The embedding values.
repeated float float_val = 2;
// The number of nearest neighbors to be retrieved from database for
// each query. If not set, will use the default from
// the service configuration.
int32 num_neighbors = 3;
// The list of restricts.
repeated Namespace restricts = 4;
// Crowding is a constraint on a neighbor list produced by nearest neighbor
// search requiring that no more than some value k' of the k neighbors
// returned have the same value of crowding_attribute.
// It's used for improving result diversity.
// This field is the maximum number of matches with the same crowding tag.
int32 per_crowding_attribute_num_neighbors = 5;
// The number of neighbors to find via approximate search before
// exact reordering is performed. If not set, the default value from scam
// config is used; if set, this value must be > 0.
int32 approx_num_neighbors = 6;
// The fraction of the number of leaves to search, set at query time allows
// user to tune search performance. This value increase result in both search
// accuracy and latency increase. The value should be between 0.0 and 1.0. If
// not set or set to 0.0, query uses the default value specified in
// NearestNeighborSearchConfig.TreeAHConfig.leaf_nodes_to_search_percent.
int32 leaf_nodes_to_search_percent_override = 7;
}
// Response of a match query.
message MatchResponse {
message Neighbor {
// The ids of the matches.
string id = 1;
// The distances of the matches.
double distance = 2;
}
// All its neighbors.
repeated Neighbor neighbor = 1;
}
// Parameters for a batch match query.
message BatchMatchRequest {
// Batched requests against one index.
message BatchMatchRequestPerIndex {
// The ID of the DeploydIndex that will serve the request.
string deployed_index_id = 1;
// The requests against the index identified by the above deployed_index_id.
repeated MatchRequest requests = 2;
// Selects the optimal batch size to use for low-level batching. Queries
// within each low level batch are executed sequentially while low level
// batches are executed in parallel.
// This field is optional, defaults to 0 if not set. A non-positive number
// disables low level batching, i.e. all queries are executed sequentially.
int32 low_level_batch_size = 3;
}
// The batch requests grouped by indexes.
repeated BatchMatchRequestPerIndex requests = 1;
}
// Response of a batch match query.
message BatchMatchResponse {
// Batched responses for one index.
message BatchMatchResponsePerIndex {
// The ID of the DeployedIndex that produced the responses.
string deployed_index_id = 1;
// The match responses produced by the index identified by the above
// deployed_index_id. This field is set only when the query against that
// index succeed.
repeated MatchResponse responses = 2;
// The status of response for the batch query identified by the above
// deployed_index_id.
google.rpc.Status status = 3;
}
// The batched responses grouped by indexes.
repeated BatchMatchResponsePerIndex responses = 1;
}
// Namespace specifies the rules for determining the datapoints that are
// eligible for each matching query, overall query is an AND across namespaces.
message Namespace {
// The string name of the namespace that this proto is specifying,
// such as "color", "shape", "geo", or "tags".
string name = 1;
// The allowed tokens in the namespace.
repeated string allow_tokens = 2;
// The denied tokens in the namespace.
// The denied tokens have exactly the same format as the token fields, but
// represents a negation. When a token is denied, then matches will be
// excluded whenever the other datapoint has that token.
//
// For example, if a query specifies {color: red, blue, !purple}, then that
// query will match datapoints that are red or blue, but if those points are
// also purple, then they will be excluded even if they are red/blue.
repeated string deny_tokens = 3;
}
###Output
_____no_output_____
###Markdown
Compile the protocol buffer, and then `match_service_pb2.py` and `match_service_pb2_grpc.py` are generated.
###Code
! python -m grpc_tools.protoc -I=. --proto_path=third_party/googleapis --python_out=. --grpc_python_out=. match_service.proto
###Output
_____no_output_____
###Markdown
Obtain the Private Endpoint:
###Code
DEPLOYED_INDEX_SERVER_IP = (
list(index_endpoint_client.list_index_endpoints(parent=PARENT))[0]
.deployed_indexes[0]
.private_endpoints.match_grpc_address
)
DEPLOYED_INDEX_SERVER_IP
###Output
_____no_output_____
###Markdown
Test your query:
###Code
import match_service_pb2
import match_service_pb2_grpc
channel = grpc.insecure_channel("{}:10000".format(DEPLOYED_INDEX_SERVER_IP))
stub = match_service_pb2_grpc.MatchServiceStub(channel)
# Test query
query = [
-0.11333,
0.48402,
0.090771,
-0.22439,
0.034206,
-0.55831,
0.041849,
-0.53573,
0.18809,
-0.58722,
0.015313,
-0.014555,
0.80842,
-0.038519,
0.75348,
0.70502,
-0.17863,
0.3222,
0.67575,
0.67198,
0.26044,
0.4187,
-0.34122,
0.2286,
-0.53529,
1.2582,
-0.091543,
0.19716,
-0.037454,
-0.3336,
0.31399,
0.36488,
0.71263,
0.1307,
-0.24654,
-0.52445,
-0.036091,
0.55068,
0.10017,
0.48095,
0.71104,
-0.053462,
0.22325,
0.30917,
-0.39926,
0.036634,
-0.35431,
-0.42795,
0.46444,
0.25586,
0.68257,
-0.20821,
0.38433,
0.055773,
-0.2539,
-0.20804,
0.52522,
-0.11399,
-0.3253,
-0.44104,
0.17528,
0.62255,
0.50237,
-0.7607,
-0.071786,
0.0080131,
-0.13286,
0.50097,
0.18824,
-0.54722,
-0.42664,
0.4292,
0.14877,
-0.0072514,
-0.16484,
-0.059798,
0.9895,
-0.61738,
0.054169,
0.48424,
-0.35084,
-0.27053,
0.37829,
0.11503,
-0.39613,
0.24266,
0.39147,
-0.075256,
0.65093,
-0.20822,
-0.17456,
0.53571,
-0.16537,
0.13582,
-0.56016,
0.016964,
0.1277,
0.94071,
-0.22608,
-0.021106,
]
request = match_service_pb2.MatchRequest()
request.deployed_index_id = DEPLOYED_INDEX_ID
for val in query:
request.float_val.append(val)
response = stub.Match(request)
response
###Output
_____no_output_____
###Markdown
 Compute Recall. Use the deployed brute-force index as the ground truth to calculate the recall of the ANN index:
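To make the metric concrete before running the full comparison below, here is a small, self-contained illustration of recall@k on made-up neighbor lists (the ids are purely hypothetical and not from the deployed indexes):

```python
# Toy illustration of recall@k: the brute-force index gives the exact top-k ids,
# the ANN index gives approximate ids; recall is the fraction of exact ids recovered.
exact_ids = [3, 17, 42, 8, 99]    # hypothetical ground-truth neighbors (k = 5)
approx_ids = [3, 42, 8, 7, 99]    # hypothetical ANN neighbors for the same query
recall_at_k = len(set(exact_ids) & set(approx_ids)) / len(exact_ids)
print(recall_at_k)  # 0.8 -> 4 of the 5 true neighbors were found
```

The cell below computes exactly this quantity, averaged over all test queries.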
###Code
def get_neighbors(embedding, deployed_index_id):
request = match_service_pb2.MatchRequest(num_neighbors=k)
request.deployed_index_id = deployed_index_id
for val in embedding:
request.float_val.append(val)
response = stub.Match(request)
return [int(n.id) for n in response.neighbor]
# This will take 5-10 min
recall = sum(
[
len(
set(get_neighbors(test[i], DEPLOYED_BRUTE_FORCE_INDEX_ID)).intersection(
set(get_neighbors(test[i], DEPLOYED_INDEX_ID))
)
)
for i in range(len(test))
]
) / (1.0 * len(test) * k)
print("Recall: {}".format(recall))
###Output
_____no_output_____
###Markdown
 Cleaning up. To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. You can also manually delete resources that you created by running the following code.
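Note: deleting an index endpoint (or an index) may fail while an index is still deployed to it. A minimal sketch of undeploying first is shown below; it assumes the `undeploy_index` method of the same `IndexEndpointServiceClient` used earlier, with the `INDEX_ENDPOINT_NAME` and deployed index IDs defined above; treat the exact parameter names as an assumption and check the client library documentation for your installed version.

```python
# Hedged sketch: undeploy both indexes before deleting the endpoint and the indexes.
# Assumes index_endpoint_client, INDEX_ENDPOINT_NAME, DEPLOYED_INDEX_ID and
# DEPLOYED_BRUTE_FORCE_INDEX_ID from earlier cells; verify the method signature
# against your google-cloud-aiplatform version.
for deployed_id in [DEPLOYED_INDEX_ID, DEPLOYED_BRUTE_FORCE_INDEX_ID]:
    op = index_endpoint_client.undeploy_index(
        index_endpoint=INDEX_ENDPOINT_NAME, deployed_index_id=deployed_id
    )
    op.result()  # wait for the long-running operation to complete
```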
###Code
index_client.delete_index(name=INDEX_RESOURCE_NAME)
index_client.delete_index(name=INDEX_BRUTE_FORCE_RESOURCE_NAME)
index_endpoint_client.delete_index_endpoint(name=INDEX_ENDPOINT_NAME)
###Output
_____no_output_____ |
all_models/model_2.ipynb | ###Markdown
Read the CSV and Perform Basic Data Cleaning
###Code
df = pd.read_csv("exoplanet_data.csv")
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
df.head()
df.columns
###Output
_____no_output_____
###Markdown
Select your features (columns)
###Code
# This will also be used as your x values.
selected_features = df[['koi_disposition', 'koi_fpflag_co', 'koi_fpflag_nt', 'koi_fpflag_ss', 'koi_model_snr', 'koi_prad', 'koi_duration_err2']]
X = selected_features[['koi_fpflag_co', 'koi_fpflag_nt', 'koi_fpflag_ss', 'koi_model_snr', 'koi_prad', 'koi_duration_err2']]
y = selected_features["koi_disposition"]
X.head()
###Output
_____no_output_____
###Markdown
 Create a Train Test Split. Use `koi_disposition` for the y values
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=30)
X_train.head()
###Output
_____no_output_____
###Markdown
 Pre-processing. Scale the data using the MinMaxScaler and perform some feature selection
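Note that the instruction above mentions `MinMaxScaler`, while the cell below uses `StandardScaler` (which centers features around zero). For reference, a minimal min-max variant would look like this sketch; it assumes the `X_train`/`X_test` split from the previous cell:

```python
# Hedged alternative: scale each feature to the [0, 1] range instead of standardizing.
from sklearn.preprocessing import MinMaxScaler
X_minmax_scaler = MinMaxScaler().fit(X_train)
X_train_minmax = X_minmax_scaler.transform(X_train)
X_test_minmax = X_minmax_scaler.transform(X_test)
```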
###Code
# Scale your data to fit data around 0
from sklearn.preprocessing import StandardScaler
X_scaler = StandardScaler().fit(X_train)
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
###Output
_____no_output_____
###Markdown
Train the Model
###Code
# Loop through different k values to see which has the highest accuracy
# Note: We only use odd numbers because we don't want any ties
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
train_scores = []
test_scores = []
for k in range(1, 20, 2):
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(X_train_scaled, y_train)
train_score = knn.score(X_train_scaled, y_train)
test_score = knn.score(X_test_scaled, y_test)
train_scores.append(train_score)
test_scores.append(test_score)
print(f"k: {k}, Train/Test Score: {train_score:.3f}/{test_score:.3f}")
plt.plot(range(1, 20, 2), train_scores, marker='o')
plt.plot(range(1, 20, 2), test_scores, marker="x")
plt.xlabel("k neighbors")
plt.ylabel("Testing accuracy Score")
plt.show()
# Note that k: 9 provides the best accuracy, where the classifier starts to stabilize
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train_scaled, y_train)
print('k=5 Test Acc: %.3f' % knn.score(X_test_scaled, y_test))
print(f"Training Data Score: {knn.score(X_train_scaled, y_train)}")
print(f"Testing Data Score: {knn.score(X_test_scaled, y_test)}")
###Output
Training Data Score: 0.8909021552546252
Testing Data Score: 0.8518306636155606
###Markdown
 Hyperparameter Tuning. Use `GridSearchCV` to tune the model's parameters
###Code
# Create the GridSearchCV model (the parameter grid below is an assumed example for KNN)
from sklearn.model_selection import GridSearchCV
param_grid = {'n_neighbors': list(range(1, 20, 2))}
grid2 = GridSearchCV(KNeighborsClassifier(), param_grid, verbose=3)
# Train the model with GridSearch
grid2.fit(X_train_scaled, y_train)
print(grid2.best_params_)
print(grid2.best_score_)
###Output
_____no_output_____
###Markdown
Save the Model
###Code
# save your model by updating "your_name" with your name
# and "your_model" with your model variable
# be sure to turn this in to BCS
# if joblib fails to import, try running the command to install in terminal/git-bash
import joblib
filename = 'your_name.sav'
joblib.dump(your_model, filename)
###Output
_____no_output_____ |
Demo/XRD_dimensionlity_demo.ipynb | ###Markdown
 small dataset XRD classification using machine learning. Introduction. This is meant to be a (relatively) self-contained example of XRD classification on a small dataset via physics-based data augmentation. The overall procedure is: 1. Load the experimental and theoretical XRD spectra with dimensionality labels 2. Data preprocessing for experimental data 3. Data augmentation for both experimental and theoretical spectra based on the characteristics of thin-film XRD measurement 4. Perform dimensionality/space group classification based on the post-processed data 5. Cross validation and hyperparameter tuning. The Convolutional Neural Network (CNN) used in this demo is slightly modified from our paper (replace global average pooling layer with a fully connected layer) to shorten the training time. For the detailed structure and class activation maps (CAM), please refer to the space_group_a_CNN.py file in our repository. You will need the Keras and TensorFlow packages to run the CNN model. Preload libraries and functions. First of all, let's import the libraries that will be used in this example
###Code
import time
from sklearn.metrics import accuracy_score
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
from scipy.signal import savgol_filter
from scipy.signal import find_peaks_cwt
from sklearn.model_selection import train_test_split
import warnings
from keras.models import Model
from keras.models import Sequential
from sklearn.preprocessing import OneHotEncoder
import keras as K
warnings.filterwarnings('ignore')
###Output
Using TensorFlow backend.
###Markdown
 Import multiple classification algorithms from scikit-learn
###Code
# Multinomial Naive Bayes Classifier
def naive_bayes_classifier(train_x, train_y):
from sklearn.naive_bayes import MultinomialNB
model = MultinomialNB(alpha=0.01)
model.fit(train_x, train_y)
return model
# KNN Classifier
def knn_classifier(train_x, train_y):
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=3)
model.fit(train_x, train_y)
return model
# Random Forest Classifier
def random_forest_classifier(train_x, train_y):
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=100)
model.fit(train_x, train_y)
return model
# Decision Tree Classifier
def decision_tree_classifier(train_x, train_y):
from sklearn import tree
model = tree.DecisionTreeClassifier()
model.fit(train_x, train_y)
return model
# GBDT(Gradient Boosting Decision Tree) Classifier
def gradient_boosting_classifier(train_x, train_y):
from sklearn.ensemble import GradientBoostingClassifier
model = GradientBoostingClassifier(n_estimators=100)
model.fit(train_x, train_y)
return model
# SVM Classifier
def svm_classifier(train_x, train_y):
from sklearn.svm import SVC
model = SVC(kernel='rbf', probability=True)
model.fit(train_x, train_y)
return model
# 3 layer neural network classification
def mlp_classifier(train_x,train_y):
from sklearn.neural_network import MLPClassifier
    model = MLPClassifier(hidden_layer_sizes=(256,128,32), max_iter=20, alpha=1e-4,
                          solver='adam', verbose=10, tol=1e-6, random_state=1,
                          learning_rate_init=.1)
model.fit(train_x,train_y)
return model
def CNN(train_x, train_y, exp_max=1350,exp_min=0):
#CNN hyperparameters
BATCH_SIZE=128
n_input = exp_max - exp_min
n_classes = 3
# Define network structure
model = Sequential()
model.add(K.layers.Conv1D(32, 8,strides=8, padding='same',input_shape=(n_input,1), activation='relu'))
model.add(K.layers.Conv1D(32, 5,strides=5, padding='same', activation='relu'))
model.add(K.layers.Conv1D(16, 3,strides=3, padding='same', activation='relu'))
model.add(K.layers.Flatten())
model.add(K.layers.Dense(1024,activation='relu'))
model.add(K.layers.Dense(n_classes, activation='softmax'))
#Define optimizer
optimizer = K.optimizers.rmsprop()
# Compile model
model.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['categorical_accuracy'])
train_x = train_x.reshape(train_x.shape[0],n_input,1)
model.fit(train_x,train_y,batch_size=BATCH_SIZE, nb_epoch=15,
verbose=1)
return model
###Output
_____no_output_____
###Markdown
 Let's create functions that can read .ASC and .xy files from subfolders (this function is not used in the demo)
###Code
#Gets .ASC files from directory
def spectra_list(path,excluded):
file_pth= [os.path.join(d, x) for d, dirs, files in os.walk(path) for x in files if x.endswith(".ASC") and excluded not in x]
return file_pth
#Gets .XY files from directory
def spectra_list2(path):
file_pth= [os.path.join(d, x) for d, dirs, files in os.walk(path) for x in files if x.endswith(".xy")]
return file_pth
#Groups all curves within a symmetry group into as single dataframe
def group(spectra,k):
groups=[]
for indx,vals in enumerate(spectra[k]):
groups.append(pd.read_csv(spectra[k][indx], delim_whitespace=True, header=None))
df=pd.concat(groups, axis=1)
return df
###Output
_____no_output_____
###Markdown
 Since the XRD intensities are in arbitrary units, we will scale them from 0 to 1 for the ML input. Let's define functions that normalize the data from 0 to 1 based on the data structure. You could use MinMaxScaler from scikit-learn, but since the data structure is not standardized, we define our own min-max scaler.
###Code
#Data normalization from 0 to 1 for double column dataframe
def normdata(data):
(len1,w1) = np.shape(data)
ndata = np.zeros([len1,w1//2])
for i in range(w1//2):
ndata[:,i]=(data[:,2*i+1]-min(data[:,2*i+1]))/(max(data[:,2*i+1])-min(data[:,2*i+1]))
return ndata
#data normalization from 0 to 1 for single column dataframe
def normdatasingle(data):
(len1,w1) = np.shape(data)
ndata = np.zeros([len1,w1])
for i in range(w1):
ndata[:,i]=(data[:,i]-min(data[:,i]))/(max(data[:,i])-min(data[:,i]))
return ndata
###Output
_____no_output_____
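To make the expected layout explicit: `normdata` assumes the columns alternate as (2theta, intensity, 2theta, intensity, ...) and rescales each intensity column independently. A quick check on a fabricated three-point array (the values are made up):

```python
# Toy check of normdata: column 0 is 2theta, column 1 is intensity.
toy = np.array([[10.0,  5.0],
                [20.0, 15.0],
                [30.0, 10.0]])
print(normdata(toy))  # the intensity column is rescaled to [[0.], [1.], [0.5]]
```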
###Markdown
 We only have ~200 spectra in our subfolders, so let's define data augmentation functions based on our domain knowledge of thin-film and powder XRD spectra
###Code
#data augmentation for simulated XRD spectra
def augdata(data,num,dframe,minn,maxn,labels):
np.random.seed(1234)
(len1,w1) = np.shape(data)
augd =np.zeros([len1,num])
naugd=np.zeros([len1,num])
newaugd=np.zeros([len1,num])
crop_augd = np.zeros([maxn-minn,num])
par1 = labels
pard = []
for i in range(num):
rnd = np.random.randint(0,w1)
# create the first filter for peak elimination
dumb= np.repeat(np.random.choice([0,1,1],300),len1//300)
dumb1= np.append(dumb,np.zeros([len1-len(dumb),]))
# create the second filter for peak scaling
dumbrnd= np.repeat(np.random.rand(100,),len1//100)
dumbrnd1=np.append(dumbrnd,np.zeros([len1-len(dumbrnd),]))
        #peak elimination and scaling
augd[:,i] = np.multiply((data[:,rnd]),dumbrnd1)
augd[:,i] = np.multiply(augd[:,i],dumb1)
        #normalization
naugd[:,i] = (augd[:,i]-min(augd[:,i]))/(max(augd[:,i])-min(augd[:,i])+1e-9)
pard.append (par1[2*rnd])
#adding shift
cut = np.random.randint(-20*1,20)
#XRD spectrum shift to left
if cut>=0:
newaugd[:,i] = np.append(naugd[cut:,i],np.zeros([cut,]))
#XRD spectrum shift to right
else:
newaugd[:,i] = np.append(naugd[0:len1+cut,i],np.zeros([cut*-1,]))
crop_augd[:,i] = newaugd[minn:maxn,i]
#
return newaugd, pard,crop_augd
###Output
_____no_output_____
###Markdown
data augmentation for experimental XRD spectra
###Code
def exp_augdata(data,num,label):
np.random.seed(1234)
(len1,w1) = np.shape(data)
augd =np.zeros([len1,num])
naugd=np.zeros([len1,num])
newaugd=np.zeros([len1,num])
par=np.zeros([num,])
for i in range(num):
rnd = np.random.randint(0,w1)
# create the first filter for peak elimination
dumb= np.repeat(np.random.choice([0,1,1],300),len1//300)
dumb1= np.append(dumb,np.zeros([len1-len(dumb),]))
# create the second filter for peak scaling
dumbrnd= np.repeat(np.random.rand(200,),len1//200)
dumbrnd1=np.append(dumbrnd,np.zeros([len1-len(dumbrnd),]))
        #peak elimination and scaling
augd[:,i] = np.multiply((data[:,rnd]),dumbrnd1)
augd[:,i] = np.multiply(augd[:,i],dumb1)
        #normalization
naugd[:,i] = (augd[:,i]-min(augd[:,i]))/(max(augd[:,i])-min(augd[:,i])+1e-9)
par[i,] =label[rnd,]
#adding shift
cut = np.random.randint(-20*1,20)
#XRD spectrum shift to left
if cut>=0:
newaugd[:,i] = np.append(naugd[cut:,i],np.zeros([cut,]))
#XRD spectrum shift to right
else:
newaugd[:,i] = np.append(naugd[0:len1+cut,i],np.zeros([cut*-1,]))
return newaugd, par
###Output
_____no_output_____
###Markdown
The experimental data contains noise and background. Let's write a function to remove it.
###Code
#extracting experimental data
def exp_data_processing (data,minn,maxn,window):
(len1,w1) = np.shape(data)
nexp1 =np.zeros([maxn-minn,w1])
for i in range(w1):
#savgol_filter to smooth the data
new1 = savgol_filter(data[minn:maxn,i], 31, 3)
#peak finding
zf= find_peaks_cwt(new1, np.arange(10,15), noise_perc=0.01)
#background substraction
for j in range(len(zf)-1):
zf_start= np.maximum(0,zf[j+1]-window//2)
zf_end = np.minimum(zf[j+1]+window//2,maxn)
peak = new1[zf_start:zf_end]
            ##arbitrarily remove 1/4 of the data
npeak = np.maximum(0,peak-max(np.partition(peak,window//5 )[0:window//5]))
nexp1[zf_start:zf_end,i]= npeak
return nexp1
###Output
_____no_output_____
###Markdown
 1. Load the data. The XRD spectra contain both experimental and theoretical data. The theoretical spectra are powder XRD spectra. The specific compound formulae of each XRD spectrum are scrubbed for data privacy issues; keep an eye open for our upcoming NIPS and arXiv publications for labeled datasets.
###Code
# Load simulated XRD spectra
theor=pd.read_csv('theor_d.csv',header=None)
# Load meaured XRD spectra
exp=pd.read_csv('exp_d.csv',header=None)
###Output
_____no_output_____
###Markdown
We can take a look at the data
###Code
theor.head(10)
###Output
_____no_output_____
###Markdown
Let's convert this big matrix from string to number and take out the first row as "labels" for our machine learning problem
###Code
#label
theor_labels= theor.iloc[0]
#take out the first row
theor = theor.drop(theor.index[0])
#convert from string to number
theor = theor.apply(pd.to_numeric, errors='coerce')
#convert from pandas dataframe to numpy array
theor_arr=theor.as_matrix()
#normalization
ntheor = normdata (theor_arr)
###Output
_____no_output_____
###Markdown
Let's plot the theoretical spectra
###Code
plt.plot(theor_arr[:,0],theor_arr[:,1],label='Theoretical')
plt.xlabel('2theta angle[degrees]')
plt.ylabel('Intensity [a.u.]')
plt.show()
plt.plot(theor_arr[:,0],ntheor[:,0],label='Theoretical')
plt.xlabel('2theta angle[degrees]')
plt.ylabel('Normalized Intensity [a.u.]')
plt.show()
###Output
_____no_output_____
###Markdown
 What is the size of our theoretical XRD spectra?
###Code
ntheor.shape
###Output
_____no_output_____
###Markdown
Let's have a look at the experimental spectra
###Code
exp.head(10)
###Output
_____no_output_____
###Markdown
We will do the same operation as what we have done for the theoretical data
###Code
#labels
exp_labels= exp.iloc[0]
#take out the first row
exp = exp.drop(exp.index[0])
#string to number
exp=exp.apply(pd.to_numeric, errors='coerce')
#dataframe to array
exp_arr=exp.as_matrix()
#We didn't simulate the peak at 5.00 degrees, so start from 5.04
exp_arr=exp_arr[1:,:]
#normalization
ntheor = normdata (theor_arr)
nexp = normdata (exp_arr)
###Output
_____no_output_____
###Markdown
 What is the shape of this matrix after normalization? (In other words, what is the available experimental data size?)
###Code
nexp.shape
###Output
_____no_output_____
###Markdown
 2. Data preprocessing. Trim the data. Since not all the data have the same range (2theta angles), we need to unify the range
###Code
#define the range for spectrum (this is to unify the measurement range)
exp_min = 0
exp_max = 1350
theor_min = 0
###Output
_____no_output_____
###Markdown
We can plot the measured spectra
###Code
plt.plot(exp_arr[exp_min:exp_max,0],exp_arr[exp_min:exp_max,3],label='Experimental data')
plt.xlabel('2theta angle[degrees]')
plt.ylabel('Intensity [a.u.]')
plt.show()
###Output
_____no_output_____
###Markdown
 Background and noise subtraction. The window size is a hyperparameter that we can change to determine the width of peaks. We call the previously defined function (exp_data_processing) to remove the measurement noise and the signals from the substrate
###Code
#window size for experimental data extraction
window =15
theor_max = theor_min+exp_max-exp_min
#experimetal data input
post_exp= normdatasingle(exp_data_processing (nexp,exp_min,exp_max,window))
###Output
_____no_output_____
###Markdown
Let's plot the experimental spectra again after data post-processing
###Code
fig ,ax1 = plt.subplots()
ax1.plot(exp_arr[exp_min:exp_max,0],exp_arr[exp_min:exp_max,3])
ax1.set_xlabel('2theta angle[degrees]')
ax1.set_ylabel('Intensity [a.u.]')
ax2 = ax1.twinx()
ax2.plot(exp_arr[exp_min:exp_max,0],post_exp[:,1],color ='r')
ax2.set_ylabel('Normalized Intensity [a.u.]')
fig.tight_layout()
plt.legend(['Post processing'])
plt.show()
###Output
_____no_output_____
###Markdown
 3. Data augmentation. Let's augment the data for the theoretical dataset first. Specify how many augmented data points we generate for the theoretical and experimental data
###Code
#let's start to do the data augmentation.
theor_aug_num = 1000
exp_aug_num = 1000
augd,pard,crop_augd = augdata(ntheor,theor_aug_num,theor,theor_min,theor_max,theor_labels)
###Output
_____no_output_____
###Markdown
 Let's start from one theoretical spectrum
###Code
rnd = 0#np.random.randint(0,100)
demo_t = ntheor[theor_min:theor_max,(rnd)]
demo_x = theor_arr[theor_min:theor_max,0]
plt.plot(demo_x,demo_t,label='Original')
plt.legend(loc='upper right')
plt.show()
len(demo_x)
###Output
_____no_output_____
###Markdown
Some peaks will not be visible due to preferred orientation, crystal size etc. We will add a periodic blocking filter which randomly eliminates peaks
###Code
#add in the first filter (peak elimination)
dum1= np.repeat(np.random.choice([0,0,1],270),len(demo_x)//270)
demo_1st = np.multiply( demo_t,dum1)
#plot
plt.plot(demo_x,demo_1st,label='Peak Elimination', color= 'r')
plt.legend(loc='upper right')
plt.show()
###Output
_____no_output_____
###Markdown
 The relative intensities of those peaks also depend on the preferred orientation. We will add another periodic filter that scales intensities randomly
###Code
dum2= np.repeat(np.random.rand(135,),len(demo_x)//135)
demo_2nd = np.multiply( demo_1st,dum2)
#plot
plt.plot(demo_x,demo_2nd,label='Peak scaling', color= 'k')
plt.legend(loc='upper right')
plt.show()
###Output
_____no_output_____
###Markdown
Lastly, the strain and instrumental error may cause the shift of the spectra. We will shift the spectra within a limited range.
###Code
cut = np.random.randint(-20*1,20)
#XRD spectrum shift to left
if cut>=0:
demo_3rd = np.append(demo_2nd[cut:,],np.zeros([cut,]))
#XRD spectrum shift to right
else:
demo_3rd = np.append(demo_2nd[0:len(demo_x)+cut,],np.zeros([cut*-1,]))
#plot
plt.plot(demo_x,demo_3rd,label='Peak shift', color= 'b')
plt.legend(loc='upper right')
plt.show()
###Output
_____no_output_____
###Markdown
 We can repeat this augmentation process many times for all spectra. Now we will augment the spectra, both experimental and theoretical, from ~200 to 2000! We should also add labels to those augmented spectra
###Code
#convert theoretical labels from dimensionality to numbers
directory = ['0','2','3']
label_t=np.zeros([len(pard),])
for i in range(len(pard)):
temp = pard[i]
label_t[i]=directory.index(temp[0])
#convert experimental label from dimensionality to numbers
par_exp = exp_labels
label_exp=np.zeros([len(par_exp)//2,])
for i in range(len(par_exp)//2):
temp = par_exp[2*i]
label_exp[i]=directory.index(temp[0])
###Output
_____no_output_____
###Markdown
 4. Classification and cross validation using various ML algorithms. After data extraction, data preprocessing and data augmentation, we now have ~2000 spectra as input to train our machine learning algorithms. We can use part of those spectra to fine-tune the hyperparameters and test on the "untouched" spectra. That test was done in the paper and will not be conducted here for the sake of time. Let's determine how many spectra we want to use for cross validation of our machine learning algorithm. The Convolutional Neural Network (CNN) used in this demo is slightly modified from our paper (replace global average pooling layer with a fully connected layer) to shorten the training time. For the detailed structure and class activation maps (CAM), please refer to the space_group_a_CNN.py file in our repository.
###Code
#define the number of experimental spectra we use for an N-fold cross validation
exp_num =70
X_exp = np.transpose(post_exp[:,0:exp_num])
y_exp = label_exp[0:exp_num]
#train and test split for the experimental data
#X_train_exp, X_test_exp, y_train_exp, y_test_exp = train_test_split(X_exp
# ,y_exp , test_size=0.33,random_state=1)
#train and test split for the theorectical data
X_th = np.transpose(crop_augd )
y_th = label_t
#X_train_th, X_test_th, y_train_th, y_test_th = train_test_split(
# X_th, y_th, test_size=0.33,random_state=1)
#convert the labels to onehot encoder
enc = OneHotEncoder(sparse=False)
y_th_onehot = enc.fit_transform(y_th .reshape(-1,1))
y_exp_onehot = enc.transform(y_exp.reshape(-1,1))
###Output
_____no_output_____
###Markdown
 Split the data into N folds --- (N-1)/N for training, 1/N for testing
###Code
n_fold = 5
from sklearn.model_selection import KFold
k_fold = KFold(n_splits=n_fold, shuffle=True,random_state=30)
###Output
_____no_output_____
###Markdown
 Choose your favorite machine learning algorithm
###Code
test_classifier = ['RF']
classifiers = {'NB':naive_bayes_classifier,
'KNN' :knn_classifier,
'RF':random_forest_classifier,
'DT':decision_tree_classifier,
'SVM':svm_classifier,
'GBDT':gradient_boosting_classifier,
'NN':mlp_classifier,
'CNN': CNN
}
accuracy_exp = np.empty((n_fold,1))
start_time = time.time()
for classifier in test_classifier:
print ('******************* %s ********************' % test_classifier)
for k, (train, test) in enumerate(k_fold.split(X_exp, y_exp)):
        #data augmentation applied to the experimental training dataset
temp_x = X_exp[train]
temp_y = y_exp[train]
exp_train_x,exp_train_y = exp_augdata(temp_x.T,exp_aug_num ,temp_y)
        #combine theoretical and experimental datasets for training
train_combine = np.concatenate((X_th,exp_train_x.T))
train_y = np.concatenate((y_th,exp_train_y))
#predict experimental prediction accuracy
if classifier == 'CNN':
train_y = enc.transform(train_y.reshape(-1,1))
model = classifiers[classifier](train_combine, train_y)
predict_exp = model.predict(np.expand_dims(X_exp[test],2))
predict_exp = enc.inverse_transform(predict_exp)
else:
model = classifiers[classifier](train_combine, train_y)
predict_exp = model.predict(X_exp[test])
accuracy_exp[k] = accuracy_score(y_exp[test], predict_exp)
print ('accuracy_exp: %.2f%%' % (100 * accuracy_exp[k]))
# 5 fold cross validation
print ('CV took %fs!' % (time.time() - start_time) )
print('Cross-validation results:')
print('Folds: %i, mean acc: %.3f' % (len(accuracy_exp), np.mean(np.abs(accuracy_exp))))
###Output
CV took 23.888243s!
Cross-validation results:
Folds: 5, mean acc: 0.871
|
Classes/Class-05.05/code/functional-api.ipynb | ###Markdown
 Let's see how else we can build models. Some of the examples are from [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). Different ways to build Keras models Keras - easy to start with, but it can also provide deep dives for those who need them. NLP and image processing are such needs. The Sequential model **The `Sequential` class** Thing to remember - Keras is API based. Sequential is a list of stacked layers - there lies its first limitation. So: 1. Only feedforward models. 2. Only one input (not, for example, a text/picture and its metadata) 3. Only one output (not, for example, multiple regression/classification predictions for one data point) 4. Linear topology
###Code
from tensorflow import keras
from tensorflow.keras import layers
model = keras.Sequential([
layers.Dense(64, activation="relu"),
layers.Dense(10, activation="softmax")
])
###Output
_____no_output_____
###Markdown
**Incrementally building a Sequential model** Like in Python list!
###Code
model = keras.Sequential()
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(10, activation="softmax"))
model.weights
###Output
_____no_output_____
###Markdown
Error - model is not built yet **Calling a model for the first time to build it**
###Code
model.build(input_shape=(None, 3))
model.weights
###Output
_____no_output_____
###Markdown
**The summary method**
###Code
model.summary()
###Output
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_2 (Dense) (None, 64) 256
dense_3 (Dense) (None, 10) 650
=================================================================
Total params: 906
Trainable params: 906
Non-trainable params: 0
_________________________________________________________________
###Markdown
**Naming models and layers with the `name` argument** You can name every layer, model and so on.
###Code
model = keras.Sequential(name="my_example_model")
model.add(layers.Dense(64, activation="relu", name="my_first_layer"))
model.add(layers.Dense(10, activation="softmax", name="my_last_layer"))
model.build((None, 3))
model.summary()
###Output
Model: "my_example_model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
my_first_layer (Dense) (None, 64) 256
my_last_layer (Dense) (None, 10) 650
=================================================================
Total params: 906
Trainable params: 906
Non-trainable params: 0
_________________________________________________________________
###Markdown
**Specifying the input shape of your model in advance** With that you can see the summary without actually building the model
###Code
model = keras.Sequential()
model.add(keras.Input(shape=(3,)))
model.add(layers.Dense(64, activation="relu"))
model.summary()
model.add(layers.Dense(10, activation="softmax"))
model.summary()
###Output
Model: "sequential_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_6 (Dense) (None, 64) 256
dense_7 (Dense) (None, 10) 650
=================================================================
Total params: 906
Trainable params: 906
Non-trainable params: 0
_________________________________________________________________
###Markdown
The Functional API A simple example **A simple Functional model with two `Dense` layers**
###Code
inputs = keras.Input(shape=(3,), name="my_input")
features = layers.Dense(64, activation="relu")(inputs)
outputs = layers.Dense(10, activation="softmax")(features)
model = keras.Model(inputs=inputs, outputs=outputs)
inputs = keras.Input(shape=(3,), name="my_input")
inputs.shape
inputs.dtype
###Output
_____no_output_____
###Markdown
This is what we did - add another layer on top of the previous one
###Code
features = layers.Dense(64, activation="relu")(inputs)
features.shape
outputs = layers.Dense(10, activation="softmax")(features)
model = keras.Model(inputs=inputs, outputs=outputs)
model.summary()
###Output
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
my_input (InputLayer) [(None, 3)] 0
dense_8 (Dense) (None, 64) 256
dense_9 (Dense) (None, 10) 650
=================================================================
Total params: 906
Trainable params: 906
Non-trainable params: 0
_________________________________________________________________
###Markdown
 Multi-input, multi-output models **A multi-input, multi-output Functional model** Let's imagine a use case of dealing with support tickets. We need to move tickets to the appropriate departments. What we know: 1. Title of the ticket (text) 2. Text of the ticket (text) 3. Tags added by the user (one-hot encoding) We expect to predict: 1. Priority of the ticket (regression) 2. Department where to move the ticket (classification)
###Code
vocabulary_size = 10000 #this is what we did on bert - how many words do we use from the dictionary
num_tags = 100
num_departments = 4
title = keras.Input(shape=(vocabulary_size,), name="title")
text_body = keras.Input(shape=(vocabulary_size,), name="text_body")
tags = keras.Input(shape=(num_tags,), name="tags")
features = layers.Concatenate()([title, text_body, tags])
features = layers.Dense(64, activation="relu")(features)
priority = layers.Dense(1, activation="sigmoid", name="priority")(features)
department = layers.Dense(
num_departments, activation="softmax", name="department")(features)
model = keras.Model(inputs=[title, text_body, tags], outputs=[priority, department])
###Output
_____no_output_____
###Markdown
Training a multi-input, multi-output model **Training a model by providing lists of input & target arrays**
###Code
import numpy as np
num_samples = 1280
title_data = np.random.randint(0, 2, size=(num_samples, vocabulary_size))
text_body_data = np.random.randint(0, 2, size=(num_samples, vocabulary_size))
tags_data = np.random.randint(0, 2, size=(num_samples, num_tags))
priority_data = np.random.random(size=(num_samples, 1))
department_data = np.random.randint(0, 2, size=(num_samples, num_departments))
model.compile(optimizer="rmsprop",
loss=["mean_squared_error", "categorical_crossentropy"],
metrics=[["mean_absolute_error"], ["accuracy"]])
model.fit([title_data, text_body_data, tags_data],
[priority_data, department_data],
epochs=1)
model.evaluate([title_data, text_body_data, tags_data],
[priority_data, department_data])
priority_preds, department_preds = model.predict([title_data, text_body_data, tags_data])
###Output
40/40 [==============================] - 2s 21ms/step - loss: 21.9741 - priority_loss: 0.3349 - department_loss: 21.6392 - priority_mean_absolute_error: 0.5032 - department_accuracy: 0.2242
40/40 [==============================] - 1s 8ms/step - loss: 23.2868 - priority_loss: 0.3454 - department_loss: 22.9414 - priority_mean_absolute_error: 0.5125 - department_accuracy: 0.2578
###Markdown
**Training a model by providing dicts of input & target arrays** Here we are using names - for complex models
###Code
model.compile(optimizer="rmsprop",
loss={"priority": "mean_squared_error", "department": "categorical_crossentropy"},
metrics={"priority": ["mean_absolute_error"], "department": ["accuracy"]})
model.fit({"title": title_data, "text_body": text_body_data, "tags": tags_data},
{"priority": priority_data, "department": department_data},
epochs=1)
model.evaluate({"title": title_data, "text_body": text_body_data, "tags": tags_data},
{"priority": priority_data, "department": department_data})
priority_preds, department_preds = model.predict(
{"title": title_data, "text_body": text_body_data, "tags": tags_data})
###Output
40/40 [==============================] - 2s 22ms/step - loss: 31.4611 - priority_loss: 0.3169 - department_loss: 31.1442 - priority_mean_absolute_error: 0.4839 - department_accuracy: 0.2727
40/40 [==============================] - 1s 8ms/step - loss: 46.4265 - priority_loss: 0.3205 - department_loss: 46.1060 - priority_mean_absolute_error: 0.4875 - department_accuracy: 0.1102
###Markdown
The power of the Functional API: Access to layer connectivity
###Code
keras.utils.plot_model(model, "ticket_classifier.png")
###Output
('You must install pydot (`pip install pydot`) and install graphviz (see instructions at https://graphviz.gitlab.io/download/) ', 'for plot_model/model_to_dot to work.')
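The error above means `plot_model` needs the `pydot` Python package plus the Graphviz system binaries. A sketch of the installs is below; the Graphviz line assumes a Debian/Ubuntu environment (on conda, `conda install graphviz pydot` is the usual route). Restart the kernel afterwards and re-run the cell.

```python
!pip install pydot
!apt-get install -y graphviz  # assumption: Debian/Ubuntu; use your platform's package manager otherwise
```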
###Markdown
 Source: https://www.manning.com/books/deep-learning-with-python This can mean retrieving features from other models and reusing components **Retrieving the inputs or outputs of a layer in a Functional model**
###Code
model.layers
model.layers[3].input
model.layers[3].output
###Output
_____no_output_____
###Markdown
 **Creating a new model by reusing intermediate layer outputs** Adding a new prediction to the existing model. Easy! Now: 3 categories of difficulty for resolving the ticket
###Code
features = model.layers[4].output
difficulty = layers.Dense(3, activation="softmax", name="difficulty")(features)
new_model = keras.Model(
inputs=[title, text_body, tags],
outputs=[priority, department, difficulty])
keras.utils.plot_model(new_model, "updated_ticket_classifier.png", show_shapes=True)
###Output
_____no_output_____
###Markdown
 Subclassing the Model class Rewriting our previous example as a subclassed model **A simple subclassed model** You're in charge - you can write call() and __init__() however you like, without the graph-like constraints of the Functional API
###Code
class CustomerTicketModel(keras.Model):
def __init__(self, num_departments):
super().__init__()
self.concat_layer = layers.Concatenate()
self.mixing_layer = layers.Dense(64, activation="relu")
self.priority_scorer = layers.Dense(1, activation="sigmoid")
self.department_classifier = layers.Dense(
num_departments, activation="softmax")
def call(self, inputs):
title = inputs["title"]
text_body = inputs["text_body"]
tags = inputs["tags"]
features = self.concat_layer([title, text_body, tags])
features = self.mixing_layer(features)
priority = self.priority_scorer(features)
department = self.department_classifier(features)
return priority, department
model = CustomerTicketModel(num_departments=4)
priority, department = model(
{"title": title_data, "text_body": text_body_data, "tags": tags_data})
model.compile(optimizer="rmsprop",
loss=["mean_squared_error", "categorical_crossentropy"],
metrics=[["mean_absolute_error"], ["accuracy"]])
model.fit({"title": title_data,
"text_body": text_body_data,
"tags": tags_data},
[priority_data, department_data],
epochs=1)
model.evaluate({"title": title_data,
"text_body": text_body_data,
"tags": tags_data},
[priority_data, department_data])
priority_preds, department_preds = model.predict({"title": title_data,
"text_body": text_body_data,
"tags": tags_data})
###Output
40/40 [==============================] - 2s 19ms/step - loss: 36.7537 - output_1_loss: 0.3170 - output_2_loss: 36.4367 - output_1_mean_absolute_error: 0.4840 - output_2_accuracy: 0.1750
40/40 [==============================] - 1s 7ms/step - loss: 33.4412 - output_1_loss: 0.3205 - output_2_loss: 33.1208 - output_1_mean_absolute_error: 0.4875 - output_2_accuracy: 0.5758
###Markdown
 Beware: What subclassed models don't support This is your Python code. You cannot: 1. Use summary() 2. Use plot_model() 3. Just snap pieces together - this is your model, with potentially more room for mistakes and debugging issues. Mixing and matching different components **Creating a Functional model that includes a subclassed model** A Subclass in a Functional Model, a Functional Model inside a Subclass
###Code
class Classifier(keras.Model):
def __init__(self, num_classes=2):
super().__init__()
if num_classes == 2:
num_units = 1
activation = "sigmoid"
else:
num_units = num_classes
activation = "softmax"
self.dense = layers.Dense(num_units, activation=activation)
def call(self, inputs):
return self.dense(inputs)
inputs = keras.Input(shape=(3,))
features = layers.Dense(64, activation="relu")(inputs)
outputs = Classifier(num_classes=10)(features)
model = keras.Model(inputs=inputs, outputs=outputs)
###Output
_____no_output_____
###Markdown
**Creating a subclassed model that includes a Functional model**
###Code
inputs = keras.Input(shape=(64,))
outputs = layers.Dense(1, activation="sigmoid")(inputs)
binary_classifier = keras.Model(inputs=inputs, outputs=outputs)
class MyModel(keras.Model):
def __init__(self, num_classes=2):
super().__init__()
self.dense = layers.Dense(64, activation="relu")
self.classifier = binary_classifier
def call(self, inputs):
features = self.dense(inputs)
return self.classifier(features)
model = MyModel()
###Output
_____no_output_____
###Markdown
Remember: Use the right tool for the job So what to choose? Using built-in training and evaluation loops **The standard workflow: `compile()`, `fit()`, `evaluate()`, `predict()`**
###Code
from tensorflow.keras.datasets import mnist
def get_mnist_model():
inputs = keras.Input(shape=(28 * 28,))
features = layers.Dense(512, activation="relu")(inputs)
features = layers.Dropout(0.5)(features)
outputs = layers.Dense(10, activation="softmax")(features)
model = keras.Model(inputs, outputs)
return model
(images, labels), (test_images, test_labels) = mnist.load_data()
images = images.reshape((60000, 28 * 28)).astype("float32") / 255
test_images = test_images.reshape((10000, 28 * 28)).astype("float32") / 255
train_images, val_images = images[10000:], images[:10000]
train_labels, val_labels = labels[10000:], labels[:10000]
model = get_mnist_model()
model.compile(optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
model.fit(train_images, train_labels,
epochs=3,
validation_data=(val_images, val_labels))
test_metrics = model.evaluate(test_images, test_labels)
predictions = model.predict(test_images)
###Output
Epoch 1/3
1563/1563 [==============================] - 15s 9ms/step - loss: 0.2944 - accuracy: 0.9129 - val_loss: 0.1508 - val_accuracy: 0.9555
Epoch 2/3
1563/1563 [==============================] - 13s 8ms/step - loss: 0.1661 - accuracy: 0.9527 - val_loss: 0.1247 - val_accuracy: 0.9656
Epoch 3/3
1563/1563 [==============================] - 17s 11ms/step - loss: 0.1387 - accuracy: 0.9630 - val_loss: 0.1155 - val_accuracy: 0.9700
313/313 [==============================] - 1s 4ms/step - loss: 0.1157 - accuracy: 0.9718
###Markdown
Writing your own metrics **Implementing a custom metric by subclassing the `Metric` class**
###Code
import tensorflow as tf
class RootMeanSquaredError(keras.metrics.Metric):
def __init__(self, name="rmse", **kwargs):
super().__init__(name=name, **kwargs)
self.mse_sum = self.add_weight(name="mse_sum", initializer="zeros")
self.total_samples = self.add_weight(
name="total_samples", initializer="zeros", dtype="int32")
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.one_hot(y_true, depth=tf.shape(y_pred)[1])
mse = tf.reduce_sum(tf.square(y_true - y_pred))
self.mse_sum.assign_add(mse)
num_samples = tf.shape(y_pred)[0]
self.total_samples.assign_add(num_samples)
def result(self):
return tf.sqrt(self.mse_sum / tf.cast(self.total_samples, tf.float32))
def reset_state(self):
self.mse_sum.assign(0.)
self.total_samples.assign(0)
model = get_mnist_model()
model.compile(optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["accuracy", RootMeanSquaredError()])
model.fit(train_images, train_labels,
epochs=3,
validation_data=(val_images, val_labels))
test_metrics = model.evaluate(test_images, test_labels)
###Output
Epoch 1/3
1563/1563 [==============================] - 21s 13ms/step - loss: 0.2970 - accuracy: 0.9119 - rmse: 7.1787 - val_loss: 0.1565 - val_accuracy: 0.9537 - val_rmse: 7.3448
Epoch 2/3
1563/1563 [==============================] - 21s 14ms/step - loss: 0.1679 - accuracy: 0.9537 - rmse: 7.3537 - val_loss: 0.1228 - val_accuracy: 0.9677 - val_rmse: 7.4049
Epoch 3/3
1563/1563 [==============================] - 22s 14ms/step - loss: 0.1399 - accuracy: 0.9631 - rmse: 7.3904 - val_loss: 0.1161 - val_accuracy: 0.9705 - val_rmse: 7.4240
313/313 [==============================] - 1s 4ms/step - loss: 0.1095 - accuracy: 0.9716 - rmse: 7.4361
###Markdown
Using callbacks The EarlyStopping and ModelCheckpoint callbacks **Using the `callbacks` argument in the `fit()` method**
###Code
callbacks_list = [
keras.callbacks.EarlyStopping(
monitor="val_accuracy",
patience=2,
),
keras.callbacks.ModelCheckpoint(
filepath="checkpoint_path.keras",
monitor="val_loss",
save_best_only=True,
)
]
model = get_mnist_model()
model.compile(optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
model.fit(train_images, train_labels,
epochs=10,
callbacks=callbacks_list,
validation_data=(val_images, val_labels))
model = keras.models.load_model("checkpoint_path.keras")
###Output
_____no_output_____
###Markdown
Writing your own callbacks **Creating a custom callback by subclassing the `Callback` class**
###Code
from matplotlib import pyplot as plt
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs):
self.per_batch_losses = []
def on_batch_end(self, batch, logs):
self.per_batch_losses.append(logs.get("loss"))
def on_epoch_end(self, epoch, logs):
plt.clf()
plt.plot(range(len(self.per_batch_losses)), self.per_batch_losses,
label="Training loss for each batch")
plt.xlabel(f"Batch (epoch {epoch})")
plt.ylabel("Loss")
plt.legend()
plt.savefig(f"plot_at_epoch_{epoch}")
self.per_batch_losses = []
model = get_mnist_model()
model.compile(optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
model.fit(train_images, train_labels,
epochs=10,
callbacks=[LossHistory()],
validation_data=(val_images, val_labels))
###Output
Epoch 1/10
1563/1563 [==============================] - 17s 10ms/step - loss: 0.2993 - accuracy: 0.9108 - val_loss: 0.1558 - val_accuracy: 0.9555
Epoch 2/10
1563/1563 [==============================] - 13s 9ms/step - loss: 0.1686 - accuracy: 0.9527 - val_loss: 0.1269 - val_accuracy: 0.9657
Epoch 3/10
1563/1563 [==============================] - 14s 9ms/step - loss: 0.1385 - accuracy: 0.9626 - val_loss: 0.1124 - val_accuracy: 0.9710
Epoch 4/10
1563/1563 [==============================] - 20s 12ms/step - loss: 0.1279 - accuracy: 0.9671 - val_loss: 0.1142 - val_accuracy: 0.9716
Epoch 5/10
1563/1563 [==============================] - 23s 15ms/step - loss: 0.1187 - accuracy: 0.9707 - val_loss: 0.1178 - val_accuracy: 0.9733
Epoch 6/10
1563/1563 [==============================] - 22s 14ms/step - loss: 0.1115 - accuracy: 0.9729 - val_loss: 0.1203 - val_accuracy: 0.9745
Epoch 7/10
1563/1563 [==============================] - 23s 14ms/step - loss: 0.1071 - accuracy: 0.9743 - val_loss: 0.1151 - val_accuracy: 0.9754
Epoch 8/10
1563/1563 [==============================] - 18s 11ms/step - loss: 0.1037 - accuracy: 0.9761 - val_loss: 0.1284 - val_accuracy: 0.9759
Epoch 9/10
1563/1563 [==============================] - 15s 9ms/step - loss: 0.0993 - accuracy: 0.9776 - val_loss: 0.1270 - val_accuracy: 0.9771
Epoch 10/10
1563/1563 [==============================] - 14s 9ms/step - loss: 0.0949 - accuracy: 0.9787 - val_loss: 0.1322 - val_accuracy: 0.9767
###Markdown
Monitoring and visualization with TensorBoard
###Code
model = get_mnist_model()
model.compile(optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
tensorboard = keras.callbacks.TensorBoard(
log_dir="/full_path_to_your_log_dir",
)
model.fit(train_images, train_labels,
epochs=10,
validation_data=(val_images, val_labels),
callbacks=[tensorboard])
%load_ext tensorboard
%tensorboard --logdir /full_path_to_your_log_dir
###Output
The tensorboard extension is already loaded. To reload it, use:
%reload_ext tensorboard
###Markdown
Writing your own training and evaluation loops Training versus inference Low-level usage of metrics
###Code
metric = keras.metrics.SparseCategoricalAccuracy()
targets = [0, 1, 2]
predictions = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
metric.update_state(targets, predictions)
current_result = metric.result()
print(f"result: {current_result:.2f}")
values = [0, 1, 2, 3, 4]
mean_tracker = keras.metrics.Mean()
for value in values:
mean_tracker.update_state(value)
print(f"Mean of values: {mean_tracker.result():.2f}")
###Output
_____no_output_____
###Markdown
A complete training and evaluation loop **Writing a step-by-step training loop: the training step function**
###Code
model = get_mnist_model()
loss_fn = keras.losses.SparseCategoricalCrossentropy()
optimizer = keras.optimizers.RMSprop()
metrics = [keras.metrics.SparseCategoricalAccuracy()]
loss_tracking_metric = keras.metrics.Mean()
def train_step(inputs, targets):
with tf.GradientTape() as tape:
predictions = model(inputs, training=True)
loss = loss_fn(targets, predictions)
gradients = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
logs = {}
for metric in metrics:
metric.update_state(targets, predictions)
logs[metric.name] = metric.result()
loss_tracking_metric.update_state(loss)
logs["loss"] = loss_tracking_metric.result()
return logs
###Output
_____no_output_____
###Markdown
**Writing a step-by-step training loop: resetting the metrics**
###Code
def reset_metrics():
for metric in metrics:
metric.reset_state()
loss_tracking_metric.reset_state()
###Output
_____no_output_____
###Markdown
**Writing a step-by-step training loop: the loop itself**
###Code
training_dataset = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
training_dataset = training_dataset.batch(32)
epochs = 3
for epoch in range(epochs):
reset_metrics()
for inputs_batch, targets_batch in training_dataset:
logs = train_step(inputs_batch, targets_batch)
print(f"Results at the end of epoch {epoch}")
for key, value in logs.items():
print(f"...{key}: {value:.4f}")
###Output
_____no_output_____
###Markdown
**Writing a step-by-step evaluation loop**
###Code
def test_step(inputs, targets):
predictions = model(inputs, training=False)
loss = loss_fn(targets, predictions)
logs = {}
for metric in metrics:
metric.update_state(targets, predictions)
logs["val_" + metric.name] = metric.result()
loss_tracking_metric.update_state(loss)
logs["val_loss"] = loss_tracking_metric.result()
return logs
val_dataset = tf.data.Dataset.from_tensor_slices((val_images, val_labels))
val_dataset = val_dataset.batch(32)
reset_metrics()
for inputs_batch, targets_batch in val_dataset:
logs = test_step(inputs_batch, targets_batch)
print("Evaluation results:")
for key, value in logs.items():
print(f"...{key}: {value:.4f}")
###Output
_____no_output_____
###Markdown
Leveraging fit() with a custom training loop **Implementing a custom training step to use with `fit()`**
###Code
loss_fn = keras.losses.SparseCategoricalCrossentropy()
loss_tracker = keras.metrics.Mean(name="loss")
class CustomModel(keras.Model):
def train_step(self, data):
inputs, targets = data
with tf.GradientTape() as tape:
predictions = self(inputs, training=True)
loss = loss_fn(targets, predictions)
gradients = tape.gradient(loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(gradients, self.trainable_weights))
loss_tracker.update_state(loss)
return {"loss": loss_tracker.result()}
@property
def metrics(self):
return [loss_tracker]
inputs = keras.Input(shape=(28 * 28,))
features = layers.Dense(512, activation="relu")(inputs)
features = layers.Dropout(0.5)(features)
outputs = layers.Dense(10, activation="softmax")(features)
model = CustomModel(inputs, outputs)
model.compile(optimizer=keras.optimizers.RMSprop())
model.fit(train_images, train_labels, epochs=3)
class CustomModel(keras.Model):
def train_step(self, data):
inputs, targets = data
with tf.GradientTape() as tape:
predictions = self(inputs, training=True)
loss = self.compiled_loss(targets, predictions)
gradients = tape.gradient(loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(gradients, self.trainable_weights))
self.compiled_metrics.update_state(targets, predictions)
return {m.name: m.result() for m in self.metrics}
inputs = keras.Input(shape=(28 * 28,))
features = layers.Dense(512, activation="relu")(inputs)
features = layers.Dropout(0.5)(features)
outputs = layers.Dense(10, activation="softmax")(features)
model = CustomModel(inputs, outputs)
model.compile(optimizer=keras.optimizers.RMSprop(),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()])
model.fit(train_images, train_labels, epochs=3)
###Output
_____no_output_____ |
Chapter01/Activity01/Activity01-HandlingList.ipynb | ###Markdown
 Create a list of random numbers and then create another list from this one whose elements are divisible by three. Also repeat the experiment a few times (at least three times) and calculate the arithmetic mean of the difference in length of the two lists. Task-1: Create a list of random numbers (at least 100 in length, but we encourage you to play with the length). __Pay attention so that this list has as few duplicates as possible__
###Code
### Write your code here below this comment
import random
dataset = [random.randrange(0, 100) for i in range(100)]
###Output
_____no_output_____
###Markdown
 Task-2: Write a list comprehension to generate a second list from the one you just created. The condition of membership in the second list is divisibility by 3.
###Code
### Write your code bellow this comment
subset = [i for i in dataset if i % 3 == 0]
###Output
_____no_output_____
###Markdown
 Task-3: - Use the `len` function to measure the length of the first list and the second list - Store both in two different variables - Calculate the difference in length between them
###Code
### Write your code below this comment
dataset_len = len(dataset)
subset_len = len(subset)
dataset_len - subset_len
###Output
_____no_output_____
###Markdown
 Task-4: - Pack `Task-2` and `Task-3` in a single while loop and perform them a few times, so that at the end you have a list of length differences - End the while loop when the desired number of experiments is finished (at least three; please feel free to do more) - Calculate the arithmetic mean (common average) of the length differences that you have. (How do you sum all values of a list?)
###Code
### Write your code below this comment.
i = 0
while i != subset_len:
print(subset[i], end=' ')
print(dataset[i], end=' ')
i+=1
i = 0
dataset_sum = 0
subset_sum = 0
while i != dataset_len:
dataset_sum += dataset[i]
if i < subset_len:
subset_sum += subset[i]
i+=1
dataset_sum/dataset_len, subset_sum/subset_len
###Output
_____no_output_____ |
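The cells above print elements and compute the means of the values themselves. For reference, a sketch that follows the task exactly as stated (repeat the experiment several times, record the difference in list lengths each time, then average those differences) could look like this; three repetitions is an assumed choice:

```python
# Repeat the random-list experiment and average the length differences.
length_differences = []
experiments = 0
while experiments < 3:  # at least three repetitions, as the task asks
    data = [random.randrange(0, 100) for _ in range(100)]
    div_by_three = [x for x in data if x % 3 == 0]
    length_differences.append(len(data) - len(div_by_three))
    experiments += 1
print(sum(length_differences) / len(length_differences))
```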
Machine Learning Experiment 8.ipynb | ###Markdown
Country Data for k = 2
###Code
model = KMeans(n_clusters=2, n_init=4, verbose=1, max_iter=3)
model.fit(X1)
model.cluster_centers_
model.inertia_
###Output
_____no_output_____
###Markdown
Credit Card Data for k = 2
###Code
model = KMeans(n_clusters=2, n_init=4, verbose=1, max_iter=3)
model.fit(X2)
model.cluster_centers_
model.inertia_
###Output
_____no_output_____
###Markdown
Country Data for k = 5
###Code
model = KMeans(n_clusters=5, n_init=4, verbose=1, max_iter=3)
model.fit(X1)
model.cluster_centers_
model.inertia_
###Output
_____no_output_____ |
work_generic/main_generic.ipynb | ###Markdown
 FINN Preprocessor

This notebook contains the code to preprocess active fire data for the FINN fire emissions model.

1. User specified configurations

To run the FINN preprocessor, you should specify the following:
1. `tag_af`: a tag or name for your active fire dataset, e.g., `'my-af-data'`
2. `af_fnames`: a list of file paths to active fire shape files (e.g., downloaded from [FIRMS](https://firms.modaps.eosdis.nasa.gov/))
3. `year_rst`: MODIS raster data year to be used for the analysis

Final output files will be named "out_tag_af_*.csv" and "out_tag_af_*.shp".

Default settings

To use a small example dataset that is bundled with the FINN preprocessor, you can use the following settings:
```python
tag_af = 'testOTS_092018'
af_fnames = [
    '../sample_datasets/fire/testOTS_092018/fire_archive_M6_23960.shp',
    '../sample_datasets/fire/testOTS_092018/fire_archive_V1_23961.shp',
]
year_rst = 2017
```

Custom settings with MODIS and VIIRS

**If you have custom MODIS and VIIRS shapefiles** you must specify the file path(s) to the active fire data shapefiles. Ensure that the shapefiles are unzipped and placed within the `finn_preproc/data/` directory:
```python
tag_af = 'custom-job'
af_fnames = [
    '../data/fire_archive_M6_...',
    '../data/fire_archive_V1_...',
]
year_rst = 2017
```

Custom settings with MODIS or VIIRS

Alternatively, if you have just MODIS or VIIRS, you can specify one file path, e.g.:
```python
tag_af = 'custom-job'
af_fnames = [
    '../data/fire_archive_M6_...',
]
year_rst = 2017
```
###Code
# Edit this cell to specify custom files, or use the defaults for a test run
tag_af = 'testOTS_092018'
af_fnames = [
'../sample_datasets/fire/testOTS_092018/fire_archive_M6_23960.shp',
'../sample_datasets/fire/testOTS_092018/fire_archive_V1_23961.shp',
]
# MODIS raster datasets' year
year_rst = 2017
###Output
_____no_output_____
###Markdown
Defining a year for MODIS land cover and vegetation dataMODIS land cover types and vegetation continuous field will be downloaded for the region of active fire input. Specify the year by changing the value of `year_rst`.
###Code
# Specify the year to use for MODIS land cover and vegetation data here
year_rst = 2017
###Output
_____no_output_____
###Markdown
The rest of code below should run without modification. 2. Module imports
###Code
# python libraries
import sys
import os
import re
import glob
import datetime
import subprocess
import shlex
from urllib.parse import urlparse
from importlib import reload
import gdal
import matplotlib.pylab as plt
# finn preproc codes
sys.path = sys.path + ['../code_anaconda']
import downloader
import af_import
import rst_import
import polygon_import
import run_step1
import run_step2
import export_shp
import plotter
!psql -d finn -c 'CREATE LANGUAGE plpython3u;'
###Output
_____no_output_____
###Markdown
3. Import AF dataset Test active fire data files exist This particular sample AF dataset is provided by the FINN developer. In other applications, it is the user's responsibility to provide the active fire shapefile at the specified path/name.
###Code
# check input file exists
print('checking if input files exist:')
re_shp = re.compile('fire_archive_(.*).shp')
re_zip = re.compile('DL_FIRE_(.*).shp')
re_shp_nrt = re.compile('(MODIS_C6|VNP14IMGTDL_NRT)_(.*).shp')
for i,af_fname in enumerate(af_fnames):
print("%s: " % af_fname, end='')
pn,fn = os.path.split(af_fname)
zname = None
if os.path.exists(af_fname):
print("exists.")
# if .zip file, need to expand.
if af_fname[-4:] == '.shp':
# you are good
print('OK')
elif af_fname[-4:] == '.zip':
# still need to unzip
zname = af_fname
m = re_zip.match(af_fname)
if m:
                arcname = m.groups()[0]
sname = 'fire_archive_%s.shp' % arcname
else:
# i cannot predict name of shp file...
import zipfile
# find what shp file included...?
                raise RuntimeError('specify .shp file in af_names list!')
arcname,sname = None, None
else:
raise RuntimeError('specify .shp file in af_names list!')
else:
print("doesn't exist.")
if af_fname[-4:] == '.shp':
# guess the zip file name
pn,fn=os.path.split(af_fname)
# see if it's the sample giant archive we provide
if fn == 'fire_archive_M6_28864.shp':
zurl = 'https://s3-us-west-2.amazonaws.com/earthlab-finn/2016-global-DL_FIRE_M6_28864.zip'
zn = '2016-global-DL_FIRE_M6_28864.zip'
zname = os.path.join(pn, zn)
sname = fn
if not os.path.exists(zname):
print('downloading the sample AF file: %s' % zn)
subprocess.run(['wget', '-P', pn, zurl], check=True)
else:
# see if it's an archive of AF
m = re_shp.match(fn)
if m:
arcname = m.groups()[0]
zname = os.path.join( pn, 'DL_FIRE_%s.zip' % arcname)
sname = fn
print(' found zip: %s' % zname)
else:
# see if it's NRT data
m = re_shp_nrt.match(fn)
if m:
# NRT downloads
zname = af_fname[:-4] + '.zip'
sname = fn
print(' found zip: %s' % zname)
else:
raise RuntimeError('cannot find file: %s' % af_fname)
else:
raise RuntimeError('cannot find file: %s' % af_fname)
if zname:
print('unzipping: %s' % zname)
subprocess.run(['unzip', '-uo', zname, '-d', os.path.dirname(zname)],
check=True)
assert os.path.exists(os.path.join(pn, sname))
af_fnames[i] = os.path.join(pn, sname)
print('OK: done')
###Output
_____no_output_____
###Markdown
Import active fire data Go ahead and import into the database. Be careful!! The code has no safeguard: it wipes the schema "af_tag_af" and starts over. Let me think the design a bit more for now.
###Code
reload(af_import)
# TODO this is destructive need to safe guard!
# tell user schema is there, list table names and # of row of each. Ask her to delete manually or something to proceed
af_import.main(tag_af, af_fnames)
print()
for i,fn in enumerate(af_fnames):
print(fn)
if len(af_fnames) == 1:
tblname = '"af_%s".af_in' % tag_af
else:
tblname = '"af_%s".af_in_%d' % (tag_af, i+1)
p = subprocess.run(['psql', '-c', 'select count(*) from %s;' % tblname], stdout=subprocess.PIPE)
print(p.stdout.decode())
###Output
_____no_output_____
###Markdown
4. Download raster datasets Settings for Land Surface Datasets (land cover, vegetation continuous field, region definitions)
###Code
# tag to identify datasets, automatically set to be modlct_YYYY, modvcf_YYYY
tag_lct = 'modlct_%d' % year_rst
tag_vcf = 'modvcf_%d' % year_rst
# tag for the region number polygon
tag_regnum = 'regnum'
# definition of variables in the raster files
rasters = [
{
'tag': tag_lct,
'kind': 'thematic',
'variable': 'lct'
},
{
'tag': tag_vcf,
'kind': 'continuous',
'variables': ['tree', 'herb', 'bare'],
},
{
'tag': tag_regnum,
'kind': 'polygons',
'variable_in': 'region_num',
'variable': 'regnum',
},
]
###Output
_____no_output_____
###Markdown
Check if the extent of raster dataset in the database encloses all fire
###Code
# confirm that raster data covers extent of AF data
reload(af_import)
dct = {}
for i,fn in enumerate(af_fnames):
for tag_rst in (tag_lct, tag_vcf):
if len(af_fnames) == 1:
cnts = af_import.check_raster_contains_fire(
'"raster"."skel_rst_%s"' % tag_rst,
'"af_%s"."af_in"' % (tag_af)
)
else:
cnts = af_import.check_raster_contains_fire(
'"raster"."skel_rst_%s"' % tag_rst,
'"af_%s"."af_in_%d"' % (tag_af, i+1)
)
print(os.path.basename(fn), tag_rst, cnts)
dct[(fn,tag_rst)] = cnts
# **TODO** In some cases "fire" is detected over the ocean and there is no raster for that part of the earth
# need to check if that's the case for the 'n_not_contained' fire
need_to_import_raster = False
print()
if any(_['n_not_contained'] > 0 for _ in dct.values()):
    print('Some fires are not contained in the raster')
print('Will download/import raster dataset')
need_to_import_raster = True
else:
    print('All fires are contained in the raster')
print('no need to download/import raster dataset')
###Output
_____no_output_____
###Markdown
Raster files URL and directories to save data
###Code
if need_to_import_raster:
# all raster downloads are stored in following dir
download_rootdir = '../downloads'
if need_to_import_raster:
# earthdata's URL for landcover and VCF
is_leap = (year_rst % 4 == 0)
url_lct = 'https://e4ftl01.cr.usgs.gov/MOTA/MCD12Q1.006/%d.01.01/' % year_rst
url_vcf = 'https://e4ftl01.cr.usgs.gov/MOLT/MOD44B.006/%d.03.%02d/' % (year_rst, 5 if is_leap else 6)
ddir_lct = download_rootdir +'/'+ ''.join(urlparse(url_lct)[1:3])
ddir_vcf = download_rootdir +'/'+ ''.join(urlparse(url_vcf)[1:3])
print('LCT downloads goes to %s' % ddir_lct)
print('VCF downloads goes to %s' % ddir_vcf)
###Output
_____no_output_____
###Markdown
Download land cover type raster
###Code
if need_to_import_raster:
reload(downloader)
downloader.download_only_needed(url = url_lct, droot = download_rootdir, pnts=af_fnames[0])
###Output
_____no_output_____
###Markdown
Verify LCT files' checksum. If a file is corrupted, the file is downloaded again.
###Code
if need_to_import_raster:
downloader.purge_corrupted(ddir = ddir_lct, url=url_lct)
###Output
_____no_output_____
###Markdown
Do similar for vegetation continuous field data
###Code
if need_to_import_raster:
downloader.download_only_needed(url = url_vcf, droot = download_rootdir, pnts=af_fnames[0])
if need_to_import_raster:
downloader.purge_corrupted(ddir_vcf, url=url_vcf)
###Output
_____no_output_____
###Markdown
5. Import raster datasets Downloaded files need preprocessing, which is to extract the only raster band needed, and also make coordinate system to be WGS84. Intermediate files are created in following directories.
###Code
workdir_lct = '../proc_rst_%s' % tag_lct
workdir_vcf = '../proc_rst_%s' % tag_vcf
workdir_regnum = '../proc_rst_%s' % tag_regnum
print('LCT preprocessing occurs in %s' % workdir_lct)
print('VCF preprocessing occurs in %s' % workdir_vcf)
print('RegNum preprocessing occurs in %s' % workdir_regnum)
###Output
_____no_output_____
###Markdown
Import land cover type First grab hdf file names from the download directory
###Code
if need_to_import_raster:
search_string = "%(ddir_lct)s/MCD12Q1.A%(year_rst)s001.h??v??.006.*.hdf" % dict(
ddir_lct = ddir_lct, year_rst=year_rst)
fnames_lct = sorted(glob.glob(search_string))
print('found %d hdf files' % len(fnames_lct) )
if len(fnames_lct) == 0:
raise RuntimeError("check if downloads are successful and search string to be correct: %s" % search_string)
###Output
_____no_output_____
###Markdown
The next command performs three tasks, "merge", "resample" and "import". The first two tasks create intermediate GeoTiff files in work_dir. The last task actually imports the data into the database's raster schema.
###Code
if need_to_import_raster:
reload(rst_import)
rst_import.main(tag_lct, fnames=fnames_lct, workdir = workdir_lct)
###Output
_____no_output_____
###Markdown
At this point you should be able to see the raster in the database using QGIS. I am also trying to make a quick check here by creating a simple image for QA, but use of a GIS tool is encouraged.
###Code
%matplotlib inline
import plotter
reload(plotter)
try:
plotter.plot('raster.o_32_rst_%s' % tag_lct, '../code_anaconda/modlct.clr')
except Exception as e:
print("Got this error: " + str(e))
print("Didn't work, use QGIS!")
pass
###Output
_____no_output_____
###Markdown
Import vegetation continuous fields Analogous steps are repeated for vegetation continuous fields.
###Code
if need_to_import_raster:
# grab hdf file names
search_string = "%(ddir_vcf)s/MOD44B.A%(year)s065.h??v??.006.*.hdf" % dict(
ddir_vcf = ddir_vcf, year=year_rst)
fnames_vcf = sorted(glob.glob(search_string))
print('found %d hdf files' % len(fnames_vcf) )
if len(fnames_vcf) == 0:
raise RuntimeError("check if downloads are successfull and search string to be correct: %s" % search_string)
if need_to_import_raster:
reload(rst_import)
rst_import.main(tag_vcf, fnames=fnames_vcf, workdir = workdir_vcf)
%matplotlib inline
import plotter
reload(plotter)
try:
plotter.plot('raster.o_32_rst_%s' % tag_vcf)
except Exception as e:
print("Got this error: " + str(e))
print("Didn't work, use QGIS!")
pass
###Output
_____no_output_____
###Markdown
Import countries of the world shapefile This is actually not a raster but vector data of polygons. But since it serves conceptually similar function as raster (specify attribute for a given geographic location), I treat it as if it is a raster dataset.
###Code
if not os.path.exists(os.path.join(workdir_regnum, 'All_Countries.shp')):
subprocess.run(['wget', '-P', workdir_regnum,
'https://s3-us-west-2.amazonaws.com/earthlab-finn/All_Countries.zip'],
check=True)
subprocess.run(['unzip', os.path.join(workdir_regnum, 'All_Countries.zip'), '-d' , workdir_regnum ],
check=True)
reload(polygon_import)
polygon_import.main('regnum', shpname = os.path.join(workdir_regnum, 'All_Countries.shp'))
###Output
_____no_output_____
###Markdown
6. Process active fire data
###Code
print('checking if input files exist:')
re_shp = re.compile('fire_archive_(.*).shp')
re_zip = re.compile('DL_FIRE_(.*).shp')
for i,af_fname in enumerate(af_fnames):
print("%s: " % af_fname, end='')
pn,fn = os.path.split(af_fname)
zname = None
if os.path.exists(af_fname):
print("exists.")
# if .zip file, need to expand.
if af_fname[-4:] == '.shp':
# you are good
pass
elif af_fname[-4:] == '.zip':
# still need to unzip
zname = af_fname
m = re_zip.match(af_fname)
if m:
                arcname = m.groups()[0]
sname = 'fire_archive_%s.shp' % arcname
else:
# i cannot predict name of shp file...
import zipfile
# find what shp file included...?
arcname,sname = None, None
else:
raise RuntimeError('need to specify .shp file name')
else:
print("doesn't exist.")
if af_fname[-4:] == '.shp':
# guess zip file name
pn,fn=os.path.split(af_fname)
m = re_shp.match(fn)
if m:
                arcname = m.groups()[0]
zname = os.path.join( pn, 'DL_FIRE_%s.zip' % arcname)
sname = fn
print(' found zip: %s' % zname)
else:
raise RuntimeError('cannot find file: %s' % af_fname)
else:
raise RuntimeError('check file exists.')
if zname:
subprocess.run(['unzip', '-uo', zname, '-d', os.path.dirname(zname)],
check=True)
        assert os.path.exists(os.path.join(pn, sname))
        af_fnames[i] = os.path.join(pn, sname)
###Output
_____no_output_____
###Markdown
Running "step 1" grouping points
###Code
reload(run_step1)
run_step1.main(tag_af, ver='v7m')
###Output
_____no_output_____
###Markdown
Running "step 2" intersection with raster datasets
###Code
reload(run_step2)
assert run_step2.ver == 'v8b'
run_step2.main(tag_af, rasters)
###Output
_____no_output_____
###Markdown
7. Export the output The default output directory is this directory (where you have this Jupyter Notebook file), and the output file has a long name containing the tag of each dataset.
###Code
outdir = '.'
shpname = 'out_{0}_{1}_{2}_{3}.shp'.format(tag_af, tag_lct, tag_vcf, tag_regnum)
schema = 'af_' + tag_af
tblname = 'out_{0}_{1}_{2}'.format(tag_lct, tag_vcf, tag_regnum)
flds = ('v_lct', 'f_lct', 'v_tree', 'v_herb', 'v_bare', 'v_regnum')
reload(export_shp)
export_shp.main(outdir, schema, tblname, flds, shpname)
###Output
_____no_output_____
###Markdown
FINN PreprocessorThis notebook contains the code to preprocess active fire data for the FINN fire emissions model. 1. User specified configurations To run the FINN preprocessor, you should specify the following: 1. `tag_af`: a tag or name for your active fire dataset, e.g., 'modvrs_global_2018'. See below for guidance on picking `tag_af`.2. `af_fnames`: a list of file paths to active fire shape files (e.g., downloaded from [FIRMS](https://firms.modaps.eosdis.nasa.gov/)) 3. `year_rst`: MODIS raster data year to be used for the analysis. It is recommended to use the data from a year prior to the Active Fire data, as the impact of a large fire may affect the VCF product of that year, resulting in a smaller estimate of fuel loadings.4. `filter_persistent_source`: the Active Fire product has an inferred hot spot "type" field (0 = presumed vegetation fire, 1 = active volcano, 2 = other static land source, 3 = offshore) ref: Table 9 of [Giglio et al. "MODIS Collection 6 Active Fire Product User’s Guide Revision B" 2018](https://cdn.earthdata.nasa.gov/conduit/upload/10575/MODIS_C6_Fire_User_Guide_B.pdf). By making this option True, "1 active volcano" and "2 other static land source" detections are dropped. Recommended to keep this True.5. `export_frp`: experimental implementation to export the arithmetic average FRP value of all accepted observations within each divided polygon. Default is False, not to calculate this.6. `download_global_raster`: By setting this option to True, the code downloads all HDF raster LCT/VCF files for the specified year. By making this False, it identifies only the HDF files needed to cover the extent of the AF input. For a regional/continental application with a fixed area of interest, this would make sense. For a global application, a multiple-continent application, or if your area of interest changes a lot across runs, it may make sense to make this True to grab everything, because finding which HDF file is needed does take some time. Default value is False.It is recommended to use a descriptive name which identifies the data source (modis vs viirs), spatial extent and time period, e.g. `modvrs_global_2018` for MODIS/VIIRS combined detection for global scale modeling for year 2018, or `mod_tx_2012sum` for MODIS detection for Texas for Summer 2012 (definition of "summer" is implicit here).The variable `tag_af` is used as part of PostGIS database schema and table names ("[SQL Identifier](https://www.postgresql.org/docs/current/sql-syntax-lexical.htmlSQL-SYNTAX-IDENTIFIERS)"), and it has to start with a letter (a-z, A-Z) or underscore (\_) followed by letters/underscores/digits (0-9).Final output files will be named "out_tag_af_*.csv" and "out_tag_af_*.shp".It is also recommended that you create a copy of the directory containing this notebook (`work_generic`) and process your custom run there, so that each active fire processing run is housed in a different directory. The main reason is that this notebook is tied to the GitHub repository, so if the repository gets updated and you want to pull the changes, it may cause conflicts if this file has modifications by you. By making a copy before you edit, main_generic.ipynb can be updated from the repository. 
Default settingsTo use a small example dataset that is bundled with the FINN preprocessor, you can use the following settings:```pythontag_af = 'testOTS_092018'af_fnames = [ '../sample_datasets/fire/testOTS_092018/fire_archive_M6_23960.shp', '../sample_datasets/fire/testOTS_092018/fire_archive_V1_23961.shp',]year_rst = 2017filter_persistent_sources = Truedownload_global_raster = False``` Custom settings with MODIS and VIIRS**If you have both custom MODIS and VIIRS shapefiles** you must specify the file path(s) to the active fire data shapefiles. Ensure that the shapefiles are unzipped and placed within the `finn_preproc/data/` directory. You may specify as many files of mixed type as you like (MODIS or VIIRS, archived data vs near real time data). The tool goes through each dataset, identifies the data source, and processes it accordingly. All data are used simultaneously to yield one unified set of outputs.It is encouraged to use the `tag_af` variable wisely, as explained earlier. For example, "modvrs_conus_2016" to represent MODIS/VIIRS hybrid processing for the contiguous United States region for year 2016. In other words, `{data_source}_{geographic_region}_{time_period}` would be a possible convention of yours.It is also recommended to use a raster from the year before the fire, as explained above.```pythontag_af = 'modvrs_global_2018'af_fname = [ '../data/fire_archive_M6_...', '../data/fire_archive_V1_...']year_rst = 2017filter_persistent_sources = Truedownload_global_raster = True``` Custom settings with MODIS or VIIRSAlternatively, if you have just MODIS or VIIRS, you can specify one file path. For example, in the example below there is only one M6 file provided (supposedly for summer 2012 for Texas). ```pythontag_af = 'mod_tx_2012sum'af_fname = [ '../data/fire_archive_M6_...',]year_rst = 2011filter_persistent_sources = Truedownload_global_raster = False```
###Code
tag_af = 'testOTS_092018'
af_fnames = [
'../sample_datasets/fire/testOTS_092018/fire_archive_M6_23960.shp',
'../sample_datasets/fire/testOTS_092018/fire_archive_V1_23961.shp',
]
year_rst = 2017
filter_persistent_sources = True
export_frp = False
download_global_raster = False
###Output
_____no_output_____
###Markdown
As long as the default set of rasters is used (MODIS LCT, MODIS VCF and the global region definition of Wiedinmyer 2011), there is no need to edit the following sections. However, the cell below specifies the data source of each raster (in terms of the table name inside the `raster` schema) and how the data are processed. `variable` specifies the variable name in the final output CSV file. The `kind` field specifies whether the data are to be treated as thematic (categorical) data or continuous data.
###Code
# tag to identify datasets, automatically set to be modlct_YYYY, modvcf_YYYY
tag_lct = 'modlct_%d' % year_rst
tag_vcf = 'modvcf_%d' % year_rst
# tag for the region number polygon
tag_regnum = 'regnum'
# definition of variables in the raster files
rasters = [
{
'tag': tag_lct,
'kind': 'thematic',
'variable': 'lct'
},
{
'tag': tag_vcf,
'kind': 'continuous',
'variables': ['tree', 'herb', 'bare'],
},
{
'tag': tag_regnum,
'kind': 'polygons',
'variable_in': 'region_num',
'variable': 'regnum',
},
]
if export_frp:
tag_frp = 'frp'
rasters.append(
{
'tag': tag_frp,
'kind': 'input',
'variable_in': 'frp',
'variable': 'frp',
}
)
###Output
_____no_output_____
###Markdown
The options below specify whether extra output/logs will be saved
###Code
# save *.shp of the output, so that you can hold onto polygons
save_shp_of_output = False
# save *.html version of this notebook upon exit, so that you can keep records
save_html_of_notebook = True
###Output
_____no_output_____
###Markdown
The options below delete the intermediate files in section 8, after the processing is finished. Set these to True if you know that this notebook works (with your settings) and you don't need to review the intermediates.
###Code
# deletes entire schema in the database for the AF data processed in this notebook
wipe_intermediate_vector_in_db = False
# like above, but instead of deleting the entire schema, delete only the giant intermediate tables in it,
# so that you can still QA the data in the db, if needed
# (set wipe_intermediate_vector_in_db to False to use this feature)
clean_intermediate_vector_in_db = True
# deletes hdf files downloaded from EARTHDATA for particular year used in this notebook
wipe_downloaded_hdf = True
# deletes intermediate geotiff files (found in proc_rst_XXX directory) for particular year used in this notebook
wipe_intermediate_geotiff = True
# deletes table of raster data imported into database (praticular year used in this notebook)
wipe_intermediate_rst_in_db = False
###Output
_____no_output_____
###Markdown
The rest of code below should run without modification. 2. Generic preparation of the preprocessor system (linux) environment for debugging purpose
###Code
!env | sort
###Output
_____no_output_____
###Markdown
For QA purpose, echo what version of postgresql/postgis got used
###Code
!psql -d finn -c "select version();"
!psql -d finn -c "select postgis_full_version();"
###Output
_____no_output_____
###Markdown
python version used in postgresql/plpython
###Code
!psql -d finn -f ../code_anaconda/testpy.sql
###Output
_____no_output_____
###Markdown
python module import
###Code
# python libraries
import sys
import os
import re
import glob
import datetime
import subprocess
import time
import shlex
from urllib.parse import urlparse
from importlib import reload
import gdal
import matplotlib.pylab as plt
import psycopg2
# finn preproc codes
sys.path = sys.path + ['../code_anaconda']
import downloader
import af_import
import rst_import
import polygon_import
import run_step1
import run_step2
import export_shp
import plotter
import run_extra
import notebook_util
###Output
_____no_output_____
###Markdown
Need PL/Python3
###Code
try:
subprocess.run(['psql', '-d', 'finn', '-c', 'CREATE LANGUAGE plpython3u;'], check=True, stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
if 'already exists' in e.stderr.decode():
print(e.stderr.decode().replace('ERROR','OK').strip())
else:
print('\n\nGot Error!!!')
print(e.stderr.decode())
raise e
###Output
_____no_output_____
###Markdown
Need wireframe of modis files
###Code
reload(rst_import)
rst_import.prep_modis_tile()
###Output
_____no_output_____
###Markdown
3. Import AF dataset Test active fire data files exist This particular sample AF dataset is provided by the FINN developer. In other applications, it is the user's responsibility to provide the active fire shapefile at the specified path/name.
###Code
# check input file exists
print('checking if input files exist:')
re_shp = re.compile('fire_archive_(.*).shp')
re_zip = re.compile('DL_FIRE_(.*).shp')
re_shp_nrt = re.compile('(MODIS_C6|VNP14IMGTDL_NRT)_(.*).shp')
for i,af_fname in enumerate(af_fnames):
print("%s: " % af_fname, end='')
pn,fn = os.path.split(af_fname)
zname = None
if os.path.exists(af_fname):
print("exists.")
# if .zip file, need to expand.
if af_fname[-4:] in ('.shp', '.csv'):
# you are good
print('OK')
elif af_fname[-4:] == '.zip':
# still need to unzip
zname = af_fname
            # assume that this is a DL_FIRE*.zip file from FIRMS
# look for shape file name
import zipfile
myshp = [_ for _ in zipfile.ZipFile(zname).namelist() if _.endswith('.shp')]
if len(myshp) != 0:
sname = myshp[0]
m = re_shp.match(sname)
assert m
else:
                raise RuntimeError('specify .shp file in af_names list!')
arcname,sname = None, None
else:
raise RuntimeError('specify .shp or .csv file in af_names list!')
else:
print("doesn't exist.")
if af_fname[-4:] == '.shp':
# guess the zip file name
pn,fn=os.path.split(af_fname)
# see if it's the sample giant archive we provide
if fn == 'fire_archive_M6_28864.shp':
zurl = 'https://s3-us-west-2.amazonaws.com/earthlab-finn/2016-global-DL_FIRE_M6_28864.zip'
zn = '2016-global-DL_FIRE_M6_28864.zip'
zname = os.path.join(pn, zn)
sname = fn
if not os.path.exists(zname):
print('downloading the sample AF file: %s' % zn)
subprocess.run(['wget', '-P', pn, zurl], check=True)
else:
# see if it's an archive of AF
m = re_shp.match(fn)
if m:
arcname = m.groups()[0]
zname = os.path.join( pn, 'DL_FIRE_%s.zip' % arcname)
sname = fn
print(' found zip: %s' % zname)
else:
# see if it's NRT data
m = re_shp_nrt.match(fn)
if m:
# NRT downloads
zname = af_fname[:-4] + '.zip'
sname = fn
print(' found zip: %s' % zname)
else:
raise RuntimeError('cannot find file: %s' % af_fname)
else:
raise RuntimeError('cannot find file: %s' % af_fname)
if zname:
print('unzipping: %s' % zname)
subprocess.run(['unzip', '-uo', zname, '-d', os.path.dirname(zname)],
check=True)
assert os.path.exists(os.path.join(pn, sname))
af_fnames[i] = os.path.join(pn, sname)
print('OK: done')
###Output
_____no_output_____
###Markdown
Import active fire data Go ahead and import into the database. Be careful!! The code has no safeguard: it wipes the schema "af_tag_af" and starts over. Let me think the design a bit more for now.
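If you want to check by hand what is already in the schema before re-running the import, a minimal sketch (using the same psql-via-subprocess pattern as the rest of this notebook) is:
```python
# optional: list tables already present in the "af_<tag_af>" schema before re-importing
qry = "SELECT table_name FROM information_schema.tables WHERE table_schema = 'af_%s';" % tag_af
p = subprocess.run(['psql', '-d', 'finn', '-c', qry], stdout=subprocess.PIPE)
print(p.stdout.decode())
```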
###Code
reload(af_import)
# TODO this is destructive need to safe guard!
# tell user schema is there, list table names and # of row of each. Ask her to delete manually or something to proceed
af_import.main(tag_af, af_fnames)
print()
for i,fn in enumerate(af_fnames):
print(fn)
tblname = '"af_%s".af_in_%d' % (tag_af, i+1)
p = subprocess.run(['psql', '-c', 'select count(*) from %s;' % tblname], stdout=subprocess.PIPE)
print(p.stdout.decode())
###Output
_____no_output_____
###Markdown
Show the spatial distribution of the imported active fire points, just for QA purposes. You should also be able to use QGIS to visualize the imported data under schema "af_XXX", table "af_in_N", where XXX is the tag_af you specified in the first box and N is the serial number of the active fire file you listed.
###Code
reload(plotter)
try:
plotter.plotter.plot(['"af_%s".af_in_%d' % (tag_af, i+1)
for i,fn in enumerate(af_fnames)], density=True)
except Exception as e:
print("Got this error: " + str(e))
print("Didn't work, use QGIS!")
pass
###Output
_____no_output_____
###Markdown
4. Download raster datasets Check if the extent of raster dataset in the database encloses all fire
###Code
reload(downloader)
if download_global_raster:
results_indb = downloader.find_tiles_indb(data='POLYGON((-180 89,-180 -89,180 -89,180 89,-180 89))',
knd='wkt', tag_lct=tag_lct, tag_vcf=tag_vcf)
else:
results_indb = downloader.find_tiles_indb(data='"af_%s"' % tag_af,
knd='schema', tag_lct=tag_lct, tag_vcf=tag_vcf)
print(results_indb)
print()
if results_indb['n_need'] == 0:
    print('All fires are contained in the raster')
print('no need to download/import raster dataset')
need_to_import_lct = False
need_to_import_vcf = False
else:
    print('Some fires are not contained in the raster')
print('Will download/import raster dataset')
need_to_import_lct = (len(results_indb['tiles_missing_lct']) > 0)
need_to_import_vcf = (len(results_indb['tiles_missing_vcf']) > 0)
tiles_required_lct = results_indb['tiles_required_lct']
tiles_required_vcf = results_indb['tiles_required_vcf']
print()
reload(downloader)
need_to_import_regnum = not downloader.find_table_indb('raster', 'rst_%s' % tag_regnum)
if need_to_import_regnum:
    print('Region definition shapefile will be imported')
else:
    print('no need to import Region definition shapefile')
# Date range of active fire
# TODO use this to set "year_rst" ?
reload(af_import)
af_dates = af_import.get_dates(schema = '"af_%s"'%tag_af, combined=True)
print('first day in AF file:', af_dates.min())
print('last day in AF file:', af_dates.max())
###Output
_____no_output_____
###Markdown
Raster files URL and directories to save data
###Code
# all raster downloads are stored in following dir
download_rootdir = '../downloads'
# earthdata's URL for landcover and VCF
is_leap = (year_rst % 4 == 0)
url_lct = 'https://e4ftl01.cr.usgs.gov/MOTA/MCD12Q1.006/%d.01.01/' % year_rst
url_vcf = 'https://e4ftl01.cr.usgs.gov/MOLT/MOD44B.006/%d.03.%02d/' % (year_rst, 5 if is_leap else 6)
ddir_lct = download_rootdir +'/'+ ''.join(urlparse(url_lct)[1:3])
ddir_vcf = download_rootdir +'/'+ ''.join(urlparse(url_vcf)[1:3])
if any((need_to_import_lct, need_to_import_vcf)):
print('LCT downloads goes to %s' % ddir_lct)
print('VCF downloads goes to %s' % ddir_vcf)
###Output
_____no_output_____
###Markdown
Download land cover type raster
###Code
if need_to_import_lct:
reload(downloader)
downloader.download_only_needed(url = url_lct, droot = download_rootdir, tiles=tiles_required_lct)
###Output
_____no_output_____
###Markdown
Verify LCT files' checksum. If a file is corrupted, the file is downloaded again.
###Code
if need_to_import_lct:
downloader.purge_corrupted(ddir = ddir_lct, url=url_lct)
###Output
_____no_output_____
###Markdown
Do similar for vegetation continuous field data
###Code
if need_to_import_vcf:
downloader.download_only_needed(url = url_vcf, droot = download_rootdir, tiles=tiles_required_vcf)
if need_to_import_vcf:
downloader.purge_corrupted(ddir_vcf, url=url_vcf)
###Output
_____no_output_____
###Markdown
5. Import raster datasets Downloaded files need preprocessing, which is to extract only the raster band needed and to reproject the coordinate system to WGS84. Intermediate files are created in the following directories.
###Code
workdir_lct = '../proc_rst_%s' % tag_lct
workdir_vcf = '../proc_rst_%s' % tag_vcf
workdir_regnum = '../proc_rst_%s' % tag_regnum
if need_to_import_lct:
print('LCT preprocessing occurs in %s' % workdir_lct)
if need_to_import_vcf:
print('VCF preprocessing occurs in %s' % workdir_vcf)
if need_to_import_regnum:
print('RegNum preprocessing occurs in %s' % workdir_regnum)
###Output
_____no_output_____
###Markdown
Import land cover type First grab hdf file names from the download directory
###Code
if need_to_import_lct:
search_string = "%(ddir_lct)s/MCD12Q1.A%(year_rst)s001.h??v??.006.*.hdf" % dict(
ddir_lct = ddir_lct, year_rst=year_rst)
fnames_lct = sorted(glob.glob(search_string))
print('found %d hdf files' % len(fnames_lct) )
if len(fnames_lct) == 0:
raise RuntimeError("check if downloads are successful and search string to be correct: %s" % search_string)
###Output
_____no_output_____
###Markdown
The next command performs three tasks, "merge", "resample" and "import". The first two tasks create intermediate GeoTiff files in work_dir. The last task actually imports the data into the database's raster schema.
###Code
if need_to_import_lct:
reload(rst_import)
rst_import.main(tag_lct, fnames=fnames_lct, workdir = workdir_lct)
###Output
_____no_output_____
###Markdown
At this point you should be able to see the raster in the database using QGIS. It is located in the schema "raster", under one of three table names, "rst_modlct_YYYY", "o_32_rst_modlct_YYYY", or "o_256_rst_modlct_YYYY", where YYYY is the year of the raster. The "o_" versions of the raster are overviews for visualizing small scale maps (greater spatial extent), whereas the table without "o_" holds the actual data used in processing. I am also trying to make a quick check here by creating a simple image for QA, but use of a GIS tool is encouraged.
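Besides the plot below, the intermediate GeoTiffs can be sanity-checked directly with GDAL. This is only a sketch and assumes rst_import left `*.tif` files in `workdir_lct`, which may not hold for your setup:
```python
# optional sanity check of intermediate GeoTiffs (assumption: *.tif files exist in workdir_lct)
for tif in sorted(glob.glob(os.path.join(workdir_lct, '*.tif')))[:3]:
    ds = gdal.Open(tif)
    if ds is None:
        continue  # not a readable raster
    print(os.path.basename(tif), ds.RasterXSize, 'x', ds.RasterYSize)
    ds = None  # close the dataset
```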
###Code
%matplotlib inline
import plotter
reload(plotter)
try:
plotter.plotter.plot('raster.o_32_rst_%s' % tag_lct, '../code_anaconda/modlct.clr')
except Exception as e:
try:
plotter.plotter.plot('raster.o_256_rst_%s' % tag_lct, '../code_anaconda/modlct.clr')
except Exception as e2:
print("Got this error: " + str(e))
print("Didn't work, use QGIS!")
pass
###Output
_____no_output_____
###Markdown
Import vegetation continuous fields Analogous steps are repeated for vegetation continuous fields.
###Code
if need_to_import_vcf:
# grab hdf file names
search_string = "%(ddir_vcf)s/MOD44B.A%(year)s065.h??v??.006.*.hdf" % dict(
ddir_vcf = ddir_vcf, year=year_rst)
fnames_vcf = sorted(glob.glob(search_string))
print('found %d hdf files' % len(fnames_vcf) )
if len(fnames_vcf) == 0:
raise RuntimeError("check if downloads are successfull and search string to be correct: %s" % search_string)
if need_to_import_vcf:
reload(rst_import)
rst_import.main(tag_vcf, fnames=fnames_vcf, workdir = workdir_vcf)
%matplotlib inline
import plotter
reload(plotter)
try:
plotter.plotter.plot('raster.o_32_rst_%s' % tag_vcf)
except Exception as e:
try:
plotter.plotter.plot('raster.o_256_rst_%s' % tag_vcf)
except Exception as e2:
print("Got this error: " + str(e))
print("Didn't work, use QGIS!")
pass
###Output
_____no_output_____
###Markdown
Import countries of the world shapefile This is actually not a raster but vector data of polygons. But since it serves conceptually similar function as raster (specify attribute for a given geographic location), I treat it as if it is a raster dataset.
###Code
if need_to_import_regnum:
if not os.path.exists(os.path.join(workdir_regnum, 'All_Countries.shp')):
subprocess.run(['wget', '-P', workdir_regnum,
'https://s3-us-west-2.amazonaws.com/earthlab-finn/All_Countries.zip'],
check=True)
subprocess.run(['unzip', os.path.join(workdir_regnum, 'All_Countries.zip'), '-d' , workdir_regnum ],
check=True)
if need_to_import_regnum:
reload(polygon_import)
polygon_import.main(tag_regnum, shpname = os.path.join(workdir_regnum, 'All_Countries.shp'))
###Output
_____no_output_____
###Markdown
6. Process active fire data Running "step 1" grouping points
###Code
reload(run_step1)
run_step1.main(tag_af, filter_persistent_sources = filter_persistent_sources)
###Output
_____no_output_____
###Markdown
Running "step 2" intersection with raster datasets
###Code
reload(run_step2)
run_step2.main(tag_af, rasters)
###Output
_____no_output_____
###Markdown
7. Export the output The default output directory is this directory (where you have this Jupyter Notebook file), and the output file has a long name containing the tag of each dataset.
###Code
tag_rasters = '_'.join([rst['tag'] for rst in rasters])
outdir = '.'
shpname = 'out_{tag_af}_{tag_rasters}.shp'.format(tag_af=tag_af, tag_rasters=tag_rasters)
###Output
_____no_output_____
###Markdown
Name of schema/table/fields in the database, all defined in `rasters` variables near the top of this notebook
###Code
schema = 'af_' + tag_af
tblname = 'out_{tag_rasters}'.format(tag_rasters=tag_rasters)
fields_to_export = []
for rst in rasters:
if rst['kind'] == 'thematic':
fields_to_export.extend([prefix+'_'+rst['variable'] for prefix in ('v', 'f')])
else:
if 'variables' in rst:
fields_to_export.extend(['v_'+v for v in rst['variables']])
else:
fields_to_export.append('v_'+rst['variable'])
###Output
_____no_output_____
###Markdown
Export
###Code
reload(export_shp)
export_shp.main(outdir, schema, tblname, fields_to_export, shpname,
csvonly=(not save_shp_of_output))
###Output
_____no_output_____
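###Markdown
A quick way to eyeball the exported table is to read the CSV back in. This is only a sketch: it assumes export_shp wrote a CSV into `outdir` with the same base name as `shpname`; adjust `csvname` if your output is named differently.
```python
# sketch: load the exported CSV for a quick look (filename is an assumption, see note above)
import pandas as pd
csvname = os.path.join(outdir, shpname.replace('.shp', '.csv'))
df = pd.read_csv(csvname)
print(df.shape)
df.head()
```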
###Markdown
Summary of processing
###Code
reload(run_extra)
run_extra.summarize_log(tag_af)
###Output
_____no_output_____
###Markdown
8. Disk use summary and clean up (if you wish) Below is a summary of disk use, together with code which optionally reclaims space by removing intermediates. Point/Polygons All intermediate datasets for vector processing are stored in the database. See the usage of individual tables and the grand total below.
###Code
qry_af = """SELECT table_schema || '.' || table_name AS table_full_name,
pg_size_pretty(pg_total_relation_size('"' || table_schema || '"."' || table_name || '"')) AS size
FROM information_schema.tables
WHERE table_schema = '%(sch_af)s'
ORDER BY pg_total_relation_size('"' || table_schema || '"."' || table_name || '"') DESC;""" % dict(
sch_af=('af_%s' % tag_af),
)
qry_af_tot = """SELECT table_schema,
pg_size_pretty(sum(pg_total_relation_size('"' || table_schema || '"."' || table_name || '"'))) AS size
FROM information_schema.tables
WHERE table_schema = '%(sch_af)s'
GROUP BY table_schema;""" % dict(
sch_af=('af_%s' % tag_af),
)
print('Disk use by AF processing intermediate tables inside the database\n')
p = subprocess.run(['psql', '-d', 'finn', '-c', qry_af], stdout=subprocess.PIPE, check=True)
print(p.stdout.decode())
print('Total\n')
p = subprocess.run(['psql', '-d', 'finn', '-c', qry_af_tot], stdout=subprocess.PIPE, check=True)
print(p.stdout.decode())
###Output
_____no_output_____
###Markdown
If all processing above is successful and you would like to reclaim the disk space, you can change the value of `wipe_intermediate_vector_in_db` defined in the first section of this notebook to `True`.
###Code
# cleans intermediate vector
if wipe_intermediate_vector_in_db == True:
# wipe out completely
print(tag_af)
sch_af = 'af_%s' % tag_af
print(sch_af)
qry = 'DROP SCHEMA "%s" CASCADE;' % sch_af
cmd = ['psql', '-d', os.environ["PGDATABASE"], '-c', qry]
subprocess.run(cmd, check=True)
elif clean_intermediate_vector_in_db == True:
    # be selective and leave work_pnt (imported/cleaned points) and out_* (results being exported)
reload(run_extra)
run_extra.clean_db_af(tag_af, rasters)
###Output
_____no_output_____
###Markdown
Raster Intermediate data for the raster datasets are located in three different places. First, the original HDF format files you downloaded from the EarthData website. Second, the GeoTiff format data prepared for importing into PostGIS. Third, the raster dataset inside the PostGIS database. Disk space usage of each is summarized below.
###Code
cmd = ['du', '-csh', ddir_lct, ddir_vcf]
p = subprocess.run(cmd, stdout=subprocess.PIPE)
print('Disk use by downloaded raster hdf files')
print(p.stdout.decode())
cmd = ['du', '-csh', workdir_lct, workdir_vcf]
p = subprocess.run(cmd, stdout=subprocess.PIPE)
print('Disk use by intermediate raster processing files')
print(p.stdout.decode())
qry_rst = """SELECT table_schema || '.' || table_name AS table_full_name,
pg_size_pretty(pg_total_relation_size('"' || table_schema || '"."' || table_name || '"')) AS size
FROM information_schema.tables
WHERE table_name ~ '^.*(%(tbl_lct)s|%(tbl_vcf)s)'
ORDER BY pg_total_relation_size('"' || table_schema || '"."' || table_name || '"') DESC;""" % dict(
tbl_lct=('rst_%s' % tag_lct),
tbl_vcf=('rst_%s' % tag_vcf),
)
qry_rst_tot = """SELECT table_schema,
pg_size_pretty(sum(pg_total_relation_size('"' || table_schema || '"."' || table_name || '"'))) AS size
FROM information_schema.tables
WHERE table_name ~ '^.*(%(tbl_lct)s|%(tbl_vcf)s)'
GROUP BY table_schema;""" % dict(
sch_af=('af_%s' % tag_af),
tbl_lct=('rst_%s' % tag_lct),
tbl_vcf=('rst_%s' % tag_vcf),
)
print('Disk use by raster dataset in the database\n')
p = subprocess.run(['psql', '-d', 'finn', '-c', qry_rst], stdout=subprocess.PIPE, check=True)
print(p.stdout.decode())
print('Total for %(tag_lct)s and %(tag_vcf)s\n' % dict(tag_lct=tag_lct, tag_vcf=tag_vcf))
p = subprocess.run(['psql', '-d', 'finn', '-c', qry_rst_tot], stdout=subprocess.PIPE, check=True)
print(p.stdout.decode())
# cleans intermediate raster
if wipe_downloaded_hdf == True:
# ditch entire download directory for the year
tgts = [ddir_lct, ddir_vcf]
cmd = ['rm', '-fr', ] + tgts
print(cmd)
subprocess.run(cmd, check=True)
if wipe_intermediate_geotiff == True:
# ditch entire processing directory
tgts = [workdir_lct, workdir_vcf]
cmd = ['rm', '-fr', ] + tgts
print(cmd)
subprocess.run(cmd, check=True)
if wipe_intermediate_rst_in_db == True:
# delete each table from the schema 'raster'
reload(rst_import)
rst_import.drop_tables(tag_lct)
rst_import.drop_tables(tag_vcf)
###Output
_____no_output_____
###Markdown
Save hardcopy of this notebook as html file
###Code
if save_html_of_notebook:
notebook_util.save_checkpoint()
nb_path = notebook_util.notebook_path()
print(nb_path)
time.sleep(5)
status = os.system('jupyter nbconvert --to html ' + os.path.basename(nb_path))
if status == 0:
print('html saved')
else:
print('save html failed')
###Output
_____no_output_____
###Markdown
FINN PreprocessorThis notebook contains the code to preprocess active fire data for the FINN fire emissions model. 1. User specified configurations To run the FINN preprocessor, you should specify the following: 1. `tag_af`: a tag or name for your active fire dataset, e.g., `'my-af-data'`2. `af_fnames`: a list of file paths to active fire shape files (e.g., downloaded from [FIRMS](https://firms.modaps.eosdis.nasa.gov/)) 3. `year_rst`: MODIS raster data year to be used for the analysisFinal output files will be named "out_tag_af_*.csv" and "out_tag_af_*.shp". Default settingsTo use a small example dataset that is bundled with the FINN preprocessor, you can use the following settings:```pythontag_af = 'testOTS_092018'af_fnames = [ '../sample_datasets/fire/testOTS_092018/fire_archive_M6_23960.shp', '../sample_datasets/fire/testOTS_092018/fire_archive_V1_23961.shp',]year_rst = 2017``` Custom settings with MODIS and VIIRS**If you have custom MODIS and VIIRS shapefiles** you must specify the file path(s) to the active fire data shapefiles. Ensure that the shapefiles are unzipped and placed within the `finn_preproc/data/` directory:```pythontag_af = 'custom-job'af_fname = [ '../data/fire_archive_M6_...', '../data/fire_archive_V1_...']year_rst = 2017``` Custom settings with MODIS or VIIRSAlternatively, if you have just MODIS or VIIRS, you can specify one file path, e.g.:```pythontag_af = 'custom-job'af_fname = [ '../data/fire_archive_M6_...',]year_rst = 2017```
###Code
# tag to identify active fire dataset
tag_af = 'mod_global_2016'
# shp file names
af_fnames = [
'../sample_datasets/fire/global_2016/fire_archive_M6_28864.shp',
]
# MODIS raster datasets' year
year_rst = 2016
###Output
_____no_output_____
###Markdown
The rest of code below should run without modification. 2. Module imports
###Code
# python libraries
import sys
import os
import re
import glob
import datetime
import subprocess
import shlex
from urllib.parse import urlparse
from importlib import reload
import gdal
import matplotlib.pylab as plt
# finn preproc codes
sys.path = sys.path + ['../code_anaconda']
import downloader
import af_import
import rst_import
import polygon_import
import run_step1
import run_step2
import export_shp
import plotter
!psql -d finn -c 'CREATE LANGUAGE plpython3u;'
###Output
_____no_output_____
###Markdown
3. Import AF dataset Test active fire data files exist This particular sample AF dataset is provided by the FINN developer. In other applications, it is the user's responsibility to provide the active fire shapefile at the specified path/name.
###Code
# check input file exists
print('checking if input files exist:')
re_shp = re.compile('fire_archive_(.*).shp')
re_zip = re.compile('DL_FIRE_(.*).shp')
re_shp_nrt = re.compile('(MODIS_C6|VNP14IMGTDL_NRT)_(.*).shp')
for i,af_fname in enumerate(af_fnames):
print("%s: " % af_fname, end='')
pn,fn = os.path.split(af_fname)
zname = None
if os.path.exists(af_fname):
print("exists.")
# if .zip file, need to expand.
if af_fname[-4:] == '.shp':
# you are good
print('OK')
elif af_fname[-4:] == '.zip':
# still need to unzip
zname = af_fname
m = re_zip.match(af_fname)
if m:
                arcname = m.groups()[0]
sname = 'fire_archive_%s.shp' % arcname
else:
# i cannot predict name of shp file...
import zipfile
# find what shp file included...?
                raise RuntimeError('specify .shp file in af_names list!')
arcname,sname = None, None
else:
raise RuntimeError('specify .shp file in af_names list!')
else:
print("doesn't exist.")
if af_fname[-4:] == '.shp':
# guess the zip file name
pn,fn=os.path.split(af_fname)
# see if it's the sample giant archive we provide
if fn == 'fire_archive_M6_28864.shp':
zurl = 'https://s3-us-west-2.amazonaws.com/earthlab-finn/2016-global-DL_FIRE_M6_28864.zip'
zn = '2016-global-DL_FIRE_M6_28864.zip'
zname = os.path.join(pn, zn)
sname = fn
if not os.path.exists(zname):
print('downloading the sample AF file: %s' % zn)
subprocess.run(['wget', '-P', pn, zurl], check=True)
else:
# see if it's an archive of AF
m = re_shp.match(fn)
if m:
arcname = m.groups()[0]
zname = os.path.join( pn, 'DL_FIRE_%s.zip' % arcname)
sname = fn
print(' found zip: %s' % zname)
else:
# see if it's NRT data
m = re_shp_nrt.match(fn)
if m:
# NRT downloads
zname = af_fname[:-4] + '.zip'
sname = fn
print(' found zip: %s' % zname)
else:
raise RuntimeError('cannot find file: %s' % af_fname)
else:
raise RuntimeError('cannot find file: %s' % af_fname)
if zname:
print('unzipping: %s' % zname)
subprocess.run(['unzip', '-uo', zname, '-d', os.path.dirname(zname)],
check=True)
assert os.path.exists(os.path.join(pn, sname))
af_fnames[i] = os.path.join(pn, sname)
print('OK: done')
###Output
_____no_output_____
###Markdown
Import active fire data Go ahead and import into the database. Be careful!! The code has no safeguard: it wipes the schema "af_tag_af" and starts over. Let me think the design a bit more for now.
###Code
reload(af_import)
# TODO this is destructive need to safe guard!
# tell user schema is there, list table names and # of row of each. Ask her to delete manually or something to proceed
af_import.main(tag_af, af_fnames)
print()
for i,fn in enumerate(af_fnames):
print(fn)
tblname = '"af_%s".af_in_%d' % (tag_af, i+1)
p = subprocess.run(['psql', '-c', 'select count(*) from %s;' % tblname], stdout=subprocess.PIPE)
print(p.stdout.decode())
###Output
_____no_output_____
###Markdown
4. Download raster datasets Settings for Land Surface Datasets (land cover, vegetation continuous field, region definitions)
###Code
# tag to identify datasets, automatically set to be modlct_YYYY, modvcf_YYYY
tag_lct = 'modlct_%d' % year_rst
tag_vcf = 'modvcf_%d' % year_rst
# tag for the region number polygon
tag_regnum = 'regnum'
# definition of variables in the raster files
rasters = [
{
'tag': tag_lct,
'kind': 'thematic',
'variable': 'lct'
},
{
'tag': tag_vcf,
'kind': 'continuous',
'variables': ['tree', 'herb', 'bare'],
},
{
'tag': tag_regnum,
'kind': 'polygons',
'variable_in': 'region_num',
'variable': 'regnum',
},
]
###Output
_____no_output_____
###Markdown
Check if the extent of raster dataset in the database encloses all fire
###Code
reload(downloader)
results_indb = downloader.find_tiles_indb(data='"af_%s"' % tag_af, knd='schema', tag_lct=tag_lct, tag_vcf=tag_vcf)
print(results_indb)
if results_indb['n_need'] == 0:
    print('All fires are contained in the raster')
print('no need to download/import raster dataset')
need_to_import_lct = False
need_to_import_vcf = False
else:
    print('Some fires are not contained in the raster')
print('Will download/import raster dataset')
need_to_import_lct = (len(results_indb['tiles_missing_lct']) > 0)
need_to_import_vcf = (len(results_indb['tiles_missing_vcf']) > 0)
tiles_required_lct = results_indb['tiles_required_lct']
tiles_required_vcf = results_indb['tiles_required_vcf']
need_to_import_regnum = not downloader.find_table_indb('raster', '"rst_%s"' % tag_regnum)
# Date range of active fire
# TODO use this to set "year_rst" ?
reload(af_import)
af_dates = af_import.get_dates(schema = '"af_%s"'%tag_af, combined=True)
print('first day in AF file:', af_dates.min())
print('last day in AF file:', af_dates.max())
###Output
_____no_output_____
###Markdown
Raster files URL and directories to save data
###Code
if any((need_to_import_lct, need_to_import_vcf)):
# all raster downloads are stored in following dir
download_rootdir = '../downloads'
if any((need_to_import_lct, need_to_import_vcf)):
# earthdata's URL for landcover and VCF
is_leap = (year_rst % 4 == 0)
url_lct = 'https://e4ftl01.cr.usgs.gov/MOTA/MCD12Q1.006/%d.01.01/' % year_rst
url_vcf = 'https://e4ftl01.cr.usgs.gov/MOLT/MOD44B.006/%d.03.%02d/' % (year_rst, 5 if is_leap else 6)
ddir_lct = download_rootdir +'/'+ ''.join(urlparse(url_lct)[1:3])
ddir_vcf = download_rootdir +'/'+ ''.join(urlparse(url_vcf)[1:3])
print('LCT downloads goes to %s' % ddir_lct)
print('VCF downloads goes to %s' % ddir_vcf)
###Output
_____no_output_____
###Markdown
Download land cover type raster
###Code
if need_to_import_lct:
reload(downloader)
downloader.download_only_needed(url = url_lct, droot = download_rootdir, tiles=tiles_required_lct)
###Output
_____no_output_____
###Markdown
Verify LCT files' checksum. If a file is corrupted, the file is downloaded again.
###Code
if need_to_import_lct:
downloader.purge_corrupted(ddir = ddir_lct, url=url_lct)
###Output
_____no_output_____
###Markdown
Do similar for vegetation continuous field data
###Code
if need_to_import_vcf:
downloader.download_only_needed(url = url_vcf, droot = download_rootdir, tiles=tiles_required_vcf)
if need_to_import_vcf:
downloader.purge_corrupted(ddir_vcf, url=url_vcf)
###Output
_____no_output_____
###Markdown
5. Import raster datasets Downloaded files need preprocessing, which is to extract only the raster band needed and to reproject the coordinate system to WGS84. Intermediate files are created in the following directories.
###Code
workdir_lct = '../proc_rst_%s' % tag_lct
workdir_vcf = '../proc_rst_%s' % tag_vcf
workdir_regnum = '../proc_rst_%s' % tag_regnum
print('LCT preprocessing occurs in %s' % workdir_lct)
print('VCF preprocessing occurs in %s' % workdir_vcf)
print('RegNum preprocessing occurs in %s' % workdir_regnum)
###Output
_____no_output_____
###Markdown
Import land cover type First grab hdf file names from the download directory
###Code
if need_to_import_lct:
search_string = "%(ddir_lct)s/MCD12Q1.A%(year_rst)s001.h??v??.006.*.hdf" % dict(
ddir_lct = ddir_lct, year_rst=year_rst)
fnames_lct = sorted(glob.glob(search_string))
print('found %d hdf files' % len(fnames_lct) )
if len(fnames_lct) == 0:
raise RuntimeError("check if downloads are successful and search string to be correct: %s" % search_string)
###Output
_____no_output_____
###Markdown
The next command performs three tasks, "merge", "resample" and "import". The first two tasks create intermediate GeoTiff files in work_dir. The last task actually imports the data into the database's raster schema.
###Code
if need_to_import_lct:
reload(rst_import)
rst_import.main(tag_lct, fnames=fnames_lct, workdir = workdir_lct)
###Output
_____no_output_____
###Markdown
At this point you should be able to see the raster in the database using QGIS. I am also trying to make a quick check here by creating a simple image for QA, but use of a GIS tool is encouraged.
###Code
%matplotlib inline
import plotter
reload(plotter)
try:
plotter.plot('raster.o_32_rst_%s' % tag_lct, '../code_anaconda/modlct.clr')
except Exception as e:
print("Got this error: " + str(e))
print("Didn't work, use QGIS!")
pass
###Output
_____no_output_____
###Markdown
Import vegetation continuous fields Analogous steps are repeated for vegetation continuous fields.
###Code
if need_to_import_vcf:
# grab hdf file names
search_string = "%(ddir_vcf)s/MOD44B.A%(year)s065.h??v??.006.*.hdf" % dict(
ddir_vcf = ddir_vcf, year=year_rst)
fnames_vcf = sorted(glob.glob(search_string))
print('found %d hdf files' % len(fnames_vcf) )
if len(fnames_vcf) == 0:
raise RuntimeError("check if downloads are successfull and search string to be correct: %s" % search_string)
if need_to_import_vcf:
reload(rst_import)
rst_import.main(tag_vcf, fnames=fnames_vcf, workdir = workdir_vcf)
%matplotlib inline
import plotter
reload(plotter)
try:
plotter.plot('raster.o_32_rst_%s' % tag_vcf)
except Exception as e:
print("Got this error: " + str(e))
print("Didn't work, use QGIS!")
pass
###Output
_____no_output_____
###Markdown
Import countries of the world shapefile This is actually not a raster but vector data of polygons. But since it serves conceptually similar function as raster (specify attribute for a given geographic location), I treat it as if it is a raster dataset.
###Code
if need_to_import_regnum:
if not os.path.exists(os.path.join(workdir_regnum, 'All_Countries.shp')):
subprocess.run(['wget', '-P', workdir_regnum,
'https://s3-us-west-2.amazonaws.com/earthlab-finn/All_Countries.zip'],
check=True)
subprocess.run(['unzip', os.path.join(workdir_regnum, 'All_Countries.zip'), '-d' , workdir_regnum ],
check=True)
if need_to_import_regnum:
reload(polygon_import)
polygon_import.main('regnum', shpname = os.path.join(workdir_regnum, 'All_Countries.shp'))
###Output
_____no_output_____
###Markdown
6. Process active fire data
###Code
print('checking if input files exist:')
re_shp = re.compile('fire_archive_(.*).shp')
re_zip = re.compile('DL_FIRE_(.*).shp')
for i,af_fname in enumerate(af_fnames):
print("%s: " % af_fname, end='')
pn,fn = os.path.split(af_fname)
zname = None
if os.path.exists(af_fname):
print("exists.")
# if .zip file, need to expand.
if af_fname[-4:] == '.shp':
# you are good
pass
elif af_fname[-4:] == '.zip':
# still need to unzip
zname = af_fname
m = re_zip.match(af_fname)
if m:
                arcname = m.groups()[0]
sname = 'fire_archive_%s.shp' % arcname
else:
# i cannot predict name of shp file...
import zipfile
# find what shp file included...?
arcname,sname = None, None
else:
raise RuntimeError('need to specify .shp file name')
else:
print("doesn't exist.")
if af_fname[-4:] == '.shp':
# guess zip file name
pn,fn=os.path.split(af_fname)
m = re_shp.match(fn)
if m:
                arcname = m.groups()[0]
zname = os.path.join( pn, 'DL_FIRE_%s.zip' % arcname)
sname = fn
print(' found zip: %s' % zname)
else:
raise RuntimeError('cannot find file: %s' % af_fname)
else:
raise RuntimeError('check file exists.')
if zname:
subprocess.run(['unzip', '-uo', zname, '-d', os.path.dirname(zname)],
check=True)
        assert os.path.exists(os.path.join(pn, sname))
        af_fnames[i] = os.path.join(pn, sname)
###Output
_____no_output_____
###Markdown
Running "step 1" grouping points
###Code
reload(run_step1)
run_step1.main(tag_af, ver='v7m')
###Output
_____no_output_____
###Markdown
Running "step 2" intersection with raster datasets
###Code
reload(run_step2)
assert run_step2.ver == 'v8b'
run_step2.main(tag_af, rasters)
###Output
_____no_output_____
###Markdown
7. Export the output The default output directory is this directory (where you have this Jupyter Notebook file), and the output file has a long name containing the tag of each dataset.
###Code
outdir = '.'
shpname = 'out_{0}_{1}_{2}_{3}.shp'.format(tag_af, tag_lct, tag_vcf, tag_regnum)
schema = 'af_' + tag_af
tblname = 'out_{0}_{1}_{2}'.format(tag_lct, tag_vcf, tag_regnum)
flds = ('v_lct', 'f_lct', 'v_tree', 'v_herb', 'v_bare', 'v_regnum')
reload(export_shp)
export_shp.main(outdir, schema, tblname, flds, shpname)
###Output
_____no_output_____
###Markdown
FINN PreprocessorThis notebook contains the code to preprocess active fire data for the FINN fire emissions model. 1. User specified configurations To run the FINN preprocessor, you should specify the following: 1. `tag_af`: a tag or name for your active fire dataset, e.g., `'my_af_data'`. See below for guidance on picking `tag_af`.2. `af_fnames`: a list of file paths to active fire shape files (e.g., downloaded from [FIRMS](https://firms.modaps.eosdis.nasa.gov/)) 3. `year_rst`: MODIS raster data year to be used for the analysisIt is recommended to use a descriptive name which identifies the data source (modis vs viirs), spatial extent and time period, e.g. `modvrs_global_2018` for MODIS/VIIRS combined detection for global scale modeling for year 2018, or `mod_tx_2012sum` for MODIS detection for Texas for Summer 2012 (definition of "summer" is implicit here).The variable `tag_af` is used as part of PostGIS database schema and table names ("[SQL Identifier](https://www.postgresql.org/docs/current/sql-syntax-lexical.htmlSQL-SYNTAX-IDENTIFIERS)"), and it has to start with a letter (a-z, A-Z) or underscore (\_) followed by letters/underscores/digits (0-9).Final output files will be named "out_tag_af_*.csv" and "out_tag_af_*.shp". Default settingsTo use a small example dataset that is bundled with the FINN preprocessor, you can use the following settings:```pythontag_af = 'testOTS_092018'af_fnames = [ '../sample_datasets/fire/testOTS_092018/fire_archive_M6_23960.shp', '../sample_datasets/fire/testOTS_092018/fire_archive_V1_23961.shp',]year_rst = 2017``` Custom settings with MODIS and VIIRS**If you have custom MODIS and VIIRS shapefiles** you must specify the file path(s) to the active fire data shapefiles. Ensure that the shapefiles are unzipped and placed within the `finn_preproc/data/` directory:```pythontag_af = 'custom_job1'af_fname = [ '../data/fire_archive_M6_...', '../data/fire_archive_V1_...']year_rst = 2017``` Custom settings with MODIS or VIIRSAlternatively, if you have just MODIS or VIIRS, you can specify one file path, e.g.:```pythontag_af = 'custom_job2'af_fname = [ '../data/fire_archive_M6_...',]year_rst = 2017```
###Code
tag_af = 'testOTS_092018'
af_fnames = [
'../sample_datasets/fire/testOTS_092018/fire_archive_M6_23960.shp',
'../sample_datasets/fire/testOTS_092018/fire_archive_V1_23961.shp',
]
year_rst = 2017
filter_persistent_sources = True
###Output
_____no_output_____
###Markdown
The rest of code below should run without modification. 2. Generic preparation of the preprocessor system (linux) environment for debugging purpose
###Code
!env | sort
###Output
CLICOLOR=1
DEBIAN_FRONTEND=noninteractive
EARTHDATAPW=J0N0Puedo
EARTHDATAUSER=yosuke
GIT_PAGER=cat
HOME=/root
HOSTNAME=f93f1cf8daed
JPY_PARENT_PID=420
KERNEL_LAUNCH_TIMEOUT=40
LC_CTYPE=C.UTF-8
MPLBACKEND=module://ipykernel.pylab.backend_inline
PAGER=cat
PATH=/opt/conda/bin:/opt/conda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
PGDATABASE=finn
PGHOST=localhost
PGPASSWORD=finn
PGPORT=5432
PGUSER=finn
POSTGRES_DBNAME=finn
POSTGRES_PASS=finn
POSTGRES_USER=finn
PWD=/home/finn/work_generic
TERM=xterm-color
###Markdown
python module import
###Code
# python libraries
import sys
import os
import re
import glob
import datetime
import subprocess
import shlex
from urllib.parse import urlparse
from importlib import reload
import gdal
import matplotlib.pylab as plt
import psycopg2
# finn preproc codes
sys.path = sys.path + ['../code_anaconda']
import downloader
import af_import
import rst_import
import polygon_import
import run_step1
import run_step2
import export_shp
import plotter
import run_extra
###Output
_____no_output_____
###Markdown
Need PL/Python3
###Code
try:
subprocess.run(['psql', '-d', 'finn', '-c', 'CREATE LANGUAGE plpython3u;'], check=True, stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
if 'already exists' in e.stderr.decode():
print(e.stderr.decode().replace('ERROR','OK').strip())
else:
print('\n\nGot Error!!!')
print(e.stderr.decode())
raise e
###Output
OK: language "plpython3u" already exists
###Markdown
python version used in postgresql/plpython
###Code
!psql -d finn -f ../code_anaconda/testpy.sql
###Output
CREATE FUNCTION
testpy
---------------------------------------------
3.7.3 (default, Apr 3 2019, 05:39:12) +
[GCC 8.3.0] | numpy: 1.17.0 | networkx: 2.3
(1 row)
###Markdown
Need wireframe of modis files
###Code
reload(rst_import)
rst_import.prep_modis_tile()
###Output
OK: wireframe already exists
###Markdown
3. Import AF dataset Test active fire data files exist This particular sample AF dataset is provided by the FINN developer. In other applications, it is the user's responsibility to provide the active fire shapefile at the specified path/name.
###Code
# check input file exists
print('checking if input files exist:')
re_shp = re.compile('fire_archive_(.*).shp')
re_zip = re.compile('DL_FIRE_(.*).shp')
re_shp_nrt = re.compile('(MODIS_C6|VNP14IMGTDL_NRT)_(.*).shp')
for i,af_fname in enumerate(af_fnames):
print("%s: " % af_fname, end='')
pn,fn = os.path.split(af_fname)
zname = None
if os.path.exists(af_fname):
print("exists.")
# if .zip file, need to expand.
if af_fname[-4:] == '.shp':
# you are good
print('OK')
elif af_fname[-4:] == '.zip':
# still need to unzip
zname = af_fname
m = re_zip.match(af_fname)
if m:
arcname = m.groups()[0]
sname = 'fire_archive_%s.shp' % arcname
else:
# i cannot predict name of shp file...
import zipfile
# find what shp file included...?
raise RuntimeError('specify .shp file in af_fnames list!')
arcname,sname = None, None
else:
raise RuntimeError('specify .shp file in af_fnames list!')
else:
print("doesn't exist.")
if af_fname[-4:] == '.shp':
# guess the zip file name
pn,fn=os.path.split(af_fname)
# see if it's the sample giant archive we provide
if fn == 'fire_archive_M6_28864.shp':
zurl = 'https://s3-us-west-2.amazonaws.com/earthlab-finn/2016-global-DL_FIRE_M6_28864.zip'
zn = '2016-global-DL_FIRE_M6_28864.zip'
zname = os.path.join(pn, zn)
sname = fn
if not os.path.exists(zname):
print('downloading the sample AF file: %s' % zn)
subprocess.run(['wget', '-P', pn, zurl], check=True)
else:
# see if it's an archive of AF
m = re_shp.match(fn)
if m:
arcname = m.groups()[0]
zname = os.path.join( pn, 'DL_FIRE_%s.zip' % arcname)
sname = fn
print(' found zip: %s' % zname)
else:
# see if it's NRT data
m = re_shp_nrt.match(fn)
if m:
# NRT downloads
zname = af_fname[:-4] + '.zip'
sname = fn
print(' found zip: %s' % zname)
else:
raise RuntimeError('cannot find file: %s' % af_fname)
else:
raise RuntimeError('cannot find file: %s' % af_fname)
if zname:
print('unzipping: %s' % zname)
subprocess.run(['unzip', '-uo', zname, '-d', os.path.dirname(zname)],
check=True)
assert os.path.exists(os.path.join(pn, sname))
af_fnames[i] = os.path.join(pn, sname)
print('OK: done')
###Output
checking if input files exist:
../sample_datasets/fire/testOTS_092018/fire_archive_M6_23960.shp: exists.
OK
../sample_datasets/fire/testOTS_092018/fire_archive_V1_23961.shp: exists.
OK
###Markdown
Import active fire data Go ahead and import into the database. Be careful!! The code has no safeguard and wipes the schema "af_tag_af" and starts over. Let me think the design over a bit more for now.
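Until a proper safeguard is added, a minimal sketch along these lines (my own addition, not part of `af_import`; it only reads the PostgreSQL catalog) can warn you when the schema already exists so you can back it up or drop it deliberately first:
```python
import subprocess

def schema_exists(tag):
    # query the catalog for the schema the importer is about to (re)create
    qry = "SELECT 1 FROM information_schema.schemata WHERE schema_name = 'af_%s';" % tag
    p = subprocess.run(['psql', '-d', 'finn', '-tAc', qry], stdout=subprocess.PIPE, check=True)
    return p.stdout.decode().strip() == '1'

if schema_exists(tag_af):
    print('schema "af_%s" already exists and will be wiped by af_import.main()' % tag_af)
```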
###Code
reload(af_import)
# TODO this is destructive need to safe guard!
# tell user schema is there, list table names and # of row of each. Ask her to delete manually or something to proceed
af_import.main(tag_af, af_fnames)
print()
for i,fn in enumerate(af_fnames):
print(fn)
tblname = '"af_%s".af_in_%d' % (tag_af, i+1)
p = subprocess.run(['psql', '-c', 'select count(*) from %s;' % tblname], stdout=subprocess.PIPE)
print(p.stdout.decode())
###Output
['psql', '-c', 'CREATE SCHEMA "af_testOTS_092018";']
cmd:
ogr2ogr -progress -f PostgreSQL -overwrite PG:dbname='finn' -lco SPATIAL_INDEX=GIST -lco SCHEMA=af_testOTS_092018 -lco GEOMETRY_NAME=geom -lco FID=gid -nln af_in_1 ../sample_datasets/fire/testOTS_092018/fire_archive_M6_23960.shp
['ogr2ogr', '-progress', '-f', 'PostgreSQL', '-overwrite', "PG:dbname='finn'", '-lco', 'SPATIAL_INDEX=GIST', '-lco', 'SCHEMA=af_testOTS_092018', '-lco', 'GEOMETRY_NAME=geom', '-lco', 'FID=gid', '-nln', 'af_in_1', '../sample_datasets/fire/testOTS_092018/fire_archive_M6_23960.shp']
cmd:
ogr2ogr -progress -f PostgreSQL -overwrite PG:dbname='finn' -lco SPATIAL_INDEX=GIST -lco SCHEMA=af_testOTS_092018 -lco GEOMETRY_NAME=geom -lco FID=gid -nln af_in_2 ../sample_datasets/fire/testOTS_092018/fire_archive_V1_23961.shp
['ogr2ogr', '-progress', '-f', 'PostgreSQL', '-overwrite', "PG:dbname='finn'", '-lco', 'SPATIAL_INDEX=GIST', '-lco', 'SCHEMA=af_testOTS_092018', '-lco', 'GEOMETRY_NAME=geom', '-lco', 'FID=gid', '-nln', 'af_in_2', '../sample_datasets/fire/testOTS_092018/fire_archive_V1_23961.shp']
../sample_datasets/fire/testOTS_092018/fire_archive_M6_23960.shp
count
-------
498
(1 row)
../sample_datasets/fire/testOTS_092018/fire_archive_V1_23961.shp
count
-------
2037
(1 row)
###Markdown
Show the spatial distribution of the imported active fire points, just for QA purposes. You should also be able to use QGIS to visualize the actual imported data under schema "af_XXX", table "af_in_N", where XXX is the tag_af you specified in the first box and N is the serial number of the active fire file you listed.
###Code
reload(plotter)
try:
plotter.plotter.plot(['"af_%s".af_in_%d' % (tag_af, i+1)
for i,fn in enumerate(af_fnames)], density=True)
except Exception as e:
print("Got this error: " + str(e))
print("Didn't work, use QGIS!")
pass
###Output
['"af_testOTS_092018".af_in_1', '"af_testOTS_092018".af_in_2']
[[-125. 38. -116. 47.]
[-124. 38. -116. 47.]]
Driver : GeoTIFF
Files : ['/vsimem/from_postgis']
Size : (169, 169)
Coordinate System : GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]
Origin : (-125.0, 47.0)
Pixel Size : (0.05333333333333334, -0.05333333333333334)
Metadata : {}
OrderedDict([('No Data Value', 0.0), ('Min', None), ('Max', None), ('Scale', None), ('Unit Type', '')])
(169, 169)
###Markdown
4. Download raster datasets Settings for Land Surface Datasets (land cover, vegetation continuous field, region definitions)
###Code
# tag to identify datasets, automatically set to be modlct_YYYY, modvcf_YYYY
tag_lct = 'modlct_%d' % year_rst
tag_vcf = 'modvcf_%d' % year_rst
# tag for the region number polygon
tag_regnum = 'regnum'
# definition of variables in the raster files
rasters = [
{
'tag': tag_lct,
'kind': 'thematic',
'variable': 'lct'
},
{
'tag': tag_vcf,
'kind': 'continuous',
'variables': ['tree', 'herb', 'bare'],
},
{
'tag': tag_regnum,
'kind': 'polygons',
'variable_in': 'region_num',
'variable': 'regnum',
},
]
###Output
_____no_output_____
###Markdown
Check if the extent of the raster dataset in the database encloses all fire detections
###Code
reload(downloader)
results_indb = downloader.find_tiles_indb(data='"af_%s"' % tag_af, knd='schema', tag_lct=tag_lct, tag_vcf=tag_vcf)
print(results_indb)
print()
if results_indb['n_need'] == 0:
print('All fires are contained in the raster')
print('no need to download/import raster dataset')
need_to_import_lct = False
need_to_import_vcf = False
else:
print('Some fires are not contained in the raster')
print('Will download/import raster dataset')
need_to_import_lct = (len(results_indb['tiles_missing_lct']) > 0)
need_to_import_vcf = (len(results_indb['tiles_missing_vcf']) > 0)
tiles_required_lct = results_indb['tiles_required_lct']
tiles_required_vcf = results_indb['tiles_required_vcf']
print()
reload(downloader)
need_to_import_regnum = not downloader.find_table_indb('raster', 'rst_%s' % tag_regnum)
if need_to_import_regnum:
print('Region definition shapefile will be imported')
else:
print('no need to import Region definition shapefile')
# Date range of active fire
# TODO use this to set "year_rst" ?
reload(af_import)
af_dates = af_import.get_dates(schema = '"af_%s"'%tag_af, combined=True)
print('first day in AF file:', af_dates.min())
print('last day in AF file:', af_dates.max())
###Output
first day in AF file: 2017-07-14
last day in AF file: 2017-07-21
###Markdown
Raster file URLs and directories to save data
###Code
# all raster downloads are stored in following dir
download_rootdir = '../downloads'
# earthdata's URL for landcover and VCF
is_leap = (year_rst % 4 == 0)
url_lct = 'https://e4ftl01.cr.usgs.gov/MOTA/MCD12Q1.006/%d.01.01/' % year_rst
url_vcf = 'https://e4ftl01.cr.usgs.gov/MOLT/MOD44B.006/%d.03.%02d/' % (year_rst, 5 if is_leap else 6)
ddir_lct = download_rootdir +'/'+ ''.join(urlparse(url_lct)[1:3])
ddir_vcf = download_rootdir +'/'+ ''.join(urlparse(url_vcf)[1:3])
if any((need_to_import_lct, need_to_import_vcf)):
print('LCT downloads goes to %s' % ddir_lct)
print('VCF downloads goes to %s' % ddir_vcf)
###Output
_____no_output_____
###Markdown
Download land cover type raster
###Code
if need_to_import_lct:
reload(downloader)
downloader.download_only_needed(url = url_lct, droot = download_rootdir, tiles=tiles_required_lct)
###Output
_____no_output_____
###Markdown
Verify the LCT files' checksums. If a file is corrupted, it is downloaded again.
###Code
if need_to_import_lct:
downloader.purge_corrupted(ddir = ddir_lct, url=url_lct)
###Output
_____no_output_____
###Markdown
Do the same for the vegetation continuous fields data
###Code
if need_to_import_vcf:
downloader.download_only_needed(url = url_vcf, droot = download_rootdir, tiles=tiles_required_vcf)
if need_to_import_vcf:
downloader.purge_corrupted(ddir_vcf, url=url_vcf)
###Output
_____no_output_____
###Markdown
5. Import raster datasets Downloaded files need preprocessing: extract only the raster band needed and reproject the coordinate system to WGS84. Intermediate files are created in the following directories.
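For reference, the kind of GDAL operation applied to each downloaded HDF is roughly the following sketch (the tile file name, the subdataset/band name and the output path are assumptions for illustration; the actual options used by `rst_import` may differ):
```python
import gdal

# hypothetical tile name, for illustration only
hdf_name = ddir_lct + '/MCD12Q1.A%d001.h08v05.006.hdf' % year_rst
# pick one band (assumed here to be the LC_Type1 land cover layer) ...
subdataset = 'HDF4_EOS:EOS_GRID:"%s":MCD12Q1:LC_Type1' % hdf_name
# ... and reproject it from the MODIS sinusoidal grid to a WGS84 GeoTiff
gdal.Warp('example_lct_wgs84.tif', subdataset, dstSRS='EPSG:4326')
```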
###Code
workdir_lct = '../proc_rst_%s' % tag_lct
workdir_vcf = '../proc_rst_%s' % tag_vcf
workdir_regnum = '../proc_rst_%s' % tag_regnum
if need_to_import_lct:
print('LCT preprocessing occurs in %s' % workdir_lct)
if need_to_import_vcf:
print('VCF preprocessing occurs in %s' % workdir_vcf)
if need_to_import_regnum:
print('RegNum preprocessing occurs in %s' % workdir_regnum)
###Output
_____no_output_____
###Markdown
Import land cover type First grab hdf file names from the download directory
###Code
if need_to_import_lct:
search_string = "%(ddir_lct)s/MCD12Q1.A%(year_rst)s001.h??v??.006.*.hdf" % dict(
ddir_lct = ddir_lct, year_rst=year_rst)
fnames_lct = sorted(glob.glob(search_string))
print('found %d hdf files' % len(fnames_lct) )
if len(fnames_lct) == 0:
raise RuntimeError("check if downloads are successful and the search string is correct: %s" % search_string)
###Output
_____no_output_____
###Markdown
The next command performs three tasks: "merge", "resample" and "import". The first two tasks create intermediate GeoTiff files in work_dir. The last task actually imports the data into the database's raster schema.
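For orientation, a generic PostGIS raster load of such a GeoTiff would look like the command below (shown with the standard `raster2pgsql` tool and illustrative file/tile-size names; this is not necessarily what `rst_import` runs internally):
```python
# illustrative only: print a generic PostGIS raster import command for a merged GeoTiff
cmd = ('raster2pgsql -s 4326 -t 240x240 -I %s/merged_wgs84.tif raster.rst_%s | psql -d finn'
       % (workdir_lct, tag_lct))
print(cmd)
```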
###Code
if need_to_import_lct:
reload(rst_import)
rst_import.main(tag_lct, fnames=fnames_lct, workdir = workdir_lct)
###Output
_____no_output_____
###Markdown
At this point you should be able to see the raster in the database using QGIS. It is located in schema "raster", in one of three tables: "rst_modlct_YYYY", "o_32_rst_modlct_YYYY", or "o_256_rst_modlct_YYYY", where YYYY is the raster year. The "o_" versions of the raster are overviews for visualization on small-scale maps (greater spatial extent), whereas the table without "o_" holds the actual data used in processing. I also make a quick check here by creating a simple image for QA, but the use of a GIS tool is encouraged.
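To confirm the tables exist without opening QGIS, a generic PostGIS catalog query such as this sketch (my addition; `raster_columns` is the standard PostGIS view) lists the imported raster tables:
```python
# list raster tables whose name ends with the LCT tag
qry = ("SELECT r_table_schema, r_table_name, srid FROM raster_columns "
       "WHERE r_table_name LIKE '%rst_" + tag_lct + "';")
p = subprocess.run(['psql', '-d', 'finn', '-c', qry], stdout=subprocess.PIPE, check=True)
print(p.stdout.decode())
```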
###Code
%matplotlib inline
import plotter
reload(plotter)
try:
plotter.plotter.plot('raster.o_32_rst_%s' % tag_lct, '../code_anaconda/modlct.clr')
except Exception as e:
try:
plotter.plotter.plot('raster.o_256_rst_%s' % tag_lct, '../code_anaconda/modlct.clr')
except Exception as e2:
print("Got this error: " + str(e))
print("Didn't work, use QGIS!")
pass
###Output
Driver : GeoTIFF
Files : ['/vsimem/from_postgis']
Size : (6750, 3187)
Coordinate System : GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4326"]]
Origin : (-180.0, 80.0)
Pixel Size : (0.0533333344, -0.0533333344)
Metadata : {'AREA_OR_POINT': 'Area'}
OrderedDict([('No Data Value', 255.0), ('Min', None), ('Max', None), ('Scale', 1.0), ('Unit Type', '')])
(3187, 6750)
###Markdown
Import vegetation continuous fields Analogous steps are repeated for the vegetation continuous fields.
###Code
if need_to_import_vcf:
# grab hdf file names
search_string = "%(ddir_vcf)s/MOD44B.A%(year)s065.h??v??.006.*.hdf" % dict(
ddir_vcf = ddir_vcf, year=year_rst)
fnames_vcf = sorted(glob.glob(search_string))
print('found %d hdf files' % len(fnames_vcf) )
if len(fnames_vcf) == 0:
raise RuntimeError("check if downloads are successful and the search string is correct: %s" % search_string)
if need_to_import_vcf:
reload(rst_import)
rst_import.main(tag_vcf, fnames=fnames_vcf, workdir = workdir_vcf)
%matplotlib inline
import plotter
reload(plotter)
try:
plotter.plotter.plot('raster.o_32_rst_%s' % tag_vcf)
except Exception as e:
try:
plotter.plotter.plot('raster.o_256_rst_%s' % tag_vcf)
except Exception as e2:
print("Got this error: " + str(e))
print("Didn't work, use QGIS!")
pass
###Output
Driver : GeoTIFF
Files : ['/vsimem/from_postgis']
Size : (6750, 2625)
Coordinate System : GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4326"]]
Origin : (-180.0, 80.0)
Pixel Size : (0.0533333344, -0.0533333344)
Metadata : {'AREA_OR_POINT': 'Area'}
OrderedDict([('No Data Value', 255.0), ('Min', None), ('Max', None), ('Scale', 1.0), ('Unit Type', '')])
OrderedDict([('No Data Value', 255.0), ('Min', None), ('Max', None), ('Scale', 1.0), ('Unit Type', '')])
OrderedDict([('No Data Value', 255.0), ('Min', None), ('Max', None), ('Scale', 1.0), ('Unit Type', '')])
(2625, 6750, 3)
###Markdown
Import countries of the world shapefile This is actually not a raster but vector data of polygons. But since it serves a conceptually similar function to a raster (it specifies an attribute for a given geographic location), I treat it as if it were a raster dataset.
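As an illustration of that raster-like role, once imported the table can answer "which region contains this point" with an ordinary point-in-polygon query; the sketch below assumes the table is `raster.rst_regnum` with columns `region_num` and `geom` (the column names are my assumption):
```python
# hypothetical lookup of the region number containing a lon/lat point
qry = """SELECT region_num FROM raster.rst_regnum
WHERE ST_Intersects(geom, ST_SetSRID(ST_MakePoint(-120.5, 42.0), 4326));"""
p = subprocess.run(['psql', '-d', 'finn', '-c', qry], stdout=subprocess.PIPE)
print(p.stdout.decode())
```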
###Code
if need_to_import_regnum:
if not os.path.exists(os.path.join(workdir_regnum, 'All_Countries.shp')):
subprocess.run(['wget', '-P', workdir_regnum,
'https://s3-us-west-2.amazonaws.com/earthlab-finn/All_Countries.zip'],
check=True)
subprocess.run(['unzip', os.path.join(workdir_regnum, 'All_Countries.zip'), '-d' , workdir_regnum ],
check=True)
if need_to_import_regnum:
reload(polygon_import)
polygon_import.main(tag_regnum, shpname = os.path.join(workdir_regnum, 'All_Countries.shp'))
###Output
_____no_output_____
###Markdown
6. Process active fire data Running "step 1" grouping points
###Code
reload(run_step1)
run_step1.main(tag_af, filter_persistent_sources = filter_persistent_sources)
###Output
starting prep: 2019-12-17 20:47:44.827252
['psql', '-f', '../code_anaconda/step1_prep_v7m.sql', '-v', 'tag=testOTS_092018', '-v', 'filter_persistent_sources=True']
(datetime.date(2017, 7, 13), datetime.date(2017, 7, 21))
0
starting work 2017-07-13: 2019-12-17 20:47:45.408321
starting work 2017-07-14: 2019-12-17 20:47:45.839864
starting work 2017-07-15: 2019-12-17 20:47:46.620364
starting work 2017-07-16: 2019-12-17 20:47:47.819895
starting work 2017-07-17: 2019-12-17 20:47:48.738391
starting work 2017-07-18: 2019-12-17 20:47:49.359145
starting work 2017-07-19: 2019-12-17 20:47:49.843436
starting work 2017-07-20: 2019-12-17 20:47:50.266411
starting work 2017-07-21: 2019-12-17 20:47:50.756180
###Markdown
Running "step 2" intersection with raster datasets
###Code
reload(run_step2)
run_step2.main(tag_af, rasters)
###Output
_____no_output_____
###Markdown
7. Export the output Default output directory is this directory (where you have this Jupyter Notebook file), and the output file has a long name containing the tag of each dataset.
###Code
outdir = '.'
shpname = 'out_{0}_{1}_{2}_{3}.shp'.format(tag_af, tag_lct, tag_vcf, tag_regnum)
schema = 'af_' + tag_af
tblname = 'out_{0}_{1}_{2}'.format(tag_lct, tag_vcf, tag_regnum)
flds = ('v_lct', 'f_lct', 'v_tree', 'v_herb', 'v_bare', 'v_regnum')
reload(export_shp)
export_shp.main(outdir, schema, tblname, flds, shpname)
###Output
_____no_output_____
###Markdown
Summary of processing
###Code
reload(run_extra)
run_extra.summarize_log(tag_af)
###Output
_____no_output_____
###Markdown
8. Disk use summary and clean up (if you wish) Below is a summary of disk use and code which optionally reclaims disk space by removing intermediates. Point/Polygons All intermediate datasets for vector processing are stored in the database. See the usage of individual tables and the grand total below.
###Code
qry_af = """SELECT table_schema || '.' || table_name AS table_full_name,
pg_size_pretty(pg_total_relation_size('"' || table_schema || '"."' || table_name || '"')) AS size
FROM information_schema.tables
WHERE table_schema = '%(sch_af)s'
ORDER BY pg_total_relation_size('"' || table_schema || '"."' || table_name || '"') DESC;""" % dict(
sch_af=('af_%s' % tag_af),
)
qry_af_tot = """SELECT table_schema,
pg_size_pretty(sum(pg_total_relation_size('"' || table_schema || '"."' || table_name || '"'))) AS size
FROM information_schema.tables
WHERE table_schema = '%(sch_af)s'
GROUP BY table_schema;""" % dict(
sch_af=('af_%s' % tag_af),
)
print('Disk use by AF processing intermediate tables inside the database\n')
p = subprocess.run(['psql', '-d', 'finn', '-c', qry_af], stdout=subprocess.PIPE, check=True)
print(p.stdout.decode())
print('Total\n')
p = subprocess.run(['psql', '-d', 'finn', '-c', qry_af_tot], stdout=subprocess.PIPE, check=True)
print(p.stdout.decode())
###Output
_____no_output_____
###Markdown
If all processing above is successful and you would like to reclaim the disk space, change the value of `wipe_intermediate_vector` below to `True` and run the cell.
###Code
wipe_intermediate_vector = False
if wipe_intermediate_vector == True:
print(tag_af)
sch_af = 'af_%s' % tag_af
print(sch_af)
qry = 'DROP SCHEMA "%s" CASCADE;' % sch_af
cmd = ['psql', '-d', os.environ["PGDATABASE"], '-c', qry]
subprocess.run(cmd, check=True)
###Output
_____no_output_____
###Markdown
Raster Intermediate data for the raster datasets are located in three different places. First, the original HDF format files you downloaded from the EarthData website. Second, the GeoTiff format data prepared for importing into the PostGIS database. Third, the raster dataset inside the PostGIS database. Disk space usage of each is summarized below.
###Code
cmd = ['du', '-csh', ddir_lct, ddir_vcf]
p = subprocess.run(cmd, stdout=subprocess.PIPE)
print('Disk use by downloaded raster hdf files')
print(p.stdout.decode())
cmd = ['du', '-csh', workdir_lct, workdir_vcf]
p = subprocess.run(cmd, stdout=subprocess.PIPE)
print('Disk use by intermediate raster processing files')
print(p.stdout.decode())
qry_rst = """SELECT table_schema || '.' || table_name AS table_full_name,
pg_size_pretty(pg_total_relation_size('"' || table_schema || '"."' || table_name || '"')) AS size
FROM information_schema.tables
WHERE table_name ~ '^.*(%(tbl_lct)s|%(tbl_vcf)s)'
ORDER BY pg_total_relation_size('"' || table_schema || '"."' || table_name || '"') DESC;""" % dict(
tbl_lct=('rst_%s' % tag_lct),
tbl_vcf=('rst_%s' % tag_vcf),
)
qry_rst_tot = """SELECT table_schema,
pg_size_pretty(sum(pg_total_relation_size('"' || table_schema || '"."' || table_name || '"'))) AS size
FROM information_schema.tables
WHERE table_name ~ '^.*(%(tbl_lct)s|%(tbl_vcf)s)'
GROUP BY table_schema;""" % dict(
sch_af=('af_%s' % tag_af),
tbl_lct=('rst_%s' % tag_lct),
tbl_vcf=('rst_%s' % tag_vcf),
)
print('Disk use by raster dataset in the database\n')
p = subprocess.run(['psql', '-d', 'finn', '-c', qry_rst], stdout=subprocess.PIPE, check=True)
print(p.stdout.decode())
print('Total for %(tag_lct)s and %(tag_vcf)s\n' % dict(tag_lct=tag_lct, tag_vcf=tag_vcf))
p = subprocess.run(['psql', '-d', 'finn', '-c', qry_rst_tot], stdout=subprocess.PIPE, check=True)
print(p.stdout.decode())
wipe_downloaded_hdf = False
wipe_intermediate_geotiff = False
wipe_intermediate_rst_in_db = False
if wipe_downloaded_hdf == True:
# ditch entire download directory for the year
tgts = [ddir_lct, ddir_vcf]
cmd = ['rm', '-fr', ] + tgts
subprocess.run(cmd, check=True)
if wipe_intermediate_geotiff == True:
# ditch entire processing directory
tgts = [workdir_lct, workdir_vcf]
cmd = ['rm', '-fr', ] + tgts
print(cmd)
subprocess.run(cmd, check=True)
if wipe_intermediate_rst_in_db == True:
# delete each table from the schema 'raster'
reload(rst_import)
rst_import.drop_tables(tag_lct)
rst_import.drop_tables(tag_vcf)
###Output
_____no_output_____ |
nlp_pubmed_rct_classification.ipynb | ###Markdown
Medical Abstract Classification using Natural Language Processing
The objective is to build a deep learning model which makes medical research paper abstracts easier to read.
- The dataset used in this project is the `PubMed 200k RCT Dataset for Sequential Sentence Classification in Medical Abstract`: https://arxiv.org/abs/1710.06071
- The initial deep learning research paper was built with the PubMed 200k RCT.
- The dataset has about `200,000 labelled Randomized Control Trial abstracts`.
- The goal of the project was to build NLP models with the dataset to classify sentences in sequential order.
- RCT research papers with unstructured abstracts slow down researchers navigating the literature.
- Unstructured abstracts are sometimes hard to read and understand, which can disrupt time management and deadlines.
- This NLP model can classify the abstract sentences into their respective roles:
  - Such as `Objective`, `Methods`, `Results` and `Conclusions`.
The PubMed 200k RCT Dataset - https://github.com/Franck-Dernoncourt/pubmed-rct
Similar projects using the dataset:
- Claim Extraction for Scientific Publications 2018: https://github.com/titipata/detecting-scientific-claim
**Abstract** PubMed 200k RCT is a new dataset based on PubMed for sequential sentence classification. The dataset consists of approximately 200,000 abstracts of randomized controlled trials, totaling 2.3 million sentences. Each sentence of each abstract is labeled with its role in the abstract using one of the following classes: background, objective, method, result, or conclusion. The purpose of releasing this dataset is twofold. First, the majority of datasets for sequential short-text classification (i.e., classification of short texts that appear in sequences) are small: we hope that releasing a new large dataset will help develop more accurate algorithms for this task. Second, from an application perspective, researchers need better tools to efficiently skim through the literature. Automatically classifying each sentence in an abstract would help researchers read abstracts more efficiently, especially in fields where abstracts may be long, such as the medical field.
**Data Dictionary**
- `PubMed 20k` is a subset of `PubMed 200k`. I.e., any abstract present in `PubMed 20k` is also present in `PubMed 200k`.
- `PubMed_200k_RCT` is the same as `PubMed_200k_RCT_numbers_replaced_with_at_sign`, except that in the latter all numbers have been replaced by `@`. (Same for `PubMed_20k_RCT` vs. `PubMed_20k_RCT_numbers_replaced_with_at_sign`.)
- Since the Github file size limit is 100 MiB, we had to compress `PubMed_200k_RCT\train.7z` and `PubMed_200k_RCT_numbers_replaced_with_at_sign\train.zip`.
- To uncompress `train.7z`, you may use `7-Zip` on Windows, `Keka` on Mac OS X, or `p7zip` on Linux (see the sketch after the next heading).
Importing data and EDA
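This notebook uses the 20k subset, which is not compressed; but if you do want the full `PubMed_200k_RCT` split, the `train.7z` archive can be expanded after cloning the repository (next cell), for example with p7zip installed (a sketch, assuming the default clone location):
```python
import subprocess
# extract the compressed 200k training file in place (requires the 7z binary from p7zip)
subprocess.run(['7z', 'x', 'pubmed-rct/PubMed_200k_RCT/train.7z',
                '-opubmed-rct/PubMed_200k_RCT/'], check=True)
```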
###Code
!git clone https://github.com/Franck-Dernoncourt/pubmed-rct.git
!ls pubmed-rct
###Output
Cloning into 'pubmed-rct'...
remote: Enumerating objects: 33, done.[K
remote: Counting objects: 100% (3/3), done.[K
remote: Compressing objects: 100% (3/3), done.[K
remote: Total 33 (delta 0), reused 0 (delta 0), pack-reused 30[K
Unpacking objects: 100% (33/33), done.
Checking out files: 100% (13/13), done.
PubMed_200k_RCT
PubMed_200k_RCT_numbers_replaced_with_at_sign
PubMed_20k_RCT
PubMed_20k_RCT_numbers_replaced_with_at_sign
README.md
###Markdown
Initial Data exploration and modelling with PubMed_20k dataset
###Code
!ls pubmed-rct/PubMed_20k_RCT_numbers_replaced_with_at_sign
# imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import random
import tensorflow as tf
# functions pre-written for workflow
!wget https://raw.githubusercontent.com/hecshzye/nlp-medical-abstract-pubmed-rct/main/helper_functions.py
from helper_functions import create_tensorboard_callback, calculate_results, plot_loss_curves
# Function for reading the document
def get_doc(filename):
with open(filename, "r") as f:
return f.readlines()
data_dir = "pubmed-rct/PubMed_20k_RCT_numbers_replaced_with_at_sign/"
filenames = [data_dir + filename for filename in os.listdir(data_dir)]
filenames
# Preprocessing
train_lines = get_doc(data_dir+"train.txt")
train_lines[:30]
###Output
_____no_output_____
###Markdown
**Data dictionary**
- `\t` = tab separator
- `\n` = new line
- `###` = abstract ID
- `"line_number"` = line position
- `"text"` = text line
- `"total_lines"` = total number of lines in one abstract
- `"target"` = role (label) of the abstract sentence
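As a concrete illustration of the layout the parser below expects (the label and sentence here are made up), each abstract starts with a `###` ID line and each labelled sentence is a tab-separated pair:
```python
# one labelled line from train.txt and how it splits on the tab separator
raw = "METHODS\tparticipants were randomly assigned to treatment or control .\n"
target, text = raw.split("\t")
print(target)        # -> METHODS
print(text.strip())  # -> participants were randomly assigned to treatment or control .
```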
###Code
# Function for preprocessing the data
def preprocessing_text_with_line_number(filename):
input_lines = get_doc(filename)
abstract_lines = ""
abstract_samples = []
for line in input_lines:
if line.startswith("###"):
abstract_id = line
abstract_lines = ""
elif line.isspace():
abstract_line_split = abstract_lines.splitlines()
for abstract_line_number, abstract_line in enumerate(abstract_line_split):
line_data = {}
target_text_split = abstract_line.split("\t")
line_data["target"] = target_text_split[0]
line_data["text"] = target_text_split[1].lower()
line_data["line_number"] = abstract_line_number
line_data["total_lines"] = len(abstract_line_split) - 1
abstract_samples.append(line_data)
else:
abstract_lines += line
return abstract_samples
# Extracting data using the function
train_samples = preprocessing_text_with_line_number(data_dir + "train.txt")
val_samples = preprocessing_text_with_line_number(data_dir + "dev.txt")
test_samples = preprocessing_text_with_line_number(data_dir + "test.txt")
len(train_samples), len(test_samples), len(val_samples)
train_samples[:10]
# Creating a DataFrame
train_df = pd.DataFrame(train_samples)
val_df = pd.DataFrame(val_samples)
test_df = pd.DataFrame(test_samples)
train_df.head()
train_df.target.value_counts()
train_df.total_lines.plot.hist();
train_df.line_number.plot.hist();
# List of sentences (abstract text lines -> lists)
train_sentences = train_df["text"].tolist()
val_sentences = val_df["text"].tolist()
test_sentences = test_df["text"].tolist()
len(train_sentences), len(val_sentences), len(test_sentences)
train_sentences[:20]
###Output
_____no_output_____
###Markdown
Preprocessing for the modelling
###Code
from sklearn.preprocessing import OneHotEncoder
one_hot_encoder = OneHotEncoder(sparse=False)
train_labels_one_hot = one_hot_encoder.fit_transform(train_df["target"].to_numpy().reshape(-1, 1))
val_labels_one_hot = one_hot_encoder.transform(val_df["target"].to_numpy().reshape(-1, 1))
test_labels_one_hot = one_hot_encoder.transform(test_df["target"].to_numpy().reshape(-1, 1))
train_labels_one_hot
# Labelling
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
train_labels_encoded = label_encoder.fit_transform(train_df["target"].to_numpy())
val_labels_encoded = label_encoder.transform(val_df["target"].to_numpy())
test_labels_encoded = label_encoder.transform(test_df["target"].to_numpy())
train_labels_encoded
# Defining the classes
num_classes = len(label_encoder.classes_)
class_names = label_encoder.classes_
num_classes, class_names
###Output
_____no_output_____
###Markdown
Modelling model_1
###Code
# model_1
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
model_1 = Pipeline([
("tf-idf", TfidfVectorizer()),
("clf", MultinomialNB())
])
model_1.fit(X=train_sentences,
y=train_labels_encoded)
model_1.score(X=val_sentences,
y=val_labels_encoded)
# Prediction on model_1
model_1_preds = model_1.predict(val_sentences)
model_1_preds
# Evaluation
model_1_results = calculate_results(y_true=val_labels_encoded,
y_pred=model_1_preds)
model_1_results
###Output
_____no_output_____
###Markdown
model_2 (with sequencing)
###Code
# model_2 data preprocessing
from tensorflow.keras import layers
# average length of sentence
sentence_len = [len(sentence.split()) for sentence in train_sentences]
avg_sentence_len = np.mean(sentence_len)
avg_sentence_len
plt.hist(sentence_len, bins=10);
# sequence length that covers 95% of sentences & the max sentence length
output_seq_len = int(np.percentile(sentence_len, 95))
output_seq_len, max(sentence_len)
###Output
_____no_output_____
###Markdown
Vectorization
###Code
# Section 3.2 states that the vocabulary size is 68,000 - https://arxiv.org/pdf/1710.06071.pdf
max_tokens = 68000
# text vectorization
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
text_vectorizer = TextVectorization(max_tokens=max_tokens,
output_sequence_length=55)
# adapting text vectorizer to training sentences
text_vectorizer.adapt(train_sentences)
# testing the text vectorizer
import random
target_sentence = random.choice(train_sentences)
print(f"Text:\n{target_sentence}")
print(f"\nLength of text: {len(target_sentence.split())}")
print(f"\nVectorized text:\n{text_vectorizer([target_sentence])}")
# Number of words in the training vocabulary
rct_20k_text_vocab = text_vectorizer.get_vocabulary()
print(f"Total number of words in vocabulary: {len(rct_20k_text_vocab)}"),
print(f"Most common words: {rct_20k_text_vocab[:5]}")
print(f"Least common words: {rct_20k_text_vocab[-5:]}")
# Configuration
text_vectorizer.get_config()
# Embedding token layer
token_embed = layers.Embedding(input_dim=len(rct_20k_text_vocab),
output_dim=128,
mask_zero=True,
name="token_embedding")
# View embedded token layer
print(f"before vectorization:\n{target_sentence}\n")
vectorized__sentence = text_vectorizer([target_sentence])
print(f"after vectorization:\n{vectorized__sentence}\n")
embedded_sentence = token_embed(vectorized__sentence)
print(f"after embedding:\n{embedded_sentence}\n")
print(f"shape after embedding: {embedded_sentence.shape}")
# Using Tensorflow dataset API for fast processing
train_dataset = tf.data.Dataset.from_tensor_slices((train_sentences, train_labels_one_hot))
valid_dataset = tf.data.Dataset.from_tensor_slices((val_sentences, val_labels_one_hot))
test_dataset = tf.data.Dataset.from_tensor_slices((test_sentences, test_labels_one_hot))
train_dataset
# Convert the data into batches
train_dataset = train_dataset.batch(32).prefetch(tf.data.AUTOTUNE)
valid_dataset = valid_dataset.batch(32).prefetch(tf.data.AUTOTUNE)
test_dataset = test_dataset.batch(32).prefetch(tf.data.AUTOTUNE)
train_dataset
# Modelling Conv1D model_2
inputs= layers.Input(shape=(1,), dtype=tf.string)
text_vectors = text_vectorizer(inputs)
token_embeddings = token_embed(text_vectors)
x = layers.Conv1D(64, kernel_size=5, padding="same", activation="relu")(token_embeddings)
x = layers.GlobalAveragePooling1D()(x)
outputs = layers.Dense(num_classes, activation="softmax")(x)
model_2 = tf.keras.Model(inputs, outputs)
model_2.compile(loss="categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"])
model_2_history = model_2.fit(train_dataset,
steps_per_epoch=int(0.1 * len(train_dataset)),
epochs=3,
validation_data=valid_dataset,
validation_steps=int(0.1 * len(valid_dataset)))
model_2.summary()
# Evaluation
model_2.evaluate(valid_dataset)
# Predictions
model_2_pred_probs = model_2.predict(valid_dataset)
model_2_pred_probs
# Turning the prediction probabilities into classes
model_2_preds = tf.argmax(model_2_pred_probs, axis=1)
model_2_preds
# Evaluating the results
model_2_results = calculate_results(y_true=val_labels_encoded,
y_pred=model_2_preds)
model_2_results
###Output
_____no_output_____
###Markdown
model_3 using feature extraction
###Code
# Pretrained model universal-sentence-encoder from hub
import tensorflow_hub as hub
tf_hub_embedding_layer = hub.KerasLayer("https://tfhub.dev/google/universal-sentence-encoder/4",
trainable=False,
name="universal_sentence_encoder")
# Testing
sample_sentence_for_training = random.choice(train_sentences)
print(f"Sample training sentence:\n{sample_sentence_for_training}\n")
use_embedded_sentence = tf_hub_embedding_layer([sample_sentence_for_training])
print(f"Sample sentence after embedding:\n{use_embedded_sentence[0][:30]} (truncated_output)...\n")
print(f"Length of the embedded sentence:\n{len(use_embedded_sentence[0])}")
# Modelling model_3
inputs = layers.Input(shape=[], dtype=tf.string)
pretrained_embedding = tf_hub_embedding_layer(inputs)
x = layers.Dense(128, activation="relu")(pretrained_embedding)
outputs = layers.Dense(5, activation="softmax")(x)
model_3 = tf.keras.Model(inputs=inputs,
outputs=outputs)
model_3.compile(loss="categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"])
model_3.fit(train_dataset,
epochs=3,
steps_per_epoch=int(0.1 * len(train_dataset)),
validation_data=valid_dataset,
validation_steps=int(0.1 * len(valid_dataset)))
model_3.summary()
# Evaluate model_3
model_3.evaluate(valid_dataset)
# Predictions
model_3_pred_probs = model_3.predict(valid_dataset)
model_3_pred_probs
# Convert the pred_probs to classes
model_3_preds = tf.argmax(model_3_pred_probs, axis=1)
model_3_preds
# Evaluating the results
model_3_results = calculate_results(y_true=val_labels_encoded,
y_pred=model_3_preds)
model_3_results
###Output
_____no_output_____
###Markdown
model_4 with character embeddings
###Code
# Function for character splitting
def split_character(text):
return " ".join(list(text))
split_character(sample_sentence_for_training)
# split the sequence
train_char = [split_character(sentence) for sentence in train_sentences]
val_char = [split_character(sentence) for sentence in val_sentences]
test_char = [split_character(sentence) for sentence in test_sentences]
print(train_char[0])
# Average length of the character
character_length = [len(sentence) for sentence in train_sentences]
mean_character_length = np.mean(character_length)
mean_character_length
plt.hist(character_length, bins=10);
###Output
_____no_output_____
###Markdown
From the plot, we can observe that most of the sequences are between 0-200 characters long.
###Code
# for 95% of character length of sequences
output_seq_character_length = int(np.percentile(character_length, 95))
output_seq_character_length
# Vectorizing and character embeddings
import string
alphabet = string.ascii_lowercase + string.digits + string.punctuation
alphabet
# tokenizing the character
NUM_CHAR_TOKENS = len(alphabet) + 2
character_vectorizer = TextVectorization(max_tokens=NUM_CHAR_TOKENS,
output_sequence_length=output_seq_character_length,
standardize="lower_and_strip_punctuation",
name="character_vectorizer")
character_vectorizer.adapt(train_char)
# Sample of character vocabulary
character_vocab = character_vectorizer.get_vocabulary()
print(f"Total number of unique characters: {len(character_vocab)}")
print(f"10 most common characters: {character_vocab[:10]}")
print(f"10 least common characters: {character_vocab[-10:]}")
# Sample of character vectorizer
import random
sample_train_character = random.choice(train_char)
print(f" Text (in characters):\n{sample_train_character}")
print(f"\nCharacter length: {len(sample_train_character.split())}")
vectorized__character = character_vectorizer([sample_train_character])
print(f"\nCharacter Vectorized:\n{vectorized__character}")
print(f"\nVectorized character length: {len(vectorized__character[0])}")
# Character embedding layer
character_embed = layers.Embedding(input_dim=NUM_CHAR_TOKENS,
output_dim=25,
mask_zero=False,
name="character_embed")
# Sample
print(f"Character text before embeddings:\n{sample_train_character}\n")
character_embed_example = character_embed(character_vectorizer([sample_train_character]))
print(f"Embedded characters:\n{character_embed_example}\n")
print(f"Shape of embedded character: {character_embed_example.shape}")
# Modelling model_4 with Conv1D
inputs = layers.Input(shape=(1,), dtype="string")
character_vectors = character_vectorizer(inputs)
character_embeddings = character_embed(character_vectors)
x = layers.Conv1D(63, kernel_size=5, padding="same", activation="relu")(character_embeddings)
x = layers.GlobalMaxPool1D()(x)
outputs = layers.Dense(num_classes, activation="softmax")(x)
model_4 = tf.keras.Model(inputs=inputs,
outputs=outputs,
name="model_4_conv1D_character_embedding")
model_4.compile(loss="categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"])
# Character dataset
train_character_dataset = tf.data.Dataset.from_tensor_slices((train_char, train_labels_one_hot)).batch(32).prefetch(tf.data.AUTOTUNE)
val_character_dataset = tf.data.Dataset.from_tensor_slices((val_char, val_labels_one_hot)).batch(32).prefetch(tf.data.AUTOTUNE)
model_4_history = model_4.fit(train_character_dataset,
epochs=3,
steps_per_epoch=int(0.1 * len(train_character_dataset)),
validation_data=val_character_dataset,
validation_steps=int(0.1 * len(val_character_dataset)))
model_4.summary()
# model_4 evaluation
model_4.evaluate(val_character_dataset)
# Predictions
model_4_pred_probs = model_4.predict(val_character_dataset)
# Predictions to classes
model_4_preds = tf.argmax(model_4_pred_probs, axis=1)
# Results
model_4_results = calculate_results(y_true=val_labels_encoded,
y_pred=model_4_preds)
model_4_results
###Output
_____no_output_____
###Markdown
Not good enough at 66% accuracy, compared to model_1, model_2 & model_3. model_5 with pretrained tokens and character embeddings
###Code
# Tokens inputs
token_inputs = layers.Input(shape=[], dtype=tf.string, name="token_input")
token_embeddings = tf_hub_embedding_layer(token_inputs)
token_output = layers.Dense(128, activation="relu")(token_embeddings)
token_model = tf.keras.Model(inputs=token_inputs,
outputs=token_output)
# Character inputs
character_inputs = layers.Input(shape=(1,), dtype=tf.string, name="character_input")
character_vectors = character_vectorizer(character_inputs)
character_embeddings = character_embed(character_vectors)
character_biLSTM = layers.Bidirectional(layers.LSTM(25))(character_embeddings)
character_model = tf.keras.Model(inputs=character_inputs,
outputs=character_biLSTM)
# token & character concatenation
token_character_concat = layers.Concatenate(name="token_character_hybrid")([token_model.output,
character_model.output])
# Output layers
combined_dropout = layers.Dropout(0.5)(token_character_concat)
combined_dense = layers.Dense(200, activation="relu")(combined_dropout)
final_dropout = layers.Dropout(0.5)(combined_dense)
output_layer = layers.Dense(num_classes, activation="softmax")(final_dropout)
model_5 = tf.keras.Model(inputs=[token_model.input, character_model.input],
outputs=output_layer,
name="model_5_hybrid")
model_5.compile(loss="categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"])
# Combining both of the training dataset & batching
train_character_token_data = tf.data.Dataset.from_tensor_slices((train_sentences, train_char))
train_character_token_labels = tf.data.Dataset.from_tensor_slices(train_labels_one_hot)
train_character_token_dataset = tf.data.Dataset.zip((train_character_token_data, train_character_token_labels))
train_character_token_dataset = train_character_token_dataset.batch(32).prefetch(tf.data.AUTOTUNE)
# Combining both of the validation dataset & batching
val_character_token_data = tf.data.Dataset.from_tensor_slices((val_sentences, val_char))
val_character_token_labels = tf.data.Dataset.from_tensor_slices(val_labels_one_hot)
val_character_token_dataset = tf.data.Dataset.zip((val_character_token_data, val_character_token_labels))
val_character_token_dataset = val_character_token_dataset.batch(32).prefetch(tf.data.AUTOTUNE)
# Fit
model_5_history = model_5.fit(train_character_token_dataset,
steps_per_epoch=int(0.1 * len(train_character_token_dataset)),
epochs=3,
validation_data=val_character_token_dataset,
validation_steps=int(0.1 * len(val_character_token_dataset)))
model_5.summary()
# model_5 evaluation
model_5.evaluate(val_character_token_dataset)
# model_5 predictions
model_5_pred_probs = model_5.predict(val_character_token_dataset)
model_5_preds = tf.argmax(model_5_pred_probs, axis=1)
model_5_results = calculate_results(y_true=val_labels_encoded,
y_pred=model_5_preds)
model_5_results
###Output
_____no_output_____
###Markdown
model_5 performs better than model_4 model_6 with transfer learning and positional embeddings
###Code
# EDA before modelling
train_df["line_number"].value_counts()
train_df.line_number.plot.hist();
# One hot encoding
train_line_numbers_one_hot = tf.one_hot(train_df["line_number"].to_numpy(), depth=15)
val_line_numbers_one_hot = tf.one_hot(val_df["line_number"].to_numpy(), depth=15)
test_line_numbers_one_hot = tf.one_hot(test_df["line_number"].to_numpy(), depth=15)
train_line_numbers_one_hot, train_line_numbers_one_hot[:25]
# Unique numbers of lines
train_df["total_lines"].value_counts()
train_df.total_lines.plot.hist();
# total_lines value of 20 in %
np.percentile(train_df.total_lines, 98)
# One hot encoding the total_lines
train_total_lines_one_hot = tf.one_hot(train_df["total_lines"].to_numpy(), depth=20)
val_total_lines_one_hot = tf.one_hot(val_df["total_lines"].to_numpy(), depth=20)
test_total_lines_one_hot = tf.one_hot(test_df["total_lines"].to_numpy(), depth=20)
train_total_lines_one_hot.shape, train_total_lines_one_hot[:15]
# Modelling model_6
# token
token_inputs = layers.Input(shape=[], dtype="string", name="token_inputs")
token_embeddings = tf_hub_embedding_layer(token_inputs)
token_outputs = layers.Dense(128, activation="relu")(token_embeddings)
token_model = tf.keras.Model(inputs=token_inputs,
outputs=token_outputs)
# character
character_inputs = layers.Input(shape=(1,), dtype="string", name="character_inputs")
character_vectors = character_vectorizer(character_inputs)
character_embeddings = character_embed(character_vectors)
character_biLSTM = layers.Bidirectional(layers.LSTM(32))(character_embeddings)
character_model = tf.keras.Model(inputs=character_inputs,
outputs=character_biLSTM)
# line numbers
line_number_inputs = layers.Input(shape=(15,), dtype=tf.int32, name="line_number_input")
x = layers.Dense(32, activation="relu")(line_number_inputs)
line_number_model = tf.keras.Model(inputs=line_number_inputs,
outputs=x)
# total lines
total_lines_inputs = layers.Input(shape=(20,), dtype=tf.int32, name="total_lines_input")
y = layers.Dense(32, activation="relu")(total_lines_inputs)
total_line_model = tf.keras.Model(inputs=total_lines_inputs,
outputs=y)
# Concatenate token & character embeddings
combined_embeddings = layers.Concatenate(name="token_character_hybrid_embedding")([token_model.output,
character_model.output])
z = layers.Dense(256, activation="relu")(combined_embeddings)
z = layers.Dropout(0.5)(z)
# Concatenate positional embeddings with token & character embeddings
z = layers.Concatenate(name="token_character_positional_embedding")([line_number_model.output,
total_line_model.output,
z])
# Output layer
output_layer = layers.Dense(5, activation="softmax", name="output_layer")(z)
# model_6
model_6 = tf.keras.Model(inputs=[line_number_model.input,
total_line_model.input,
token_model.input,
character_model.input],
outputs=output_layer)
model_6.compile(loss=tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.2),
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"])
# training & validation dataset for x, y, z
train_pos_character_token_data = tf.data.Dataset.from_tensor_slices((train_line_numbers_one_hot,
train_total_lines_one_hot,
train_sentences,
train_char))
train_pos_character_token_labels = tf.data.Dataset.from_tensor_slices(train_labels_one_hot)
train_pos_character_token_dataset = tf.data.Dataset.zip((train_pos_character_token_data, train_pos_character_token_labels))
train_pos_character_token_dataset = train_pos_character_token_dataset.batch(32).prefetch(tf.data.AUTOTUNE)
# validation_dataset
val_pos_character_token_data = tf.data.Dataset.from_tensor_slices((val_line_numbers_one_hot,
val_total_lines_one_hot,
val_sentences,
val_char))
val_pos_character_token_labels = tf.data.Dataset.from_tensor_slices(val_labels_one_hot)
val_pos_character_token_dataset = tf.data.Dataset.zip((val_pos_character_token_data, val_pos_character_token_labels))
val_pos_character_token_dataset = val_pos_character_token_dataset.batch(32).prefetch(tf.data.AUTOTUNE)
# Fit
model_6_history = model_6.fit(train_pos_character_token_dataset,
steps_per_epoch=int(0.1 * len(train_pos_character_token_dataset)),
epochs=3,
validation_data=val_pos_character_token_dataset,
validation_steps=int(0.1 * len(val_pos_character_token_dataset)))
###Output
Epoch 1/3
562/562 [==============================] - 130s 222ms/step - loss: 1.1002 - accuracy: 0.7239 - val_loss: 0.9839 - val_accuracy: 0.8049
Epoch 2/3
562/562 [==============================] - 129s 229ms/step - loss: 0.9679 - accuracy: 0.8153 - val_loss: 0.9501 - val_accuracy: 0.8291
Epoch 3/3
562/562 [==============================] - 130s 232ms/step - loss: 0.9510 - accuracy: 0.8236 - val_loss: 0.9377 - val_accuracy: 0.8334
###Markdown
model_6 has 83.3% accuracy
###Code
model_6.summary()
# model_6 predictions
model_6_pred_probs = model_6.predict(val_pos_character_token_dataset, verbose=1)
model_6_preds = tf.argmax(model_6_pred_probs, axis=1)
model_6_results = calculate_results(y_true=val_labels_encoded,
y_pred=model_6_preds)
model_6_results
# Comparing results from all models (model_1 to model_6)
all_model_results = pd.DataFrame({"model_1": model_1_results,
"model_2": model_2_results,
"model_3": model_3_results,
"model_4": model_4_results,
"model_5": model_5_results,
"model_6": model_6_results})
all_model_results = all_model_results.transpose()
all_model_results
all_model_results["accuracy"] = all_model_results["accuracy"]/100
all_model_results.plot(kind="bar", figsize=(12, 8)).legend(bbox_to_anchor=(1.0, 1.0));
all_model_results.sort_values("f1", ascending=False)["f1"].plot(kind="bar", figsize=(12, 8));
# Saving model_6
model_6.save("nlp_pubmed_model_6")
!cp nlp_pubmed_model_6 -r /content/drive/MyDrive/NLP-projects/nlp_pubmed_model_6
###Output
_____no_output_____
###Markdown
Evaluating on test data
###Code
# Preprocessing test dataset, evaluation & predictions
test_pos_character_token_data = tf.data.Dataset.from_tensor_slices((test_line_numbers_one_hot,
test_total_lines_one_hot,
test_sentences,
test_char))
test_pos_character_token_labels = tf.data.Dataset.from_tensor_slices(test_labels_one_hot)
test_pos_character_token_dataset = tf.data.Dataset.zip((test_pos_character_token_data, test_pos_character_token_labels))
test_pos_character_token_dataset = test_pos_character_token_dataset.batch(32).prefetch(tf.data.AUTOTUNE)
# Predictions
test_pred_probs = model_6.predict(test_pos_character_token_dataset,
verbose=1)
test_preds = tf.argmax(test_pred_probs, axis=1)
# Evaluation results
model_6_test_results = calculate_results(y_true=test_labels_encoded,
y_pred=test_preds)
model_6_test_results
# Evaluating the most wrong predictions
test_pred_classes = [label_encoder.classes_[pred] for pred in test_preds]
# Integrating prediction in test_df
test_df["prediction"] = test_pred_classes
test_df["pred_prob"] = tf.reduce_max(test_pred_probs, axis=1).numpy()
test_df["correct"] = test_df["prediction"] == test_df["target"]
# 200 most wrong predictions
most_wrong_200 = test_df[test_df["correct"] == False].sort_values("pred_prob", ascending=False)[:200]
most_wrong_200
# most commonly wrong predictions
for row in most_wrong_200[0:20].itertuples():
_, target, text, line_number, total_lines, prediction, pred_prob, _ = row
print(f"Target: {target}, Prediction: {prediction}, Probability: {pred_prob}, Line Number: {line_number}, Total Lines: {total_lines}\n")
print(f"Text:\n{text}\n")
print("- - - - -\n")
###Output
Target: METHODS, Prediction: BACKGROUND, Probability: 0.9474185705184937, Line Number: 1, Total Lines: 11
Text:
pretest-posttest .
- - - - -
Target: CONCLUSIONS, Prediction: METHODS, Probability: 0.9433786869049072, Line Number: 4, Total Lines: 6
Text:
symptom outcomes will be assessed and estimates of cost-effectiveness made .
- - - - -
Target: CONCLUSIONS, Prediction: BACKGROUND, Probability: 0.9317997097969055, Line Number: 19, Total Lines: 19
Text:
clinicaltrials.gov identifier : nct@ .
- - - - -
Target: BACKGROUND, Prediction: OBJECTIVE, Probability: 0.9295032620429993, Line Number: 0, Total Lines: 12
Text:
to evaluate the effects of the lactic acid bacterium lactobacillus salivarius on caries risk factors .
- - - - -
Target: RESULTS, Prediction: METHODS, Probability: 0.9262389540672302, Line Number: 4, Total Lines: 13
Text:
the primary endpoint is the cumulative three-year hiv incidence .
- - - - -
Target: CONCLUSIONS, Prediction: BACKGROUND, Probability: 0.9260937571525574, Line Number: 18, Total Lines: 18
Text:
nct@ ( clinicaltrials.gov ) .
- - - - -
Target: RESULTS, Prediction: BACKGROUND, Probability: 0.9218314290046692, Line Number: 8, Total Lines: 15
Text:
non-diffuse-trickling '' ) .
- - - - -
Target: CONCLUSIONS, Prediction: BACKGROUND, Probability: 0.9202677607536316, Line Number: 15, Total Lines: 15
Text:
-lsb- netherlands trial register ( http://www.trialregister.nl/trialreg/index.asp ) , nr @ , date of registration @ december @ . -rsb-
- - - - -
Target: RESULTS, Prediction: METHODS, Probability: 0.9188950657844543, Line Number: 3, Total Lines: 16
Text:
a cluster randomised trial was implemented with @,@ children in @ government primary schools on the south coast of kenya in @-@ .
- - - - -
Target: CONCLUSIONS, Prediction: BACKGROUND, Probability: 0.9168384671211243, Line Number: 13, Total Lines: 13
Text:
( clinicaltrials.gov : nct@ ) .
- - - - -
Target: RESULTS, Prediction: METHODS, Probability: 0.9142534136772156, Line Number: 4, Total Lines: 14
Text:
a screening questionnaire for moh was sent to all @-@ year old patients on these gps ` list .
- - - - -
Target: RESULTS, Prediction: METHODS, Probability: 0.9101817011833191, Line Number: 6, Total Lines: 14
Text:
the primary outcome was to evaluate changes in abdominal and shoulder-tip pain via a @-mm visual analog scale at @ , @ , and @hours postoperatively .
- - - - -
Target: METHODS, Prediction: RESULTS, Probability: 0.9094158411026001, Line Number: 6, Total Lines: 9
Text:
-@ % vs. fish : -@ % vs. fish + s : -@ % ; p < @ ) but there were no significant differences between groups .
- - - - -
Target: METHODS, Prediction: BACKGROUND, Probability: 0.908202588558197, Line Number: 4, Total Lines: 9
Text:
clinicaltrials.gov identifier : nct@ .
- - - - -
Target: BACKGROUND, Prediction: OBJECTIVE, Probability: 0.9057267904281616, Line Number: 0, Total Lines: 9
Text:
to compare the efficacy of the newcastle infant dialysis and ultrafiltration system ( nidus ) with peritoneal dialysis ( pd ) and conventional haemodialysis ( hd ) in infants weighing < @ kg .
- - - - -
Target: BACKGROUND, Prediction: OBJECTIVE, Probability: 0.9013079404830933, Line Number: 0, Total Lines: 11
Text:
to compare the safety and efficacy of dexmedetomidine/propofol ( dp ) - total i.v. anaesthesia ( tiva ) vs remifentanil/propofol ( rp ) - tiva , both with spontaneous breathing , during airway foreign body ( fb ) removal in children .
- - - - -
Target: METHODS, Prediction: OBJECTIVE, Probability: 0.8997651934623718, Line Number: 0, Total Lines: 7
Text:
to determine whether the insulin resistance that exists in metabolic syndrome ( mets ) patients is modulated by dietary fat composition .
- - - - -
Target: RESULTS, Prediction: CONCLUSIONS, Probability: 0.8978807330131531, Line Number: 13, Total Lines: 15
Text:
additionally , intervention effects were observed for information gathering in women with high genetic literacy , but not in women with low genetic literacy .
- - - - -
Target: CONCLUSIONS, Prediction: BACKGROUND, Probability: 0.8975597023963928, Line Number: 10, Total Lines: 10
Text:
clinicaltrials.gov : nct@ .
- - - - -
Target: RESULTS, Prediction: METHODS, Probability: 0.8973494172096252, Line Number: 3, Total Lines: 11
Text:
family practices were randomly assigned to receive the educational toolkit in june @ ( intervention group ) or may @ ( control group ) .
- - - - -
###Markdown
Predicting on PubMed NCBI research paper
**Source** - `https://pubmed.ncbi.nlm.nih.gov/20232240/`
Using the research paper from `PubMed NCBI` by `Christopher Lopata, Marcus L Thomeer, etc`.
**Name** - `Randomized Controlled Trial: RCT of a manualized social treatment for high-functioning autism spectrum disorders`
**Abstract**: "This RCT examined the efficacy of a manualized social intervention for children with HFASDs. Participants were randomly assigned to treatment or wait-list conditions. Treatment included instruction and therapeutic activities targeting social skills, face-emotion recognition, interest expansion, and interpretation of non-literal language. A response-cost program was applied to reduce problem behaviors and foster skills acquisition. Significant treatment effects were found for five of seven primary outcome measures (parent ratings and direct child measures). Secondary measures based on staff ratings (treatment group only) corroborated gains reported by parents. High levels of parent, child and staff satisfaction were reported, along with high levels of treatment fidelity. Standardized effect size estimates were primarily in the medium and large ranges and favored the treatment group."
**File** - Using the `Abstract` of the paper in `.json` format for readability.
**Link**: `https://raw.githubusercontent.com/hecshzye/nlp-medical-abstract-pubmed-rct/main/pubmed_ncbi_autism_disorder.json`
###Code
import json
# Loading the NCBI paper
!wget https://raw.githubusercontent.com/hecshzye/nlp-medical-abstract-pubmed-rct/main/pubmed_ncbi_autism_disorder.json
with open("pubmed_ncbi_autism_disorder.json", "r") as f:
ncbi_abstract = json.load(f)
ncbi_abstract
abstracts = pd.DataFrame(ncbi_abstract)
# Using spacy sentencizer
from spacy.lang.en import English
nlp = English()
sentencizer = nlp.create_pipe("sentencizer")
nlp.add_pipe(sentencizer)
doc = nlp(ncbi_abstract[0]["abstract"])
abstract_lines = [str(sent) for sent in list(doc.sents)]
abstract_lines
# Preprocessing the ncbi_abstract
total_lines_in_ncbi_abstract = len(abstract_lines)
sample_lines = []
for i, line in enumerate(abstract_lines):
sample_dict = {}
sample_dict["text"] = str(line)
sample_dict["line_number"] = i
sample_dict["total_lines"] = total_lines_in_ncbi_abstract - 1
sample_lines.append(sample_dict)
sample_lines
# Encoding
test_abstract_line_numbers = [line["line_number"] for line in sample_lines]
test_abstract_line_numbers_one_hot = tf.one_hot(test_abstract_line_numbers, depth=15)
test_abstract_total_lines = [line["total_lines"] for line in sample_lines]
test_abstract_total_lines_one_hot = tf.one_hot(test_abstract_total_lines, depth=20)
# Spliting into characters
abstract_characters = [split_character(sentence) for sentence in abstract_lines]
# Predictions
test_abstract_pred_probs = model_6.predict(x=(test_abstract_line_numbers_one_hot,
test_abstract_total_lines_one_hot,
tf.constant(abstract_lines),
tf.constant(abstract_characters)))
test_abstract_preds = tf.argmax(test_abstract_pred_probs, axis=1)
test_abstract_pred_classes = [label_encoder.classes_[i] for i in test_abstract_preds]
for i, line in enumerate(abstract_lines):
print(f"{test_abstract_pred_classes[i]}: {line}")
###Output
OBJECTIVE: This RCT examined the efficacy of a manualized social intervention for children with HFASDs.
METHODS: Participants were randomly assigned to treatment or wait-list conditions.
METHODS: Treatment included instruction and therapeutic activities targeting social skills, face-emotion recognition, interest expansion, and interpretation of non-literal language.
METHODS: A response-cost program was applied to reduce problem behaviors and foster skills acquisition.
RESULTS: Significant treatment effects were found for five of seven primary outcome measures (parent ratings and direct child measures).
METHODS: Secondary measures based on staff ratings (treatment group only) corroborated gains reported by parents.
RESULTS: High levels of parent, child and staff satisfaction were reported, along with high levels of treatment fidelity.
RESULTS: Standardized effect size estimates were primarily in the medium and large ranges and favored the treatment group.
###Markdown
Model Predictions **Original Abstract**:"This RCT examined the efficacy of a manualized social intervention for children with HFASDs. Participants were randomly assigned to treatment or wait-list conditions. Treatment included instruction and therapeutic activities targeting social skills, face-emotion recognition, interest expansion, and interpretation of non-literal language. A response-cost program was applied to reduce problem behaviors and foster skills acquisition. Significant treatment effects were found for five of seven primary outcome measures (parent ratings and direct child measures). Secondary measures based on staff ratings (treatment group only) corroborated gains reported by parents. High levels of parent, child and staff satisfaction were reported, along with high levels of treatment fidelity. Standardized effect size estimates were primarily in the medium and large ranges and favored the treatment group." **Model's `Predicted` Abstract which makes Abstract easier to read**`Abstract` after `Natural Language Processing` (`model_6`):**OBJECTIVE**: This RCT examined the efficacy of a manualized social intervention for children with HFASDs.**METHODS**: Participants were randomly assigned to treatment or wait-list conditions.**METHODS**: Treatment included instruction and therapeutic activities targeting social skills, face-emotion recognition, interest expansion, and interpretation of non-literal language.**METHODS**: A response-cost program was applied to reduce problem behaviors and foster skills acquisition.**RESULTS**: Significant treatment effects were found for five of seven primary outcome measures (parent ratings and direct child measures).**METHODS**: Secondary measures based on staff ratings (treatment group only) corroborated gains reported by parents.**RESULTS**: High levels of parent, child and staff satisfaction were reported, along with high levels of treatment fidelity.**RESULTS**: Standardized effect size estimates were primarily in the medium and large ranges and favored the treatment group.
###Code
###Output
_____no_output_____ |
24. CNN.ipynb | ###Markdown
Convolutional Neural Network Importing the libraries
###Code
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
tf.__version__
###Output
_____no_output_____
###Markdown
Part 1 - Data Preprocessing Preprocessing the Training Set
###Code
train_datagen = ImageDataGenerator(
rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
training_set = train_datagen.flow_from_directory(
'dataset/training_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
###Output
Found 8000 images belonging to 2 classes.
###Markdown
Preprocessing the Test set
###Code
test_datagen = ImageDataGenerator(rescale = 1./255)
test_set = test_datagen.flow_from_directory(
'dataset/test_set',
target_size = (64,64),
batch_size = 32,
class_mode = 'binary')
###Output
Found 2000 images belonging to 2 classes.
###Markdown
Part 2 - Building the CNN Initialising the CNN
###Code
cnn = tf.keras.models.Sequential()
###Output
_____no_output_____
###Markdown
Step 1 - Convolution
###Code
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size= 3, activation= 'relu', input_shape = [64, 64, 3]))
###Output
_____no_output_____
###Markdown
Step 2 - Pooling
###Code
cnn.add(tf.keras.layers.MaxPool2D(pool_size = 2, strides = 2))
###Output
_____no_output_____
###Markdown
Adding second convolutional layer
###Code
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size= 3, activation= 'relu'))
cnn.add(tf.keras.layers.MaxPool2D(pool_size = 2, strides = 2))
###Output
_____no_output_____
###Markdown
Step 3 - Flattening
###Code
cnn.add(tf.keras.layers.Flatten())
###Output
_____no_output_____
###Markdown
Step 4 - Full Connection
###Code
cnn.add(tf.keras.layers.Dense(units = 128, activation = 'relu'))
###Output
_____no_output_____
###Markdown
Step 5 - Output Layer
###Code
cnn.add(tf.keras.layers.Dense(units = 1, activation = 'sigmoid'))
###Output
_____no_output_____
###Markdown
Part 3 - Training the CNN Compiling the CNN
###Code
cnn.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
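# binary_crossentropy pairs with the single sigmoid output unit added above;
# a multi-class setup would instead use categorical_crossentropy with a softmax output.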
###Output
_____no_output_____
###Markdown
Training the CNN on the Training set and evaluating on the Test set
###Code
cnn.fit(x = training_set, validation_data = test_set, epochs = 25)
###Output
Epoch 1/25
250/250 [==============================] - 78s 313ms/step - loss: 0.6681 - accuracy: 0.5775 - val_loss: 0.6089 - val_accuracy: 0.6795
Epoch 2/25
250/250 [==============================] - 56s 225ms/step - loss: 0.5922 - accuracy: 0.6848 - val_loss: 0.5642 - val_accuracy: 0.7080
Epoch 3/25
250/250 [==============================] - 49s 198ms/step - loss: 0.5473 - accuracy: 0.7211 - val_loss: 0.5534 - val_accuracy: 0.7145
Epoch 4/25
250/250 [==============================] - 47s 190ms/step - loss: 0.5115 - accuracy: 0.7433 - val_loss: 0.4840 - val_accuracy: 0.7650
Epoch 5/25
250/250 [==============================] - 59s 236ms/step - loss: 0.4815 - accuracy: 0.7713 - val_loss: 0.4966 - val_accuracy: 0.7640
Epoch 6/25
250/250 [==============================] - 70s 280ms/step - loss: 0.4718 - accuracy: 0.7754 - val_loss: 0.4865 - val_accuracy: 0.7570
Epoch 7/25
250/250 [==============================] - 67s 268ms/step - loss: 0.4481 - accuracy: 0.7906 - val_loss: 0.4601 - val_accuracy: 0.7870
Epoch 8/25
250/250 [==============================] - 75s 299ms/step - loss: 0.4360 - accuracy: 0.7979 - val_loss: 0.5117 - val_accuracy: 0.7485
Epoch 9/25
250/250 [==============================] - 72s 287ms/step - loss: 0.4186 - accuracy: 0.8116 - val_loss: 0.4474 - val_accuracy: 0.8020
Epoch 10/25
250/250 [==============================] - 75s 302ms/step - loss: 0.4118 - accuracy: 0.8071 - val_loss: 0.4420 - val_accuracy: 0.7915
Epoch 11/25
250/250 [==============================] - 74s 296ms/step - loss: 0.3884 - accuracy: 0.8209 - val_loss: 0.4548 - val_accuracy: 0.8140
Epoch 12/25
250/250 [==============================] - 75s 301ms/step - loss: 0.3921 - accuracy: 0.8260 - val_loss: 0.4542 - val_accuracy: 0.7940
Epoch 13/25
250/250 [==============================] - 73s 293ms/step - loss: 0.3765 - accuracy: 0.8328 - val_loss: 0.4855 - val_accuracy: 0.7795
Epoch 14/25
250/250 [==============================] - 71s 285ms/step - loss: 0.3640 - accuracy: 0.8350 - val_loss: 0.4593 - val_accuracy: 0.7970
Epoch 15/25
250/250 [==============================] - 55s 220ms/step - loss: 0.3588 - accuracy: 0.8381 - val_loss: 0.4400 - val_accuracy: 0.8165
Epoch 16/25
250/250 [==============================] - 51s 203ms/step - loss: 0.3426 - accuracy: 0.8479 - val_loss: 0.4521 - val_accuracy: 0.8060
Epoch 17/25
250/250 [==============================] - 30s 120ms/step - loss: 0.3347 - accuracy: 0.8514 - val_loss: 0.4659 - val_accuracy: 0.8055
Epoch 18/25
250/250 [==============================] - 28s 111ms/step - loss: 0.3156 - accuracy: 0.8640 - val_loss: 0.4599 - val_accuracy: 0.8075
Epoch 19/25
250/250 [==============================] - 27s 107ms/step - loss: 0.3094 - accuracy: 0.8673 - val_loss: 0.4600 - val_accuracy: 0.8185
Epoch 20/25
250/250 [==============================] - 27s 108ms/step - loss: 0.2961 - accuracy: 0.8736 - val_loss: 0.4591 - val_accuracy: 0.8235
Epoch 21/25
250/250 [==============================] - 28s 112ms/step - loss: 0.2884 - accuracy: 0.8731 - val_loss: 0.4409 - val_accuracy: 0.8195
Epoch 22/25
250/250 [==============================] - 28s 111ms/step - loss: 0.2690 - accuracy: 0.8854 - val_loss: 0.4976 - val_accuracy: 0.8050
Epoch 23/25
250/250 [==============================] - 27s 109ms/step - loss: 0.2626 - accuracy: 0.8891 - val_loss: 0.5627 - val_accuracy: 0.7985
Epoch 24/25
250/250 [==============================] - 27s 109ms/step - loss: 0.2669 - accuracy: 0.8878 - val_loss: 0.4831 - val_accuracy: 0.8090
Epoch 25/25
250/250 [==============================] - 28s 110ms/step - loss: 0.2465 - accuracy: 0.8934 - val_loss: 0.5086 - val_accuracy: 0.8100
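###Markdown
 Before making a single prediction, a quick sketch (reusing the `test_set` generator defined above) of how the final test-set metrics can be read back out:
###Code
test_loss, test_accuracy = cnn.evaluate(test_set)
print(f"Test accuracy: {test_accuracy:.2%}")
###Output
_____no_output_____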
###Markdown
Part 4 - Making a single prediction
###Code
import numpy as np
from keras.preprocessing import image
test_image = image.load_img('dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64))
test_image = image.img_to_array(test_image)
test_image = test_image / 255.0  # apply the same 1./255 rescaling used for the training images
test_image = np.expand_dims(test_image, axis = 0)
result = cnn.predict(test_image)
training_set.class_indices
if result[0][0] > 0.5:  # sigmoid output: probability that the image is a dog
    prediction = 'dog'
else:
    prediction = 'cat'
print(prediction)
%%html
<marquee style='width: 90%; color: orange; font-size:150%;'><b>With 90% accuracy our CNN model was able to classify that the single image was of a dog</b></marquee>
###Output
_____no_output_____ |
scripts/4-inferencia-estadistica-partir-muestra/1-pruebas-hipotesis/pruebas-hipotesis.ipynb | ###Markdown
 Hypothesis testing
###Code
import pandas as pd
import numpy as np
from scipy.stats import expon
from scipy.stats import uniform
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
muestra = [42, 35, 29, 45, 41, 57, 54, 47, 48, 56, 47, 35, 52, 31, 52, 55, 57, 58, 26, 29, 32, 37, 32, 34, 48, 20, 48, 51, 27, 24, 39, 40, 31, 34, 23, 24, 41, 58, 44, 48, 31, 23, 27, 55, 43, 47, 30, 57, 38, 51]
len(muestra)
###Output
_____no_output_____
###Markdown
 Hypothesis
###Code
media, var, skew, kurt = expon.stats(scale = 30, moments = 'mvsk')
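# For an exponential distribution with scale = 30 this gives mean = 30 and variance = 900,
# the population variance plugged into the z statistic below.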
# Step 1: parameter of interest (the exponential parameter lambda / population mean)
# Step 2: hypotheses -> H0: mu = 30  vs.  H1: mu > 30 (one-sided, right tail)
mu = 30
mu > 30
# Step 3: best estimator
# Estimator: the sample mean
# Step 4: sampling distribution of the mean: approximately Normal (n = 50)
promedio = np.mean(muestra)
promedio
# Step 5: compute the estimator's value and the associated test statistic
z = (promedio - mu) / np.sqrt(var/50)
z
# Step 6: define the rejection criterion and the tolerated error (significance level)
alpha = 0.05
# Rejection criteria
from scipy.stats import norm
data_norm = norm.rvs(size = 1000000)
ax = sns.distplot(data_norm, bins = 500, kde = False, color = 'blue')
ax.set_title('Normal distribution')
# The critical value is the point at which the distribution accumulates a right-tail probability equal to our error level alpha
valor_critico = norm.ppf(1-alpha, loc = 0, scale = 1)
valor_critico
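# The same decision can be read off a p-value: the right-tail probability of the
# observed statistic under H0 (a small sketch added for completeness).
p_value = norm.sf(z)   # P(Z > z) under the standard normal
print('p-value:', p_value, '-> reject H0' if p_value < alpha else '-> fail to reject H0')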
ax = sns.distplot(data_norm, bins = 500, kde = False, color = 'blue')
ax.set_title('Normal distribution')
ax.axvline(x = valor_critico, linestyle = '--', c = 'r', label = 'critical value')
ax.axvline(x = z, linestyle = '--', c = 'k', label = 'test statistic')
ax.legend()
###Output
_____no_output_____ |
LS_DS2_24_Cross_Validation_AND_Feature_Selection_LIVE_LESSON.ipynb | ###Markdown
_Lambda School Data Science - Model Validation_ Example solution to the Cross-Validation assignment — plus Feature Selection!See also Sebastian Raschka's example, [Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb).
###Code
# We'll modify a project from Python Data Science Handbook by Jake VanderPlas
# https://jakevdp.github.io/PythonDataScienceHandbook/05.06-linear-regression.html#Example:-Predicting-Bicycle-Traffic
# Predicting Bicycle Traffic
# As an example, let's take a look at whether we can predict the number of
# bicycle trips across Seattle's Fremont Bridge based on weather, season,
# and other factors.
# We will join the bike data with another dataset, and try to determine the
# extent to which weather and seasonal factors—temperature, precipitation,
# and daylight hours—affect the volume of bicycle traffic through this corridor.
# Fortunately, the NOAA makes available their daily weather station data
# (I used station ID USW00024233) and we can easily use Pandas to join
# the two data sources.
import numpy as np
import pandas as pd
from sklearn.feature_selection import f_regression, SelectKBest
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
def load():
fremont_bridge = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'
bicycle_weather = 'https://raw.githubusercontent.com/jakevdp/PythonDataScienceHandbook/master/notebooks/data/BicycleWeather.csv'
counts = pd.read_csv(fremont_bridge, index_col='Date', parse_dates=True,
infer_datetime_format=True)
weather = pd.read_csv(bicycle_weather, index_col='DATE', parse_dates=True,
infer_datetime_format=True)
daily = counts.resample('d').sum()
daily['Total'] = daily.sum(axis=1)
daily = daily[['Total']] # remove other columns
weather_columns = ['PRCP', 'SNOW', 'SNWD', 'TMAX', 'TMIN', 'AWND']
daily = daily.join(weather[weather_columns], how='inner')
# Make a feature for yesterday's total
daily['Total_yesterday'] = daily.Total.shift(1)
daily = daily.drop(index=daily.index[0])
return daily
def split(daily):
# Hold out an "out-of-time" test set, from the last 100 days of data
train = daily[:-100]
test = daily[-100:]
X_train = train.drop(columns='Total')
y_train = train.Total
X_test = test.drop(columns='Total')
y_test = test.Total
return X_train, X_test, y_train, y_test
def jake_wrangle(X):
X = X.copy()
# patterns of use generally vary from day to day;
# let's add binary columns that indicate the day of the week:
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
for i, day in enumerate(days):
X[day] = (X.index.dayofweek == i).astype(float)
# we might expect riders to behave differently on holidays;
# let's add an indicator of this as well:
from pandas.tseries.holiday import USFederalHolidayCalendar
cal = USFederalHolidayCalendar()
holidays = cal.holidays('2012', '2016')
X = X.join(pd.Series(1, index=holidays, name='holiday'))
X['holiday'].fillna(0, inplace=True)
# We also might suspect that the hours of daylight would affect
# how many people ride; let's use the standard astronomical calculation
# to add this information:
def hours_of_daylight(date, axis=23.44, latitude=47.61):
"""Compute the hours of daylight for the given date"""
days = (date - pd.datetime(2000, 12, 21)).days
m = (1. - np.tan(np.radians(latitude))
* np.tan(np.radians(axis) * np.cos(days * 2 * np.pi / 365.25)))
return 24. * np.degrees(np.arccos(1 - np.clip(m, 0, 2))) / 180.
X['daylight_hrs'] = list(map(hours_of_daylight, X.index))
# temperatures are in 1/10 deg C; convert to C
X['TMIN'] /= 10
X['TMAX'] /= 10
# We can also calcuate the average temperature.
X['Temp (C)'] = 0.5 * (X['TMIN'] + X['TMAX'])
# precip is in 1/10 mm; convert to inches
X['PRCP'] /= 254
# In addition to the inches of precipitation, let's add a flag that
# indicates whether a day is dry (has zero precipitation):
X['dry day'] = (X['PRCP'] == 0).astype(int)
# Let's add a counter that increases from day 1, and measures how many
# years have passed. This will let us measure any observed annual increase
# or decrease in daily crossings:
X['annual'] = (X.index - X.index[0]).days / 365.
return X
def wrangle(X):
# From DS1 friends!
X = X.copy()
X = X.replace(-9999, 0)
X = jake_wrangle(X)
X['PRCP_yest'] = X.PRCP.shift(1).fillna(X.PRCP.mean())
X['Windchill'] = (((X['Temp (C)'] * (9/5) + 32) * .6215) + 34.74) - (35.75 * (X['AWND']** .16)) + (.4275 * (X['Temp (C)'])) * (X['AWND'] ** .16)
X['Rl_Cold'] = (((X['Temp (C)'] * (9/5) + 32) - X['Windchill']) -32) * (5/9)
X['TMIN_ln'] = X['TMIN'] **2
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
for i, month in enumerate(months):
X[month] = (X.index.month == i+1).astype(float)
return X
# Download and join data into a dataframe
data = load()
%%time
# Split data into train and test
X_train, X_test, y_train, y_test = split(data)
# Do the same wrangling to X_train and X_test
X_train = wrangle(X_train)
X_test = wrangle(X_test)
# Define an estimator and param_grid
pipe = make_pipeline(
RobustScaler(),
SelectKBest(f_regression),
Ridge())
param_grid = {
'selectkbest__k': range(1, len(X_train.columns)+1),
'ridge__alpha': [0.1, 1.0, 10.]
}
# Fit on the train set, with grid search cross-validation
gs = GridSearchCV(pipe, param_grid=param_grid, cv=3,
scoring='neg_mean_absolute_error',
verbose=1)
gs.fit(X_train, y_train)
validation_score = gs.best_score_
print('Best estimator:', gs.best_estimator_, '\n')
print('Cross-Validation Score:', -validation_score, '\n')
# Predict with X_test features
y_pred = gs.predict(X_test)
# Compare predictions to y_test labels
test_score = mean_absolute_error(y_test, y_pred)
print('Test Score:', test_score)
# Or use the grid search's score method,
# which combines these steps
test_score = gs.score(X_test, y_test)
print('Test Score:', -test_score)
# Inspect the fitted SelectKBest step from the best pipeline
selector = gs.best_estimator_.named_steps['selectkbest']
type(selector)
selector.get_support()
all_names = X_train.columns
all_names
# Which features were selected?
selector = gs.best_estimator_.named_steps['selectkbest']
all_names = X_train.columns
selected_mask = selector.get_support()
selected_names = all_names[selected_mask]
unselected_names = all_names[~selected_mask] # Inverting True and False
print('Features selected:')
for name in selected_names:
print(name)
print()
print('Features not selected:')
for name in unselected_names:
print(name)
###Output
Features selected:
PRCP
TMAX
TMIN
AWND
Total_yesterday
Mon
Tue
Wed
Thu
Sat
Sun
holiday
daylight_hrs
Temp (C)
dry day
annual
PRCP_yest
Windchill
Rl_Cold
TMIN_ln
Jan
Feb
Mar
May
Jun
Jul
Aug
Sep
Nov
Dec
Features not selected:
SNOW
SNWD
Fri
Apr
Oct
###Markdown
BONUS: Recursive Feature Elimination!https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFECV.html
###Code
from sklearn.feature_selection import RFECV
X_train_scaled = RobustScaler().fit_transform(X_train)
rfe = RFECV(Ridge(alpha=1.0), scoring='neg_mean_absolute_error', cv=3)
X_train_subset = rfe.fit_transform(X_train_scaled, y_train)
all_names = X_train.columns
selected_mask = rfe.support_
selected_names = all_names[selected_mask]
unselected_names = all_names[~selected_mask]
print('Features selected:')
for name in selected_names:
print(name)
print()
print('Features not selected:')
for name in unselected_names:
print(name)
X_train_subset = pd.DataFrame(X_train_subset, columns=selected_names)
X_train.shape, X_train_subset.shape
X_test_subset = rfe.transform(X_test)
X_test_subset = pd.DataFrame(X_test_subset, columns=selected_names)
X_test.shape, X_test_subset.shape
X_train_subset.head()
X_test_subset.head()
print(X_train.shape, X_train_subset.shape, X_test.shape, X_test_subset.shape)
###Output
(963, 35) (963, 24) (100, 35) (100, 24)
###Markdown
RFE again, but with polynomial features and interaction terms!
###Code
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(degree=2)
X_train_polynomial = poly.fit_transform(X_train)
print(X_train.shape, X_train_polynomial.shape)
from sklearn.feature_selection import RFECV
scaler = RobustScaler()
X_train_scaled = scaler.fit_transform(X_train_polynomial)
rfe = RFECV(Ridge(alpha=1.0), scoring='neg_mean_absolute_error',
step=10, cv=3, verbose=1)
X_train_subset = rfe.fit_transform(X_train_scaled, y_train)
all_names = poly.get_feature_names(X_train.columns)
selected_mask = rfe.support_
selected_names = [name for name, selected in zip(all_names, selected_mask) if selected]
print(f'{rfe.n_features_} Features selected:')
for name in selected_names:
print(name)
# Define an estimator and param_grid
ridge = Ridge()
param_grid = {
'alpha': [0.1, 1.0, 10.]
}
# Fit on the train set, with grid search cross-validation
gs = GridSearchCV(ridge, param_grid=param_grid, cv=3,
scoring='neg_mean_absolute_error',
verbose=1)
gs.fit(X_train_subset, y_train)
validation_score = gs.best_score_
print('Best estimator:', gs.best_estimator_, '\n')
print('Cross-Validation Score:', -validation_score, '\n')
# Do the same transformations to X_test
X_test_polynomial = poly.transform(X_test)
X_test_scaled = scaler.transform(X_test_polynomial)
X_test_subset = rfe.transform(X_test_scaled)
# Use the grid search's score method with X_test_subset
test_score = gs.score(X_test_subset, y_test)
print('Test Score:', -test_score)
X_test.shape, X_test_polynomial.shape, X_test_scaled.shape, X_test_subset.shape
###Output
_____no_output_____
###Markdown
Assignmentwith the Bank Marketing dataset:Do more *data cleaning*, *feature engineering*, and *cross-validation.*Do *Feature Selection* too, using any method(s), such as:- https://scikit-learn.org/stable/modules/feature_selection.html- https://www.kaggle.com/dansbecker/permutation-importance- https://www.statsmodels.org/dev/examples/notebooks/generated/ols.html- Human expertise / intuitionSubmit your predictions to *Kaggle*, and commit your code to *GitHub*!
###Code
!pip install category_encoders
import numpy as np
import pandas as pd
from sklearn.feature_selection import f_regression, SelectKBest, f_classif
from sklearn.linear_model import Ridge
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
import category_encoders as ce
def load_split():
# Retrieving files from github
!wget https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Sprint-4-Model-Validation/master/module-1-begin-modeling-process/bank-marketing/train_features.csv
!wget https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Sprint-4-Model-Validation/master/module-1-begin-modeling-process/bank-marketing/train_labels.csv
!wget https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Sprint-4-Model-Validation/master/module-1-begin-modeling-process/bank-marketing/test_features.csv
!wget https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Sprint-4-Model-Validation/master/module-1-begin-modeling-process/bank-marketing/sample_submission.csv
# Importing all 4 datasets
X_train = pd.read_csv('train_features.csv')
train_labels = pd.read_csv('train_labels.csv')
y_train = train_labels['y']
X_test = pd.read_csv('test_features.csv')
sample_submission = pd.read_csv('sample_submission.csv')
return X_train, train_labels, y_train, X_test, sample_submission
X_train, train_labels, y_train, X_test, sample_submission = load_split()
X_train.shape, y_train.shape, X_test.shape, sample_submission.shape
# making wrangle function
def wrangle(X):
    X = X.copy()   # copy() is a method call; referencing X.copy without parentheses was the original bug
    # Dropping id column
    X = X.drop(columns='id')
    # Feature that has True for calls to client exceeding 10
    X['too_many_calls'] = X['campaign'] > 10
    return X

# Wrangling train and test datasets
X_train = wrangle(X_train)
X_test = wrangle(X_test)
X_train.head()
X_train['campaign'].hist(bins=20)
# for column in X_train.columns:
# print(X_train[column].unique())
###Output
_____no_output_____
###Markdown
Baseline
###Code
pd.Series(y_train).value_counts(normalize=True)
majority_class = 0
y_pred = [majority_class] * len(y_train)
len(y_pred)
roc_auc_score(y_train, y_pred)
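# A constant majority-class prediction always yields ROC AUC = 0.5,
# which is the floor any real model has to beat.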
###Output
_____no_output_____
###Markdown
Model Val/Feature Selection
###Code
# Pipeline
pipe = make_pipeline(
ce.OneHotEncoder(use_cat_names=True),
RobustScaler(),
SelectKBest(f_classif),
LogisticRegression(max_iter=1000))
# Param grid
param_grid = {
'selectkbest__k': range(1, len(X_train.columns) + 1),
'logisticregression__solver': ['lbfgs', 'liblinear']
}
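# Note: k ranges over the number of raw columns; after one-hot encoding the pipeline
# actually sees more candidate features, so larger k values could also be explored.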
gs = GridSearchCV(pipe, param_grid=param_grid, cv=3, scoring='roc_auc', verbose=1)
gs.fit(X_train, y_train)
gs.best_estimator_
gs.best_score_
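# Sketch of another suggested feature-selection aid: permutation importance
# (assumption: scikit-learn >= 0.22 provides sklearn.inspection.permutation_importance;
# it re-scores the fitted pipeline repeatedly, so it can take a little while).
from sklearn.inspection import permutation_importance
perm = permutation_importance(gs.best_estimator_, X_train, y_train,
                              scoring='roc_auc', n_repeats=5, random_state=42)
pd.Series(perm.importances_mean, index=X_train.columns).sort_values(ascending=False).head(10)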
###Output
_____no_output_____ |
Linear Regression with Regularization/src/Activity 1/Activity 1 - Understanding Linear Regression.ipynb | ###Markdown
Understanding Linear RegressionBefore understanding what's linear regression, let us first learn about some key terms that will be used extensively in the further activity.1. Target: It is the feature that we want to predict.2. Error: It is the difference between the actual and the predicted value. It is also referred to as a Residual.3. Cost Function: It defines the total error of the model. It is this function that the model tries to optimize. In the case of Linear Regressionm, it is the mean of the sum of all the errors in the dataset, popularly known as the **mean squared error**.4. Gradient Descent: It tries to find the best set of coefficients which optimizes the Cost Function.---Now, talking about Linear Regression, it tries to fit a straight line between variables which optimizes the cost function or which explains the maximum variance of the target.The Equation of the straight line is **y = mx+c** where m is the slope of **x** and **c** is the intercept. This equation follows only when we have one independant feature. As the feature increases, the number of x's increases and the number of slopes increases.Whole process of Linear Regression can be summarized as:1. Randomly initialise the slopes/coefficients.2. Use Gradient Descent>Calculate Cost>Calculate the slope of the cost function.>Move in the direction of decreasing cost function by calculating the derivative.Update the parameters.Repeat the above steps untill the minimum value of cost function is not found.This was just a quick recap of what a Linear Regression does. If you want to have a detailed study about this algorithm, I have added links to youtube videos under the **Additional Material** at the last of the activity. So, you can have a look to that as well.For now, enough of theory. Let's begin with implementing Linear Regression and see what it can do.--- Importing LibrariesWe start by importing some libraries which will be required in the future.
###Code
import pandas as pd #to handle the dataset
import matplotlib.pyplot as plt #to draw plots
import seaborn as sns #custom library to plot more visually appealing plots
sns.set()
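# A minimal sketch of the gradient-descent loop summarised in the introduction above
# (assumptions: a single feature, NumPy only, illustrative learning rate and epoch count).
import numpy as np

def gradient_descent(x, y, lr=0.01, epochs=1000):
    m, c = 0.0, 0.0                              # initialise slope and intercept
    n = len(x)
    for _ in range(epochs):
        error = (m * x + c) - y                  # residuals of the current line
        m -= lr * (2 / n) * np.sum(error * x)    # d(MSE)/dm
        c -= lr * (2 / n) * np.sum(error)        # d(MSE)/dc
    return m, c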
###Output
_____no_output_____
###Markdown
Loading the Data Tasks:* read the csv file from the path: '../../data/data_cleaned.csv' using pandas.* look at top 5 rows of the dataset.
###Code
# read the file
#display top 5 rows
###Output
_____no_output_____
###Markdown
Solution ```pythondata = pd.read_csv('../../data/data_cleaned.csv')data.head()``` Here, we have to predict the price of a house based on other features. Hence, price will be our target or dependant feature. Whereas others will be the independant features.Now, look at the shape of the data using **shape** attribute of the pandas DataFrame.
###Code
#have a look at the shape of data
###Output
_____no_output_____
###Markdown
Solution ```pythondata.shape``` We can see that out data has **4600 records and 7 columns** Data AnalysisData Analysis is usually done before hand to see if there is anything in the data which can affect the performance of the model. Here, we will draw a pairplot from seaborn's library to visualize the whole dataset at once. A pairplot plots all the pairs of features that are present in the data. This way we can have a more compact and broader view of the data.* Call the *pairplot* function from seaborn library for the whole dataset.
###Code
#plot a pairplot
###Output
_____no_output_____
###Markdown
Solution ```pythonsns.pairplot(data)``` There are some conclusions that can be drawn from above pairplot. These are:1. In the last row, we can see 2-3 points that are unusually higher than the rest of the data. These are typically known as **outliers**. But for the sake of simplicity, we will not dive into its details.2. We can see a linear relationship between sqft_living and bathrooms which seems logical. Train and Test SplitsNow, we will divide our data into train and test splits. We will train our model on the train dataset and then test our model's performance on the test dataset.But first, let us seperate our dependand variable with the rest of independant variable.* store all the independant variables in a variable **X**.* store the dependant variable in a variable **y**.
###Code
# store independant variable in X
#store dependant variable in y
###Output
_____no_output_____
###Markdown
Solution ```pythonX = data.drop('price', axis = 1) independant featuresy = data.price dependant features``` Now, split them into training and test datasets in the ratio of 8:2 using **train_test_split** function. There should be 4 variables namely **X_train, X_test, y_train and y_test**
###Code
#import the function
from sklearn.model_selection import train_test_split
#call the train_test_split function
###Output
_____no_output_____
###Markdown
Solution ```pythonX_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.2, random_state= 42, shuffle= True)``` The shape of these variables will be like:
###Code
print("X_train: ", X_train.shape)
print('y_train: ', y_train.shape)
print('X_test: ', X_test.shape)
print('y_test: ', y_test.shape)
###Output
_____no_output_____
###Markdown
Fitting a Model* Initialize and fit the model to training data
###Code
#import the model
from sklearn.linear_model import LinearRegression
#initialize the model
#fit the model to the training data
###Output
_____no_output_____
###Markdown
Solution ```pythonmodel = LinearRegression()model.fit(X_train, y_train)``` Evaluate a modelNow, evaluate the model by:* making predictions on training data* making predictions on test data
###Code
#import the evaluation metric
from sklearn.metrics import mean_squared_error
#calculate error on training data
#calculate error on testing data
#print the errors
###Output
_____no_output_____
###Markdown
Solution ```pythontrain_error = mean_squared_error(y_train, model.predict(X_train))test_error = mean_squared_error(y_test, model.predict(X_test))print('Training Error: ', train_error)print('Testing Error: ', test_error)``` Make Predictions
###Code
bedrooms = int(input("Bedrooms: "))
bathrooms = float(input('Bathrooms: '))
sqft_living = float(input('sqft_living: '))
sqft_lot = float(input('sqft_lot: '))
floors = float(input('Floors: '))
condition = int(input('Condition: '))
predicted_price = model.predict([[bedrooms, bathrooms, sqft_living, sqft_lot, floors, condition]])
print("Expected Price: ", predicted_price)
###Output
_____no_output_____ |
DC_SO_(8).ipynb | ###Markdown
 CODE DUPLICATION - STACKOVERFLOW--- **Directories**
###Code
path_xlsx = '/content/drive/MyDrive/DC/xlsx'
path_pkl = '/content/drive/MyDrive/DC/pkl'
path_csv = '/content/drive/MyDrive/DC/csv'
path_model = '/content/drive/MyDrive/DC/model'
dataset = '/content/drive/MyDrive/DC'
###Output
_____no_output_____
###Markdown
 **Resources** **Google Drive connection**
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
###Markdown
 **Installations**
###Code
!pip install -q xlsxwriter #xlsx file writing engine
!pip install -q google_trans_new #patched Google Translator API with known bugs fixed
!pip install -q pandas_read_xml
###Output
_____no_output_____
###Markdown
 **Libraries**
###Code
import os
import requests
import time
import re
import pickle
import json
import nltk
import string
import math
from google_trans_new import google_translator
import numpy as np
import pandas as pd
import pandas_read_xml as pdx
from pandas_read_xml import flatten, fully_flatten, auto_separate_tables
from numpy import sqrt
from numpy import argmax
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
import nltk
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
from nltk import SnowballStemmer
from nltk.corpus import stopwords
from collections import Counter
from datetime import datetime
from sklearn import metrics
from gensim.models.keyedvectors import KeyedVectors
from sklearn.metrics.pairwise import cosine_similarity
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from gensim.models import Word2Vec
from numpy import arange
from numpy import argmax
from sklearn.metrics import *
from numpy import loadtxt
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
from tensorflow import keras
nltk.download('stopwords')
stops = set(stopwords.words('english'))
pd.set_option('display.max_colwidth', None)
###Output
[nltk_data] Downloading package stopwords to /root/nltk_data...
[nltk_data] Package stopwords is already up-to-date!
###Markdown
 **Functions**
###Code
def del_labels(content):
    content = re.sub("\n", '', content)
    content = re.sub("&lt;", '<', content)    # unescape HTML entities back into real characters
    content = re.sub("&gt;", '>', content)
    content = re.sub("&quot;", " ' ", content)
    content = re.sub("&amp;", " & ", content)
    content = re.sub('((<p>[\s\S]*?<code>) | ((<pre [\s\S]*?><code> | </code></pre>)) | (</code>[\s\S]*?</p>))', ' ', content)   # drop prose wrapped around code blocks
    content = re.sub("((<p>)[\s\S]*?(</p>))", '', content)   # remove paragraph (<p>) content
    content = re.sub("((<a)[\s\S]*?(<\/a>))", '', content)   # remove the content of A (link) tags
    content = re.sub("(<img.*>)", '', content)               # remove Img (image) tags
    content = re.sub("<\/?\w+>?", '', content)               # remove the remaining tags, keeping their content
    return content
###Output
_____no_output_____
###Markdown
 **Tags function**---
###Code
def del_tags(content):
# content = re.sub("<c>", 'cb ', content)
content = re.sub("<", '', content)
content = re.sub(">", ' ', content)
# content = re.sub("[++]", 'p', content)
return content
def tags(data):
    data = str(data)
    data = data.lower()   # convert to lowercase
    data = del_tags(data)
    return data
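# Quick check of the transformation (a typical Tags value from the dump):
tags('<php><mysqli>')   # -> 'php mysqli '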
###Output
_____no_output_____
###Markdown
 **Translation function**
###Code
translator = google_translator()
def translate(text):
if len(text) >= 5000:
text_len = int(len(text)/2)
while text[text_len] != ' ':
text_len = text_len + 1
text_aux = text[:text_len]
text_translate = translator.translate(text_aux, lang_tgt='en', lang_src='es')
text_len = text_len + 1
text_aux = text[text_len:]
text_translate = text_translate + translator.translate(text_aux, lang_tgt='en', lang_src='es')
else:
text_translate = translator.translate(text, lang_tgt='en', lang_src='es')
return text_translate
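# Example usage (kept commented out because it needs network access to the translation endpoint):
# translate('¿Cómo ordenar un arreglo en PHP?')   # roughly: 'How to sort an array in PHP?'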
def prepare(data):
    data = str(data)
    data = data.lower()    # convert to lowercase (normalisation)
    data = del_labels(data)
    return data
###Output
_____no_output_____
###Markdown
 **Data Filtering** **Reading the Posts.xml file**
###Code
df = pdx.read_xml(dataset + '/Posts.xml',['posts'])
df = df.pipe(flatten)
df = df.pipe(flatten)
###Output
_____no_output_____
###Markdown
 **Dropping unnecessary columns**
###Code
posts = df.drop(['row|@AcceptedAnswerId','row|@Title','row|@ViewCount','row|@CreationDate','row|@Score','row|@OwnerUserId','row|@LastEditorUserId','row|@LastEditDate','row|@LastActivityDate','row|@CommentCount','row|@FavoriteCount','row|@ContentLicense','row|@ParentId' ,'row|@LastEditorDisplayName','row|@OwnerDisplayName','row|@CommunityOwnedDate','row|@AnswerCount','row|@ClosedDate'], axis=1)
posts.head()
###Output
_____no_output_____
###Markdown
 **Filter by post type:** 1. Questions 2. Answers
###Code
masked = posts['row|@PostTypeId'] == '1'
posts = posts[masked]
posts.head()
###Output
_____no_output_____
###Markdown
 **Filter by the mysqli tag**
###Code
mask = posts['row|@Tags'].str.contains('<mysqli>')
posts = posts[mask]
###Output
_____no_output_____
###Markdown
 **Dropping the PostTypeId column**
###Code
posts = posts.drop(['row|@PostTypeId'], axis=1)
###Output
_____no_output_____
###Markdown
 **Renaming the columns**
###Code
posts.columns = ['Id', 'Body', 'Tags']
posts.columns
posts.head()
###Output
_____no_output_____
###Markdown
 **Saving the filtered Posts.xml data**
###Code
posts.to_excel(dataset + '/posts_data.xlsx', encoding='utf-8', engine='xlsxwriter')
###Output
_____no_output_____
###Markdown
 **SOES preprocessing**--- **Reading the filtered file**
###Code
f_posts = pd.read_excel(dataset + '/posts_data.xlsx')
f_posts['Body'][2]
f_posts.dtypes
###Output
_____no_output_____
###Markdown
 **Applying the text preprocessing**
###Code
f_posts['pre_body'] = f_posts.apply(lambda f_posts: prepare(f_posts['Body']), axis=1)
f_posts['Tags'] = f_posts.apply(lambda f_posts: tags(f_posts['Tags']), axis=1)
f_posts['len_body'] = f_posts.apply(lambda f_posts: len(f_posts['pre_body']), axis=1)
pp_body_df = f_posts[['Id','Body','Tags','pre_body','len_body']]
pp_body_df['pp_body'] = f_posts['pre_body'].str[:1000]
###Output
_____no_output_____
###Markdown
 **Saving the preprocessed data**
###Code
pp_body_df.to_excel(path_xlsx + '/soes_mysqli_00.xlsx', encoding='utf-8', engine='xlsxwriter', index=False)
pp_body_df.to_csv(path_csv + '/soes_mysqli_00.csv', encoding='utf-8', index=False)
pp_body_df.to_pickle(path_pkl + '/soes_mysqli_00.pkl')
pp_body_df.head()
###Output
_____no_output_____
###Markdown
 **Preparing the data to send to the Google Custom Search API**
###Code
uno = pd.DataFrame(pp_body_df[['Id','pp_body']][0:100], columns=['Id','pp_body'])
dos = pd.DataFrame(pp_body_df[['Id','pp_body']][100:200], columns=['Id','pp_body'])
tres = pd.DataFrame(pp_body_df[['Id','pp_body']][200:300], columns=['Id','pp_body'])
cuatro = pd.DataFrame(pp_body_df[['Id','pp_body']][300:400], columns=['Id','pp_body'])
cinco = pd.DataFrame(pp_body_df[['Id','pp_body']][400:500], columns=['Id','pp_body'])
seis = pd.DataFrame(pp_body_df[['Id','pp_body']][500:600], columns=['Id','pp_body'])
siete = pd.DataFrame(pp_body_df[['Id','pp_body']][600:700], columns=['Id','pp_body'])
ocho = pd.DataFrame(pp_body_df[['Id','pp_body']][700:800], columns=['Id','pp_body'])
nueve = pd.DataFrame(pp_body_df[['Id','pp_body']][800:900], columns=['Id','pp_body'])
diez = pd.DataFrame(pp_body_df[['Id','pp_body']][900:1000], columns=['Id','pp_body'])
once = pd.DataFrame(pp_body_df[['Id','pp_body']][1000:1100], columns=['Id','pp_body'])
doce = pd.DataFrame(pp_body_df[['Id','pp_body']][1100:1200], columns=['Id','pp_body'])
trece = pd.DataFrame(pp_body_df[['Id','pp_body']][1200:1300], columns=['Id','pp_body'])
catorce = pd.DataFrame(pp_body_df[['Id','pp_body']][1300:1400], columns=['Id','pp_body'])
quince = pd.DataFrame(pp_body_df[['Id','pp_body']][1400:1500], columns=['Id','pp_body'])
dieciseis = pd.DataFrame(pp_body_df[['Id','pp_body']][1500:1546], columns=['Id','pp_body'])
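# The same 100-row batches can be built in a loop instead of sixteen hand-written slices
# (equivalent sketch: batches[0] matches `uno`, batches[15] matches `dieciseis`):
batches = [pp_body_df[['Id', 'pp_body']].iloc[i:i + 100] for i in range(0, len(pp_body_df), 100)]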
###Output
_____no_output_____
###Markdown
 SO preprocessing--- **Reading the SO dataset**
###Code
so = pd.read_csv(dataset + '/QueryResults_Test.csv')
so = so.drop(['creationdate'], axis=1)
so.columns = ['Id', 'Body', 'Tags']
so.columns
len(so)
###Output
_____no_output_____
###Markdown
 **Applying the preprocessing**
###Code
so['pre_body'] = so.apply(lambda so: prepare(so['Body']), axis=1)
so['Tags'] = so.apply(lambda so: tags(so['Tags']), axis=1)
so.shape
###Output
_____no_output_____
###Markdown
 **Saving the preprocessed SO dataset**
###Code
so.to_excel(path_xlsx + '/so_mysqli_00.xlsx', encoding='utf-8', engine='xlsxwriter')
so.to_csv(path_csv + '/so_mysqli_00.csv', encoding='utf-8')
so.to_pickle(path_pkl + '/so_mysqli_00.pkl')
###Output
_____no_output_____
###Markdown
 Google Custom Search API--- **Setting up the API parameters**
###Code
# API KEY
API_KEY = "AIzaSyAbEATM1C5MvqtM8bYU0auxcV2rEODZE6g"
# Search engine ID
SEARCH_ENGINE_ID = "f49e5d2f8e1185464"
# Index of the first result to request
start = 1
###Output
_____no_output_____
###Markdown
 **Code queries with their top 5 results**
###Code
# for index, row in once.iterrows():
# # if index < 101:
# # if index >= 200:
# if index >= 1000:
# print(index)
q_ids = []
q_body = []
res_ids = []
res_body = []
links = []
error_body = []
print(" -- index --")
for index, row in dieciseis.iterrows():
# if index < 101:
# if index >= 200:
if index >= 1500:
query = row['pp_body']
url = f"https://www.googleapis.com/customsearch/v1?key={API_KEY}&cx={SEARCH_ENGINE_ID}&q={query}&start={start}"
data = requests.get(url).json()
for q in range(0,5):
try:
matches = re.findall(r"(?!questions\/)(\d*)(?=\/)", data['items'][q]['link'])
match = list(filter(None, matches))
except:
error_body.append(query)
continue
if not match:
print(data['items'][q]['link'])
continue
else:
q_ids.append(row['Id'])
q_body.append(row['pp_body'])
res_ids.append(match[0])
res_body.append(data['items'][q]['title'])
links.append(data['items'][q]['link'])
print("Else - ROW")
print(row['Id'], row['pp_body'], match[0], data['items'][q]['title'])
queries_results_df = pd.DataFrame(list(zip(q_ids, q_body, res_ids, res_body, links)), columns =['Id', 'body', 'id_res', 'pp_body','links'])
###Output
-- index --
Else - ROW
436513 function leereventos(){ global $eventosdbc; $evarray = []; $evs = $eventosdbc->rdb(); while($fila= mysqli_fetch_array($evs)){ $evento= new evento($fila[id],$fila[descripcion], $fila[tipo], $fila[timestamp], $fila[pos], $fila[profundidad], $fila[temp_agua], $fila[sal], $fila[fluor], $fila[conductividad], $fila[temp_aire], $fila[humedad], $fila[pres_atmos], $fila[vel_med_viento]); array_push($evarray, $evento); } return $evarray; } 436513 Error PHP undefined constant - Stack Overflow en español
Else - ROW
437719 protected function conectar (){ $this->conexion=mysqli_connect( $this->servidor, $this->usuario, $this->password, $this->basededatos); if (!$this->conexion) { die( ' conexion erronea ' . mysqli_connect_error()); } } protected function desconectar (){ mysqli_close($this->conexion); } public function ejecutar($query){ $this->conectar(); return mysqli_query($this->conexion, $query); $this->desconectar(); } 437719 Too many connections Mysql / PHP - Stack Overflow en español
Else - ROW
440591 name= ' nombres ' id= ' nombres ' class= ' form-control span8 tip ' type= ' text ' required/> $(document).ready(function () { $('#nombres').typeahead({ source: function (busqueda, resultado) { $.ajax({ url: ' consulta.php ' , data: 'busqueda=' + busqueda, datatype: ' json ' , type: ' post ' , success: function (data) { resultado($.map(data, function (item) { return item; })); } }); } }); }); <?phpinclude( ' conn.php ' );$palabraclave = strval($_post['busqueda']);$busqueda = ' {$palabraclave}% ' ;$consultadb = $conn->prepare( ' select * from clientes where nombr 440591 Autocompletar con PHP y MySQL - Stack Overflow en español
Else - ROW
440700 compras_productos insert 38422650 Hibernate: Repeated column in mapping for entity - Stack Overflow
Else - ROW
440700 compras_productos insert 64069005 Error executing DDL alter table if exists task.city - Stack Overflow
Else - ROW
440700 compras_productos insert 52138779 MySQL Error: #1142 - TRIGGER command denied - Stack Overflow
Else - ROW
440700 compras_productos insert 24727610 Spring Boot with Hibernate generating drop constraint errors on ...
Else - ROW
440700 compras_productos insert 47021237 <expr> expected, got '?' - Stack Overflow
Else - ROW
442022 $base = ' select empleados.id, empleados.nombre, empleados.id_area, area.nombrearea from empleados inner join area on empleados.id_area=area.id ' ;$nquery = mysqli_query($conn, $base); $base = ' select empleados.id, empleados.nombre, empleados.id_area, area.nombrearea, novedad.licencia1 from empleados inner join area on empleados.id_area=area.id inner join novedad ' ;$nquery = mysqli_query($conn, $base); 442022 Listados de empleados con y sin trabajo - Stack Overflow en español
Else - ROW
442478 create table `grupos` ( `grupoid` int(10) not null, `id_usuario` int(11) not null, `gnombre` varchar(100) not null, `descripcion` varchar(250) not null, `orden` varchar(11) not null, `activo` varchar(2) not null, `afechas` varchar(2) not null default 'no', `finicio` datetime not null default '0000-00-00 00:00:00', `ffinal` datetime not null default '0000-00-00 00:00:00') engine=myisam default charset=latin1;insert into `grupos` (`grupoid`, `id_usuario`, `gnombre`, `descripcion`, `orden`, `activo`, `afechas`, `finicio`, `ffinal`) values(1, 2604, 'libros', 'libros es', '1', '1', 'si', '2021-04-06 13:28:24', '2021-04-16 13:28:24'),(2, 2605, 'revistas', '', '1', '1', 'si', '2021-04-01 13:28:24', '2021-04-26 13:28:24'),(3, 2605, 'cuentos', '', '2', '1', 'no', '0000-00-00 00:00:00', '0000-00-00 00:00:00'); $congrupo = $cont->query("select * from grupos where id_usuario = 2605 and activo = 1 order by orden+0 asc"); 442478 Realizar una query - Stack Overflow en español
Else - ROW
442478 create table `grupos` ( `grupoid` int(10) not null, `id_usuario` int(11) not null, `gnombre` varchar(100) not null, `descripcion` varchar(250) not null, `orden` varchar(11) not null, `activo` varchar(2) not null, `afechas` varchar(2) not null default 'no', `finicio` datetime not null default '0000-00-00 00:00:00', `ffinal` datetime not null default '0000-00-00 00:00:00') engine=myisam default charset=latin1;insert into `grupos` (`grupoid`, `id_usuario`, `gnombre`, `descripcion`, `orden`, `activo`, `afechas`, `finicio`, `ffinal`) values(1, 2604, 'libros', 'libros es', '1', '1', 'si', '2021-04-06 13:28:24', '2021-04-16 13:28:24'),(2, 2605, 'revistas', '', '1', '1', 'si', '2021-04-01 13:28:24', '2021-04-26 13:28:24'),(3, 2605, 'cuentos', '', '2', '1', 'no', '0000-00-00 00:00:00', '0000-00-00 00:00:00'); $congrupo = $cont->query("select * from grupos where id_usuario = 2605 and activo = 1 order by orden+0 asc"); 259188 Problema al generar script sql, ERROR: Error 1064: You have an ...
Else - ROW
443305 //conection to server and database$servername = ' localhost ' ;$username = ' root ' ;$passwordserver = ' ' ;$dbname = ' lession ' ;//conection for mysqli$cnx = new mysqli($servername, $username, $passwordserver, $dbname);//check conecction if($cnx->connect_error){ die ( ' conection failed: ' .$cnx->connect_error);}$sql = ' select id, firstname, lastname, email, date from users ' ;$result = $cnx->query($sql);if ($result->num_rows > 0) { //out data for rows var_dump($result); var_dump($sql); while($row =$result->fetch_assoc()){ echo ' id: ' .$row['id']. ' - name: ' .$row['firstname']. ' ' .$row['lastname']. ' - email: ' .$row['email']. ' ' ; }}else{ echo ' 0 results ' ;} $cnx->close();?> ``` 443443 Error:SELECT command denied to user 'nailuj'@'localhost' for ...
Else - ROW
443305 //conection to server and database$servername = ' localhost ' ;$username = ' root ' ;$passwordserver = ' ' ;$dbname = ' lession ' ;//conection for mysqli$cnx = new mysqli($servername, $username, $passwordserver, $dbname);//check conecction if($cnx->connect_error){ die ( ' conection failed: ' .$cnx->connect_error);}$sql = ' select id, firstname, lastname, email, date from users ' ;$result = $cnx->query($sql);if ($result->num_rows > 0) { //out data for rows var_dump($result); var_dump($sql); while($row =$result->fetch_assoc()){ echo ' id: ' .$row['id']. ' - name: ' .$row['firstname']. ' ' .$row['lastname']. ' - email: ' .$row['email']. ' ' ; }}else{ echo ' 0 results ' ;} $cnx->close();?> ``` 443305 Tengo éste error Warning: Attempt to read property "num_rows" on ...
Else - ROW
443552 $('#insert_form').on( ' submit ' , function(event){ event.preventdefault(); if($('#nomcliente').val() == ' ' ){ alert( ' nombre del cliente es requerido. ' ); formdata += ' & ' + $('#submit_data').attr('name') + '=' + $('#submit_data').attr('value'); .done(function(data) { let res = json.parse(data); // si es true el estado if(res.status){ $('.success').fadein(); $('.success').html(res.message).delay(8000).fadeout(8000); $(frm)[0].reset(); $(frm).hide(); settimeout(function(){ location.href = 'account.php'; },9000); } else { for (let name in res.message) { $('.error').remove(); let msg = ' class= ' error ' >' + res.message[name] + ''; $(msg).insertafter($('[name=' + name + ']', '#upd_data')); $('.error').fadein().delay(5000).fadeout(5000); } } }) if (empty($_pos 12307112 Difference between $(document.body) and $('body') - Stack Overflow
Else - ROW
443552 $('#insert_form').on( ' submit ' , function(event){ event.preventdefault(); if($('#nomcliente').val() == ' ' ){ alert( ' nombre del cliente es requerido. ' ); formdata += ' & ' + $('#submit_data').attr('name') + '=' + $('#submit_data').attr('value'); .done(function(data) { let res = json.parse(data); // si es true el estado if(res.status){ $('.success').fadein(); $('.success').html(res.message).delay(8000).fadeout(8000); $(frm)[0].reset(); $(frm).hide(); settimeout(function(){ location.href = 'account.php'; },9000); } else { for (let name in res.message) { $('.error').remove(); let msg = ' class= ' error ' >' + res.message[name] + ''; $(msg).insertafter($('[name=' + name + ']', '#upd_data')); $('.error').fadein().delay(5000).fadeout(5000); } } }) if (empty($_pos 63921585 SharePoint online show or hide columns multiple fields required ...
Else - ROW
443552 $('#insert_form').on( ' submit ' , function(event){ event.preventdefault(); if($('#nomcliente').val() == ' ' ){ alert( ' nombre del cliente es requerido. ' ); formdata += ' & ' + $('#submit_data').attr('name') + '=' + $('#submit_data').attr('value'); .done(function(data) { let res = json.parse(data); // si es true el estado if(res.status){ $('.success').fadein(); $('.success').html(res.message).delay(8000).fadeout(8000); $(frm)[0].reset(); $(frm).hide(); settimeout(function(){ location.href = 'account.php'; },9000); } else { for (let name in res.message) { $('.error').remove(); let msg = ' class= ' error ' >' + res.message[name] + ''; $(msg).insertafter($('[name=' + name + ']', '#upd_data')); $('.error').fadein().delay(5000).fadeout(5000); } } }) if (empty($_pos 22979663 what can be the alternative for $('form')[0]? - Stack Overflow
Else - ROW
443552 $('#insert_form').on( ' submit ' , function(event){ event.preventdefault(); if($('#nomcliente').val() == ' ' ){ alert( ' nombre del cliente es requerido. ' ); formdata += ' & ' + $('#submit_data').attr('name') + '=' + $('#submit_data').attr('value'); .done(function(data) { let res = json.parse(data); // si es true el estado if(res.status){ $('.success').fadein(); $('.success').html(res.message).delay(8000).fadeout(8000); $(frm)[0].reset(); $(frm).hide(); settimeout(function(){ location.href = 'account.php'; },9000); } else { for (let name in res.message) { $('.error').remove(); let msg = ' class= ' error ' >' + res.message[name] + ''; $(msg).insertafter($('[name=' + name + ']', '#upd_data')); $('.error').fadein().delay(5000).fadeout(5000); } } }) if (empty($_pos 13815037 what does this jquery $('.some-class',$('#some-id')) mean? - Stack ...
Else - ROW
443552 $('#insert_form').on( ' submit ' , function(event){ event.preventdefault(); if($('#nomcliente').val() == ' ' ){ alert( ' nombre del cliente es requerido. ' ); formdata += ' & ' + $('#submit_data').attr('name') + '=' + $('#submit_data').attr('value'); .done(function(data) { let res = json.parse(data); // si es true el estado if(res.status){ $('.success').fadein(); $('.success').html(res.message).delay(8000).fadeout(8000); $(frm)[0].reset(); $(frm).hide(); settimeout(function(){ location.href = 'account.php'; },9000); } else { for (let name in res.message) { $('.error').remove(); let msg = ' class= ' error ' >' + res.message[name] + ''; $(msg).insertafter($('[name=' + name + ']', '#upd_data')); $('.error').fadein().delay(5000).fadeout(5000); } } }) if (empty($_pos 34331838 why does $('.classname') & document.getElementsByClassName ...
Else - ROW
443618 $query_parts = array(); for ($t=0;$t($fname);$t++) { $query_parts[] = ' ' ' . $fname[$t] . ' ' ' ; } for($i=1; $i<=$total; $i++){ $query_parts1[] = ' (' ' . date('y-m-d') . ' ',' ' . $_post[ ' lname$i ' ] . ' ',' ' . $_post[ ' lref$i ' ] . ' ', ' ' . $_post[ ' lobs$i ' ] . ' ') ' ; } ('t 109','t 108','t 12') ; ('2021-04-12','11111','1', '1'),('2021-04-12','22222','2', '2') ('t 109','2021-04-12','11111','1', '1'),('t 109','2021-04-12','22222','2', '2'),('t 108','2021-04-12','11111','1', '1'),('t 108','2021-04-12','22222','2', '2'),('t 12','2021-04-12','11111','1', '1'),('t 12','2021-04-12','22222','2', '2') 443618 Ordenar INSERT Mysql con 2 ciclos - Stack Overflow en español
Else - ROW
443940 id <?php if (isset($_post['ajaxdata'])) { $id = $_post[ ' item_id ' ]; $nombre_cliente = $_post[ ' nombre_cliente ' ]; if (empty($_post['nombre_cliente'])){ echo json_encode(['status'=> false, 'message'=> [ ' nombre_cliente ' => ' por favor, ingresé el nombre del cliente para continuar. ' ]]); exit; } if($id != '') { $stmt = $con->prepare( ' update cliente set nombre_cliente = ? where id_cliente = ? ' ); $stmt->bind_param( ' si ' , $nombre_cliente, $id ); if ($stmt->execute()) { echo json_encode(['status'=> true, 'message'=> ' los datos se actualizaron correctamente. ' ]); exit; } else { echo json_encode(['status'=> false, 'message'=> 443940 ¿Cómo evitar respuesta correcta, cuando la información enviada es ...
Else - ROW
444009 public function checkcantidad ($item, $devolverdatos){ $datoserror = new \stdclass; if(isset($item['valor1']) & & isset($item['valor2']) & & isset($item['valor3']) & & isset($item['valor4']) & & isset($item['valor5']) & & isset($item['valor6'])){ $query = ' select sum(valor1+valor2+valor3+valor4+valor5+valor6) as total from tabla1 where id= {$item['id']} if (total <> total = 'error') ' ; }else{ $datoserror->status = 1; $datoserror->mensaje = ' la suma no coincide con el registrado ' ; $devolverdatos->push($datoserror); return $item = 0; } return $item; } 444009 Actualizar datos si son iguales MySQL - Stack Overflow en español
Else - ROW
444577 sucursal $stmt = $con->prepare( ' update sucursal s set s.id_cliente = ?, s.tipo = ?, s.nombre = ?, s.ubicacion = ?, s.registro = ? inner join cliente c on s.id_cliente=c.id_cliente where s.id_sucursal = ? and c.id_usuario=? ' );$stmt->bind_param( ' issssii ' , $id_cliente, $tipo, $nombre, $ubicacion, $registro, $id_sucursal, $id_usuario ); $stmt->bind_param( ' issssii ' .... sucursal id_sucursal id_cliente tipo nombre ubicacion registro 1 2 x x x x 2 3 x x x x 3 2 x x x 444577 ¿Cómo actualizar datos en UPADATE INNER JOIN empleando ...
Else - ROW
444601 $( ' #export_excel ' ).submit(function(e) { console.log('exportar a excel'); e.preventdefault(); const postdata ={ search: $('#insumo_search').val(), franquicia:$('#franquicia').val() } $.post( './exportar_excel_prueba.php',postdata,function( ) { });}); 13327380 What does $('') mean in JavaScript? - Stack Overflow
Else - ROW
444601 $( ' #export_excel ' ).submit(function(e) { console.log('exportar a excel'); e.preventdefault(); const postdata ={ search: $('#insumo_search').val(), franquicia:$('#franquicia').val() } $.post( './exportar_excel_prueba.php',postdata,function( ) { });}); 53991282 checkbox determines mysqli query - Stack Overflow
Else - ROW
444601 $( ' #export_excel ' ).submit(function(e) { console.log('exportar a excel'); e.preventdefault(); const postdata ={ search: $('#insumo_search').val(), franquicia:$('#franquicia').val() } $.post( './exportar_excel_prueba.php',postdata,function( ) { });}); 44651464 Javascript AJAX using $(this) - Stack Overflow
Else - ROW
444601 $( ' #export_excel ' ).submit(function(e) { console.log('exportar a excel'); e.preventdefault(); const postdata ={ search: $('#insumo_search').val(), franquicia:$('#franquicia').val() } $.post( './exportar_excel_prueba.php',postdata,function( ) { });}); 36904733 Uncaught TypeError: $(...). is not a function... flagged after upgrade ...
Else - ROW
444601 $( ' #export_excel ' ).submit(function(e) { console.log('exportar a excel'); e.preventdefault(); const postdata ={ search: $('#insumo_search').val(), franquicia:$('#franquicia').val() } $.post( './exportar_excel_prueba.php',postdata,function( ) { });}); 3388019 $(this) OR event.target OR var input = $(this) - Stack Overflow
Else - ROW
444889 select id, (((acos(sin((4.127877*pi()/180)) * sin((lat*pi()/180)) + cos((4.127877*pi()/180)) * cos((lat*pi()/180)) * cos(((-73.618126- lon) * pi()/180)))) * 180/pi()) * 60 * 1.1515 * 1.609344) as distance from personas having distance <= 50 limit 1 $cercano=mysqli_query($con, ' select id, (((acos(sin((4.127877*pi()/180)) * sin((lat*pi()/180)) + cos((4.127877*pi()/180)) * cos((lat*pi()/180)) * cos(((-73.618126- lon) * pi()/180)))) * 180/pi()) * 60 * 1.1515 * 1.609344) as distance from personas having distance <= 5 limit 1 ' );$cercan = mysqli_fetch_assoc($cercano);$cerca = implode($cercan[0]); 444889 ¿Cómo obtener el valor devuelto por mysqli_fetch_assoc? - Stack ...
Else - ROW
444889 select id, (((acos(sin((4.127877*pi()/180)) * sin((lat*pi()/180)) + cos((4.127877*pi()/180)) * cos((lat*pi()/180)) * cos(((-73.618126- lon) * pi()/180)))) * 180/pi()) * 60 * 1.1515 * 1.609344) as distance from personas having distance <= 50 limit 1 $cercano=mysqli_query($con, ' select id, (((acos(sin((4.127877*pi()/180)) * sin((lat*pi()/180)) + cos((4.127877*pi()/180)) * cos((lat*pi()/180)) * cos(((-73.618126- lon) * pi()/180)))) * 180/pi()) * 60 * 1.1515 * 1.609344) as distance from personas having distance <= 5 limit 1 ' );$cercan = mysqli_fetch_assoc($cercano);$cerca = implode($cercan[0]); 444889 php - OBTENER UN VALOR DE CONSULTA SQL - Stack Overflow ...
https://stackoverflow.com/questions?page=10431&sort=newest
Else - ROW
446159 <?php$db = mysqli_connect('localhost', 'root', '', 'appsalon');$db->set_charset('utf8');if(!$db){ echo 'error en la conexión';}<?phpfunction obtenerservicios() : array { try { // importar una conexión require 'database.php'; $db->set_charset('utf8'); // escribir el código sql $sql = 'select * from servicios;'; $consulta = mysqli_query($db, $sql); // arreglo vacio $i = 0; $servicios = []; // obtener los resultados while($row = mysqli_fetch_assoc($consulta)){ $servicios[$i] ['id'] = $row['id']; $servicios[$i] ['nombre'] = $row['nombre']; $servicios[$i] ['precio'] = $row['precio']; $i++; } // echo ' ' ; // var_dump($servicios); // echo ' ' ; return $servicios; } catch (\throwable $th) { //throw $th; var_dump($th);} <?phprequire 'includes/funciones.php';$servicios = obtenerservicios();echo json_encode($servicios); 446159 Problema con el echo json_encode($servicios) - Stack Overflow en ...
Else - ROW
446890 html .flex { display: flex;}.justify-between { justify-content: space-between;}.table-header { padding: 2em;}.search-bar { position: relative; width: 34%;}input.go-seeker { border-radius: 1em; right: 0; transition: all .3s ease-in-out; width: 50%; position: absolute; padding: 1.2em; height: 2.4em; border: 1px solid #363535; background-color: #333; outline: none; color: #999898;}input.go-seeker:focus { width: 70%; right: 0em;}button.btn-search { display: block !important; background-color: transparent; border-radius: 50%; height: 2em; right: 2px; top: 0.3em; transition: all .3s ease-in-out; width: 2em; border: none; position: absolute; outline: none;}button.btn-search:before { content: ""; display: inline-block; background-size: cover; position: relative; top: 4px; width: 25px; height: 25px; background-image: url(../img/website/svg/search.svg);}.table { width: 100%; max 446890 ¿Cómo mostrar datos en paginación usando Ajax, JSON, PHP y ...
Else - ROW
447134 <?php$login = mysqli_query($base, ' select *from $tabla where user = '$user' and password = '$password' ' );if (count(mysqli_fetch_array($login)) > 0) { echo ' class= ' text ' >hola';}?> 447134 ¿Por qué MySQL no valida la contraseña si está en mayúscula o ...
http://es.stackoverflow.com/tags/phpmyadmin/hot?filter=all
Else - ROW
447134 <?php$login = mysqli_query($base, ' select *from $tabla where user = '$user' and password = '$password' ' );if (count(mysqli_fetch_array($login)) > 0) { echo ' class= ' text ' >hola';}?> 176012 me sale este error:Notice: Undefined index: id in C:\xampp\htdocs ...
Else - ROW
447134 <?php$login = mysqli_query($base, ' select *from $tabla where user = '$user' and password = '$password' ' );if (count(mysqli_fetch_array($login)) > 0) { echo ' class= ' text ' >hola';}?> 100239 ¿Por qué me sale el error "Call to undefined function mysql_connect ...
Else - ROW
447134 <?php$login = mysqli_query($base, ' select *from $tabla where user = '$user' and password = '$password' ' );if (count(mysqli_fetch_array($login)) > 0) { echo ' class= ' text ' >hola';}?> 430380 mysqli_fetch_assoc() expects parameter 1 to be mysqli_result ...
Else - ROW
447142 PHP no envia datos a base de datos mysqli - Stack Overflow en ...
Else - ROW
447559 Php no envia datos a base de datos mysqli cuando ... - Stack Overflow
Else - ROW
52114 Mysqli con php guarda todos los datos vacios - Stack Overflow en ...
Else - ROW
176012 me sale este error:Notice: Undefined index: id in C:\xampp\htdocs ...
Else - ROW
190779 Efectuar redireccion después de enviar correo en PHP - Stack ...
https://es.stackoverflow.com/sitemap-questions-1.xml
Else - ROW
448334 type=\ ' button\ ' class=\ ' btn btn-primary lapiz\ ' data-descr3=\ ' $marca_sub\ ' data- descr2=\ ' $nombre_sub\ ' data-descr1=\ ' $categoria\ ' data-descr=\ ' $id_sub\ ' data-toggle=\ ' modal\ ' data- target=\ ' .editar\ ' > class=\ ' fas fa-pen\ ' > class= ' modal fade editar ' tabindex= ' -1 ' role= ' dialog ' aria-labelledby= ' mylargemodallabel ' aria- hidden= ' true ' > class= ' modal-dialog modal-lg ' > class= ' modal-content ' > class= ' modal-header ' > class= ' modal-title ' id= ' examplemodallabel ' >editar sub categoría type= ' button ' class= ' close ' data-dismiss= ' modal ' aria-label= ' close ' > aria-hidden= ' true ' > & times; class= ' modal-body ' > action= ' bm.php ' method= ' post ' > class= ' form-group ' > name= ' id_cat1 ' type= ' text ' class= ' form-control ' value= ' ' > class= ' form-group ' > style= ' width: 100% ' >categoría name= ' categoria1 ' id= 448334 mostrar select option correcto cuando se le pasa un id mediante ...
Else - ROW
448770 <?php require '../../includes/config/database.php'; $db = conectardb(); if ($_server[ ' request_method ' ] === 'post') { echo ' ' ; var_dump($_post); echo ' ' ; $titulo = $_post['titulo']; $precio = $_post['precio']; $descripcion = $_post['descripcion']; $habitaciones = $_post['habitaciones']; $wc = $_post['wc']; $estacionamiento = $_post['estacionamiento']; $vendedor = $_post['vendedor']; } require '../../includes/funciones.php'; incluirtemplate('header'); ?> class= ' contenedor seccion ' > crear action= ' /admin/propiedades/crear.php ' method= ' post ' class= ' formulario ' > informacion general for= ' titulo ' >titulo name= ' titulo ' type= ' text ' id= ' titulo ' placeholder= ' titulo propiedad ' > for= ' precio ' >precio name= ' precio ' type= ' number ' id= ' precio ' placeholder= 448770 Error con el variable global $_POST en PHP no me retorna el dato ...
https://es.stackoverflow.com/q/460545
Else - ROW
449197 select personas.nombrefrom personas p, canciones c, discos d, canciones-discos cdwhere p.pid = d.compositor_id and c.cid = cd.cid and cd.dcid = d.dcid and d.compositor_id = (select compositor_id from personas p, canciones c, discos d, canciones-discos cd where p.pid = d.compositor_id and c.cid = cd.cid and cd.dcid = d.dcid personas.nombre = francisca valenzuela); 449197 En una base de datos en la que personas están agrupadas en una ...
Else - ROW
449399 sobre todo me gustaria saber si hay mas personas que tiene complicaciones usando este host a la hora de hacer conexiones con bd, sospecho que tal vez son las limitantes del plan gratis:(. ``` <?php $con = new mysqli( ' localhost ' , ' id16743170_root ' , ' <)u2cr+ {p{_52yjj ' , ' id16743170_sistemarepau ' ); if (isset($_post['save_fecha'])){ $lugar = $_post['lugar']; $fecha = $_post['fecha']; $hora = $_post['hora']; $tiempo = $_post['tiempo']; $query = ' insert into reservaciones (lugar,fecha,hora,tiempo) values ('$lugar','$fecha','$hora','$tiempo') ' ; $query_run = mysqli_query($con,$query); if ($query_run){ header( ' location: reservas.php ' ); mysqli_close ($con); }else{ 449399 ¿Alguien sabe si hay algun problema con mi codigo de conexion a ...
Else - ROW
449408 try{ $conexion->autocommit(false); $error = true;//esta es la primera insercion $sql= ' insert into tblentradas(entnrofactura, enttotal, entfecha, entproveedor, entestado, entusuario) values (?,?,?,?,?,?) ' ; $result=$conexion->consulta($sql,'sdsiii',array($nfacturac,$total,$fecha,$proveedor,$estado,$ruser)); if ( !$result) { $error = false; } // segunda insercion la realizo con un for que recorre una variable de session para el detalle de venta session_start(); $datos=($_session['carritocompras']); 449408 Insertar Venta y detalle de venta con Transacciones en mysql con ...
Else - ROW
451025 id= ' table ' class= ' table table-bordered border-success table-striped text-center ' > id class= ' w-25 ' >nombres cédula cargo nucleo fecha ingreso estado acciones class= ' empleados ' > <?php include_once 'empleados.php' ?> <?phpinclude( ' conn.php ' );$sql = mysqli_query($conn, ' select * from clientes ' );$row = mysqli_fetch_assoc($sql);while ($row = mysqli_fetch_assoc($sql)) { echo ' ' . $row['id'] . ' 451025 No me muestra el primer registro de mi tabla Datatables - Stack ...
Else - ROW
451575 select titulo from eventos where organizador= ' pepito ' $evento = mysqli_query($con, ' select titulo from eventos where organizador='$organizador' ' );$eve= mysqli_fetch_array($evento);for($cont2=0;$cont2($eve);$cont2++){ $eventos = mysqli_query($con, ' select titulo, fecha, hora, from eventos where titulo='$eve[$cont2]' ' ); $eve[0] 451575 consulta sql con varios resultados - Stack Overflow en español
Else - ROW
455016 id --- primary keyuidmailfecha insert into s1_subscripciones (uid, mail, fecha) values ('5', '[email protected]', '25-05-2021'); id --- 1uid ---5mail --- [email protected] --- 25-05-2021 id --- 6uid ---51mail --- [email protected] --- 25-05-2021 455016 Problemas con PRIMARY key - Stack Overflow en español
Else - ROW
455126 $(document).ready(function() { $( ' #nombres ' ).autocomplete({ source: ' buscarempleado.php ' , minlength: 2 }); $( ' #nombres ' ).focusout(function() { $.ajax({ url: 'empleado.php', type: 'post', datatype: 'json', data: { nombres: $('#nombres').val() } }).done(function(respuesta) { $( ' #cedula ' ).val(respuesta.cedula); $( ' #cargo ' ).val(respuesta.cargo); $( ' #proceso ' ).val(respuesta.proceso); $( ' #nucleo ' ).val(respuesta.nucleo); $( ' #eps ' ).val(respuesta.eps); }); }); }); <?php$matricula = $_get['term'];include ( ' conn.php ' ); $consulta = ' select nombres from clientes where nombres like '%$nombres%' ' ; $result = $conn->query($consulta); if($result->num_rows > 0){ while($fila = $result->fetch_array()){ 9899372 Pure JavaScript equivalent of jQuery's $.ready() - how to call a ...
Else - ROW
455126 $(document).ready(function() { $( ' #nombres ' ).autocomplete({ source: ' buscarempleado.php ' , minlength: 2 }); $( ' #nombres ' ).focusout(function() { $.ajax({ url: 'empleado.php', type: 'post', datatype: 'json', data: { nombres: $('#nombres').val() } }).done(function(respuesta) { $( ' #cedula ' ).val(respuesta.cedula); $( ' #cargo ' ).val(respuesta.cargo); $( ' #proceso ' ).val(respuesta.proceso); $( ' #nucleo ' ).val(respuesta.nucleo); $( ' #eps ' ).val(respuesta.eps); }); }); }); <?php$matricula = $_get['term'];include ( ' conn.php ' ); $consulta = ' select nombres from clientes where nombres like '%$nombres%' ' ; $result = $conn->query($consulta); if($result->num_rows > 0){ while($fila = $result->fetch_array()){ 3512445 $(document).ready() or $(function()) -- Which to use? - Stack Overflow
Else - ROW
455126 $(document).ready(function() { $( ' #nombres ' ).autocomplete({ source: ' buscarempleado.php ' , minlength: 2 }); $( ' #nombres ' ).focusout(function() { $.ajax({ url: 'empleado.php', type: 'post', datatype: 'json', data: { nombres: $('#nombres').val() } }).done(function(respuesta) { $( ' #cedula ' ).val(respuesta.cedula); $( ' #cargo ' ).val(respuesta.cargo); $( ' #proceso ' ).val(respuesta.proceso); $( ' #nucleo ' ).val(respuesta.nucleo); $( ' #eps ' ).val(respuesta.eps); }); }); }); <?php$matricula = $_get['term'];include ( ' conn.php ' ); $consulta = ' select nombres from clientes where nombres like '%$nombres%' ' ; $result = $conn->query($consulta); if($result->num_rows > 0){ while($fila = $result->fetch_array()){ 48504906 best way to call $(document).ready() function - Stack Overflow
Else - ROW
455126 $(document).ready(function() { $( ' #nombres ' ).autocomplete({ source: ' buscarempleado.php ' , minlength: 2 }); $( ' #nombres ' ).focusout(function() { $.ajax({ url: 'empleado.php', type: 'post', datatype: 'json', data: { nombres: $('#nombres').val() } }).done(function(respuesta) { $( ' #cedula ' ).val(respuesta.cedula); $( ' #cargo ' ).val(respuesta.cargo); $( ' #proceso ' ).val(respuesta.proceso); $( ' #nucleo ' ).val(respuesta.nucleo); $( ' #eps ' ).val(respuesta.eps); }); }); }); <?php$matricula = $_get['term'];include ( ' conn.php ' ); $consulta = ' select nombres from clientes where nombres like '%$nombres%' ' ; $result = $conn->query($consulta); if($result->num_rows > 0){ while($fila = $result->fetch_array()){ 10595913 jQuery $( function() {} ) and $(document).ready the same?
Else - ROW
455126 $(document).ready(function() { $( ' #nombres ' ).autocomplete({ source: ' buscarempleado.php ' , minlength: 2 }); $( ' #nombres ' ).focusout(function() { $.ajax({ url: 'empleado.php', type: 'post', datatype: 'json', data: { nombres: $('#nombres').val() } }).done(function(respuesta) { $( ' #cedula ' ).val(respuesta.cedula); $( ' #cargo ' ).val(respuesta.cargo); $( ' #proceso ' ).val(respuesta.proceso); $( ' #nucleo ' ).val(respuesta.nucleo); $( ' #eps ' ).val(respuesta.eps); }); }); }); <?php$matricula = $_get['term'];include ( ' conn.php ' ); $consulta = ' select nombres from clientes where nombres like '%$nombres%' ' ; $result = $conn->query($consulta); if($result->num_rows > 0){ while($fila = $result->fetch_array()){ 7108627 Can i use more than once $(document).ready() of jquery in a single ...
Else - ROW
455698 $fp = fopen( ' ../../set.txt ' , ' r ' ); $linea = fgets($fp); $data = explode( ' & & ; ' , $linea); $compid = $data[0]; fclose($fp); $ccode = $_get['cat']; $conectar = mysqli_connect( ' localhost ' , ' root ' , ' ' , ' ianseo ' ); mysqli_set_charset($conectar, ' utf8 ' ); $sql = ' select * from events where evcode = '$ccode' and evtournament = '$compid' ' ; $result = mysqli_query($conectar,$sql); while($mostrar=mysqli_fetch_array($result)){ $cat = $mostrar['eveventname']; } 455698 Hacer consulta sql con variable php - Stack Overflow en español
Else - ROW
455931 warning: mysqli_num_rows() expects parameter 1 to be mysqli_result, bool given in c:\nxammp\htdocs\proyecto_animales en peligro de exincion\pro.php on line 14 warning: mysqli_free_result() expects parameter 1 to be mysqli_result, bool given in c:\nxammp\htdocs\proyecto_animales en peligro de exincion\pro.php on line 27 <?php$usuario = $_post['usuario'];$contraseña = $_post['contraseña'];session_start();$_session['usuario'] = $usuario;include('conexion.php');$conexion = mysqli_connect( ' localhost ' , ' root ' , ' ' , ' registros ' );$consulta = ' select * from datos where usuario='$usuario' and contraseña='$contraseña' ' ;$resultado = mysqli_query($conexion,$consulta);$filas = mysqli_num_rows($resultado);if($filas) { header( ' location:paginaprincipal.php ' );} else { ?> <?php include( ' sesion.html ' ); ?> error en la autentificacion <?php}mysqli_free_result($resultado);mysqli_close($conexion);?> 455931 No sé cuál es el error - Stack Overflow en español
Else - ROW
455931 warning: mysqli_num_rows() expects parameter 1 to be mysqli_result, bool given in c:\nxammp\htdocs\proyecto_animales en peligro de exincion\pro.php on line 14 warning: mysqli_free_result() expects parameter 1 to be mysqli_result, bool given in c:\nxammp\htdocs\proyecto_animales en peligro de exincion\pro.php on line 27 <?php$usuario = $_post['usuario'];$contraseña = $_post['contraseña'];session_start();$_session['usuario'] = $usuario;include('conexion.php');$conexion = mysqli_connect( ' localhost ' , ' root ' , ' ' , ' registros ' );$consulta = ' select * from datos where usuario='$usuario' and contraseña='$contraseña' ' ;$resultado = mysqli_query($conexion,$consulta);$filas = mysqli_num_rows($resultado);if($filas) { header( ' location:paginaprincipal.php ' );} else { ?> <?php include( ' sesion.html ' ); ?> error en la autentificacion <?php}mysqli_free_result($resultado);mysqli_close($conexion);?> 105641 Warning: mysqli_query() expects parameter 1 to be mysqli
###Markdown
**Response from the Google Custom Search API**
###Code
data
###Output
_____no_output_____
###Markdown
**Saving the query results individually (100 results per day)**
###Code
unoF = queries_results_df
unoF.to_excel(path_xlsx + '/unoF.xlsx', encoding='utf-8', engine='xlsxwriter')
unoF.to_csv(path_csv + '/unoF.csv', encoding='utf-8')
dosF = queries_results_df
dosF.to_excel(path_xlsx +'/dosF.xlsx', encoding='utf-8', engine='xlsxwriter')
dosF.to_csv(path_csv + '/dosF.csv', encoding='utf-8')
tresF = queries_results_df
tresF.to_excel(path_xlsx +'/tresF.xlsx', encoding='utf-8', engine='xlsxwriter')
tresF.to_csv(path_csv + '/tresF.csv', encoding='utf-8')
cuatroF = queries_results_df
cuatroF.to_excel(path_xlsx +'/cuatroF.xlsx', encoding='utf-8', engine='xlsxwriter')
cuatroF.to_csv(path_csv + '/cuatroF.csv', encoding='utf-8')
cincoF = queries_results_df
cincoF.to_excel(path_xlsx +'/cincoF.xlsx', encoding='utf-8', engine='xlsxwriter')
cincoF.to_csv(path_csv + '/cincoF.csv', encoding='utf-8')
seisF = queries_results_df
seisF.to_excel(path_xlsx +'/seisF.xlsx', encoding='utf-8', engine='xlsxwriter')
seisF.to_csv(path_csv + '/seisF.csv', encoding='utf-8')
sieteF = queries_results_df
sieteF.to_excel(path_xlsx +'/sieteF.xlsx', encoding='utf-8', engine='xlsxwriter')
sieteF.to_csv(path_csv + '/sieteF.csv', encoding='utf-8')
ochoF = queries_results_df
ochoF.to_excel(path_xlsx +'/ochoF.xlsx', encoding='utf-8', engine='xlsxwriter')
ochoF.to_csv(path_csv + '/ochoF.csv', encoding='utf-8')
nueveF = queries_results_df
nueveF.to_excel(path_xlsx +'/nueveF.xlsx', encoding='utf-8', engine='xlsxwriter')
nueveF.to_csv(path_csv + '/nueveF.csv', encoding='utf-8')
diezF = queries_results_df
diezF.to_excel(path_xlsx +'/diezF.xlsx', encoding='utf-8', engine='xlsxwriter')
diezF.to_csv(path_csv + '/diezF.csv', encoding='utf-8')
onceF = queries_results_df
onceF.to_excel(path_xlsx +'/onceF.xlsx', encoding='utf-8', engine='xlsxwriter')
onceF.to_csv(path_csv + '/onceF.csv', encoding='utf-8')
doceF = queries_results_df
doceF.to_excel(path_xlsx +'/doceF.xlsx', encoding='utf-8', engine='xlsxwriter')
doceF.to_csv(path_csv + '/doceF.csv', encoding='utf-8')
treceF = queries_results_df
treceF.to_excel(path_xlsx +'/treceF.xlsx', encoding='utf-8', engine='xlsxwriter')
treceF.to_csv(path_csv + '/treceF.csv', encoding='utf-8')
catorceF = queries_results_df
catorceF.to_excel(path_xlsx +'/catorceF.xlsx', encoding='utf-8', engine='xlsxwriter')
catorceF.to_csv(path_csv + '/catorceF.csv', encoding='utf-8')
quinceF = queries_results_df
quinceF.to_excel(path_xlsx +'/quinceF.xlsx', encoding='utf-8', engine='xlsxwriter')
quinceF.to_csv(path_csv + '/quinceF.csv', encoding='utf-8')
dieciseisF = queries_results_df
dieciseisF.to_excel(path_xlsx +'/dieciseisF.xlsx', encoding='utf-8', engine='xlsxwriter')
dieciseisF.to_csv(path_csv + '/dieciseisF.csv', encoding='utf-8')
###Output
_____no_output_____
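###Markdown
**Note:** a minimal sketch of how the per-day exports above could be written as a single loop; `day_frames` below is illustrative and assumes the per-day dataframes (`unoF`, `dosF`, ...) have already been created as in the previous cell.
###Code
# hypothetical loop form of the per-day exports (same paths and formats as above)
day_frames = {'unoF': unoF, 'dosF': dosF, 'tresF': tresF} # extend with the remaining days
for stem, df in day_frames.items():
    df.to_excel(path_xlsx + '/' + stem + '.xlsx', encoding='utf-8', engine='xlsxwriter')
    df.to_csv(path_csv + '/' + stem + '.csv', encoding='utf-8')
###Output
_____no_output_____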
###Markdown
**Saving the results in a single file**
###Code
frames = [unoF, dosF, tresF, cuatroF, cincoF, seisF, sieteF, ochoF, nueveF, diezF, onceF, doceF, treceF, catorceF, quinceF, dieciseisF]
result = pd.concat(frames)
result
###Output
_____no_output_____
###Markdown
**First dataset**
###Code
result.to_excel(path_xlsx + '/query_final.xlsx', encoding='utf-8', engine='xlsxwriter')
result.to_csv(path_csv + '/query_final.csv', encoding='utf-8')
###Output
_____no_output_____
###Markdown
**Second dataset**
###Code
result.to_excel(path_xlsx + '/query_final_00.xlsx', encoding='utf-8', engine='xlsxwriter')
result.to_csv(path_csv + '/query_final_00.csv', encoding='utf-8')
###Output
_____no_output_____
###Markdown
Models--- **Reading the file with the duplicates labeled manually**
###Code
# q_final = pd.read_excel(dataset + '/query_final.xlsx', index_col=0)
# q_final.head()
q_final = pd.read_excel(path_xlsx + '/query_final_1908.xlsx', index_col=0)
q_final
es = q_final['Duplicidad'].notnull()
q_final = q_final[es]
q_final['Duplicidad'] = q_final['Duplicidad'].astype(int)
###Output
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:1: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
"""Entry point for launching an IPython kernel.
###Markdown
**Reading the SOES dataset**
###Code
soes_data = pd.read_excel(path_xlsx + '/soes_mysqli_00.xlsx')
soes_data.loc[:0]
###Output
_____no_output_____
###Markdown
**Reading the SO dataset**
###Code
so_data = pd.read_excel(path_xlsx + '/so_mysqli_00.xlsx', index_col=0)
so_data.loc[:0]
###Output
_____no_output_____
###Markdown
**Filter the matches**
###Code
es_en_matches_df = q_final.loc[q_final['id_res'].isin(so_data.Id)]
es_en_matches_df.head()
###Output
_____no_output_____
###Markdown
**Reset the index**
###Code
es_en_matches_df.reset_index(inplace=True)
es_en_matches_df.head()
###Output
_____no_output_____
###Markdown
**Build the dataframe of matched pairs**
###Code
matches_index = ['id_es', 'body_es', 'tags_es', 'id_en', 'body_en', 'tags_en']
matches_df = pd.DataFrame(columns = matches_index)
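# for each matched pair, pull Id/body/Tags from the SOES row and the corresponding SO row and append them as one record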
for index, row in es_en_matches_df.iterrows():
aux_soes_df = soes_data.loc[soes_data['Id'] == row['Id']]
aux_soes_df = aux_soes_df[['Id', 'pre_body', 'Tags']].astype(str)
aux_so_df = so_data.loc[so_data['Id'] == row['id_res']]
aux_so_df = aux_so_df[['Id', 'pre_body', 'Tags']].astype(str)
match_lst = aux_soes_df.values.tolist()[0] + aux_so_df.values.tolist()[0]
matches_df = matches_df.append(pd.Series(match_lst, index = matches_index), ignore_index=True)
matches_df
###Output
_____no_output_____
###Markdown
**Filter out empty bodies**
###Code
filtered_df = matches_df[matches_df['body_es'] != ' ']
filtered_df
matches_df['duplicate'] = es_en_matches_df.Duplicidad
matches_df
matches_df['duplicate'].value_counts()
###Output
_____no_output_____
###Markdown
**Save the data for model training**
###Code
matches_df.to_excel(path_xlsx +'/matches_complete.xlsx', encoding='utf-8', engine='xlsxwriter')
matches_df.to_csv(path_csv +'/matches_complete.csv', encoding='utf-8')
matches_df.to_pickle(path_pkl +'/matches_complete.pkl')
###Output
_____no_output_____
###Markdown
**MODELS**---
###Code
model_path = '/content/drive/MyDrive/DC/SO_vectors_200.bin'
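# load pretrained word2vec vectors from binary format (the file name suggests 200-dimensional vectors trained on Stack Overflow text)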
word_vect = KeyedVectors.load_word2vec_format(model_path, binary=True)
word_vect.init_sims(replace=True)  # normalize the vectors
matches_df = pd.read_pickle(path_pkl +'/matches_complete.pkl')
###Output
_____no_output_____
###Markdown
**Applying Word Mover's Distance**
###Code
matches_df['bodies_wmd'] = matches_df.apply(lambda matches_df: word_vect.wmdistance(matches_df['body_es'], matches_df['body_en']), axis=1)
matches_df['tags_wmd'] = matches_df.apply(lambda matches_df: word_vect.wmdistance(matches_df['tags_es'], matches_df['tags_en']), axis=1)
matches_df
matches_df.groupby(['duplicate']).count()
###Output
_____no_output_____
###Markdown
**Removing infinite values**
###Code
ex = matches_df['bodies_wmd'] != np.inf
matches_df = matches_df[ex]
matches_df.groupby(['duplicate']).count()
matches_df
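# relabel pairs whose body WMD is at most 0.10 as duplicates (1); pairs above the threshold keep their manual label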
ex = matches_df['bodies_wmd'] <= 0.10
test0 = matches_df[ex]
test0.groupby(['bodies_wmd'], sort=True).count()
test0['duplicate'] = 1
test0
ex = matches_df['bodies_wmd'] > 0.10
test00 = matches_df[ex]
test00.groupby(['bodies_wmd'], sort=True).count()
tp = [test0, test00]
mt_df = pd.concat(tp)
mt_df.groupby(['duplicate'], sort=True).count()
# ex = matches_df['duplicate'] == 1
# test = matches_df[ex]
# test.groupby(['bodies_wmd'], sort=True).count()
# dp = test['bodies_wmd'] < 0.80
# part1 = test[dp]
# ex = matches_df['duplicate'] == 0
# test2 = matches_df[ex]
# test2.groupby(['bodies_wmd'], sort=True).count()
# th = test2['bodies_wmd'] > 0.10
# part2 = test2[th]
# te = [part1, part2]
# mt_df = pd.concat(te)
mt_df.to_pickle(path_pkl +'/querys.pkl')
mt_df.groupby(['duplicate']).count()
mt_df.head(1)
###Output
_____no_output_____
###Markdown
**KERAS**
###Code
array = mt_df.values
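# columns 7:9 hold the WMD features (bodies_wmd, tags_wmd); column 6 is the duplicate label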
X = array[ : , 7:9]
Y = array[ : , 6]
test_size = 0.33
seed = 7
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y, test_size=test_size, random_state=seed)
X_train.shape
X_test.shape
X = np.asarray(X_train, dtype=np.float32)
Y = np.asarray(Y_train, dtype=np.int32)
X_test = np.asarray(X_test, dtype=np.float32)
Y_test = np.asarray(Y_test, dtype=np.float32)
X[:5]
Y[:5]
def plot_samples(Y_plot):
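    # bar chart of the class balance: count of label 0 (not duplicate) vs label 1 (duplicate)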
one_cnt = np.count_nonzero(Y_plot)
zero_cnt = len(Y_plot) - one_cnt
x_bar = [0, 1]
y_bar = [zero_cnt, one_cnt]
tick_label = ['0', '1']
plt.bar(x_bar, y_bar, tick_label = tick_label, color = ['red', 'green'])
Y_test
nodup = Y_test == 0
len(Y_test[nodup])
dup = Y_test == 1
len(Y_test[dup])
import matplotlib.pyplot as plt
plot_samples(Y_train)
import matplotlib.pyplot as plt
plot_samples(Y_test)
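# small feed-forward classifier: the two WMD features -> 12 -> 8 -> 1 sigmoid output, trained with binary cross-entropy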
model = Sequential()
model.add(Dense(12, input_dim=2, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[tf.keras.metrics.Recall()])
model.fit(X, Y, epochs=1000, batch_size=1)
predictions_keras = (model.predict(X_test) > 0.5).astype("int32")
cm_wmdft_nn = confusion_matrix(Y_test, predictions_keras)
print('Exactitud: %.2f' % accuracy_score(Y_test, predictions_keras))
print('Exhaustividad: %.2f' % recall_score(Y_test, predictions_keras))
print('Precisión: %.2f' % precision_score(Y_test, predictions_keras))
plt.figure(figsize = (10,7))
x_axis_labels = ['No Duplicadas Dect.', 'Duplicadas Dect.']
y_axis_labels = ['No Duplicadas', 'Duplicadas']
sn.set(font_scale=1.5)
sn.heatmap(cm_wmdft_nn, annot=True, xticklabels= x_axis_labels, yticklabels= y_axis_labels, cmap="Blues", fmt="d")
lr_auc = roc_auc_score(Y_test, predictions_keras)
lr_auc
# use the model's predicted probabilities as ROC scores (argmax over a single sigmoid output is always 0)
yhat_keras = model.predict(X_test).ravel()
fpr, tpr, thresholds = roc_curve(Y_test, yhat_keras)
gmeans = sqrt(tpr * (1-fpr))
ix = argmax(gmeans)
print('Best Threshold=%f, G-Mean=%.3f' % (thresholds[ix], gmeans[ix]))
# plot the roc curve for the model
plt.plot([0,1], [0,1], linestyle='--', label='No Skill')
plt.plot(fpr, tpr, marker='.', label='Logistic')
# axis labels
plt.xlabel('Tasa de falsos positivos')
plt.ylabel('Tasa de verdaderos positivos')
plt.scatter(fpr[ix], tpr[ix], marker='o', color='black', label='Best')
plt.xlabel('Tasa de falsos positivos')
plt.ylabel('Tasa de verdaderos positivos')
plt.legend()
# show the plot
plt.show()
###Output
Best Threshold=1.000000, G-Mean=0.000
###Markdown
RANDOM FOREST https://www.kaggle.com/prashant111/random-forest-classifier-tutorial
###Code
mt_df
X = mt_df[['bodies_wmd','tags_wmd']]
y = mt_df[['duplicate']]
# split the data into training and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33, random_state = 42)
# check the shape of X_train and X_test
X_train.shape, X_test.shape
from sklearn.ensemble import RandomForestClassifier
# instantiate the classifier
rfc = RandomForestClassifier(random_state=0)
# fit the model
rfc.fit(X_train, y_train)
# Predict the Test set results
y_pred = rfc.predict(X_test)
# Check accuracy score
from sklearn.metrics import accuracy_score
print('Model accuracy score with 10 decision-trees : {0:0.4f}'. format(accuracy_score(y_test, y_pred)))
predictions_rf = rfc.predict(X_test)
cm_wmdft_nn = confusion_matrix(y_test, predictions_rf)
print('Exactitud: %.2f' % accuracy_score(y_test, predictions_rf))
print('Exhaustividad: %.2f' % recall_score(y_test, predictions_rf))
print('Precisión: %.2f' % precision_score(y_test, predictions_rf))
plt.figure(figsize = (10,7))
x_axis_labels = ['No Duplicadas Pred.', 'Duplicadas Pred.']
y_axis_labels = ['No Duplicadas', 'Duplicadas']
sn.set(font_scale=1.5)
sn.heatmap(cm_wmdft_nn, annot=True, xticklabels= x_axis_labels, yticklabels= y_axis_labels, cmap="Blues", fmt="d")
print(classification_report(y_test,y_pred))
rfc_100 = RandomForestClassifier(n_estimators=100, random_state=0)
# fit the model to the training set
rfc_100.fit(X_train, y_train)
# Predict on the test set results
y_pred_100 = rfc_100.predict(X_test)
# Check accuracy score
print('Model accuracy score with 100 decision-trees : {0:0.4f}'. format(accuracy_score(y_test, y_pred_100)))
predictions_rf_100 = (rfc_100.predict(X_test) > 0.5).astype("int32")
cm_wmdft_nn = confusion_matrix(y_test, predictions_rf_100)
print('Exactitud: %.2f' % accuracy_score(y_test, predictions_rf_100))
print('Exhaustividad: %.2f' % recall_score(y_test, predictions_rf_100))
print('Precisión: %.2f' % precision_score(y_test, predictions_rf_100))
plt.figure(figsize = (10,7))
x_axis_labels = ['No Duplicadas Pred.', 'Duplicadas Pred.']
y_axis_labels = ['No Duplicadas', 'Duplicadas']
sn.set(font_scale=1.5)
sn.heatmap(cm_wmdft_nn, annot=True, xticklabels= x_axis_labels, yticklabels= y_axis_labels, cmap="Blues", fmt="d")
print(classification_report(y_test,y_pred_100))
from numpy import sqrt
from numpy import argmax
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
yhat = rfc.predict_proba(X_test)
yhat = yhat[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, yhat)
gmeans = sqrt(tpr * (1-fpr))
ix = argmax(gmeans)
print('Best Threshold=%f, G-Mean=%.3f' % (thresholds[ix], gmeans[ix]))
# plot the roc curve for the model
plt.plot([0,1], [0,1], linestyle='--', label='No Skill')
plt.plot(fpr, tpr, marker='.', label='Logistic')
# axis labels
plt.xlabel('Tasa de falsos positivos')
plt.ylabel('Tasa de verdaderos positivos')
plt.scatter(fpr[ix], tpr[ix], marker='o', color='black', label='Best')
plt.xlabel('Tasa de falsos positivos')
plt.ylabel('Tasa de verdaderos positivos')
plt.legend()
# show the plot
plt.show()
feature_list = list(X.columns)
# Import tools needed for visualization
from sklearn.tree import export_graphviz
import pydot
# Pull out one tree from the forest
tree = rfc.estimators_[5]
# Export the image to a dot file
export_graphviz(tree, out_file = 'tree.dot', feature_names = feature_list, rounded = True, precision = 1)
# Use dot file to create a graph
(graph, ) = pydot.graph_from_dot_file('tree.dot')
# Write graph to a png file
graph.write_png('tree.png')
###Output
_____no_output_____
###Markdown
SVM
###Code
mt_df
X = mt_df[['bodies_wmd','tags_wmd']]
y = mt_df[['duplicate']]
from sklearn.preprocessing import StandardScaler
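# standardize the two WMD features (zero mean, unit variance) before fitting the SVMs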
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)
###Output
_____no_output_____
###Markdown
Running SVM with default hyperparameter.
###Code
from sklearn.svm import SVC
from sklearn import metrics
# default-style SVC with capped iterations; probability=True enables predict_proba for the ROC curve below
svc_h = SVC(max_iter=20, probability=True)
svc_h.fit(X_train,y_train)
y_pred=svc_h.predict(X_test)
print('Accuracy Score:')
print(metrics.accuracy_score(y_test,y_pred))
predictions_svm = (svc_h.predict(X_test) > 0.5).astype("int32")
cm_wmdft_nn = confusion_matrix(y_test, predictions_svm)
print('Exactitud: %.2f' % accuracy_score(y_test, predictions_svm))
print('Exhaustividad: %.2f' % recall_score(y_test, predictions_svm))
print('Precisión: %.2f' % precision_score(y_test, predictions_svm))
plt.figure(figsize = (10,7))
x_axis_labels = ['No Duplicadas Pred.', 'Duplicadas Pred.']
y_axis_labels = ['No Duplicadas', 'Duplicadas']
sn.set(font_scale=1.5)
sn.heatmap(cm_wmdft_nn, annot=True, xticklabels= x_axis_labels, yticklabels= y_axis_labels, cmap="Blues", fmt="d")
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
yhat = svc_h.predict_proba(X_test)
yhat = yhat[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, yhat)
gmeans = sqrt(tpr * (1-fpr))
ix = argmax(gmeans)
print('Best Threshold=%f, G-Mean=%.3f' % (thresholds[ix], gmeans[ix]))
# plot the roc curve for the model
plt.plot([0,1], [0,1], linestyle='--', label='No Skill')
plt.plot(fpr, tpr, marker='.', label='Logistic')
# axis labels
plt.xlabel('Tasa de falsos positivos')
plt.ylabel('Tasa de verdaderos positivos')
plt.scatter(fpr[ix], tpr[ix], marker='o', color='black', label='Best')
plt.xlabel('Tasa de falsos positivos')
plt.ylabel('Tasa de verdaderos positivos')
plt.legend()
# show the plot
plt.show()
###Output
Best Threshold=0.368509, G-Mean=0.881
###Markdown
Default Linear kernel
###Code
svc=SVC(max_iter=40, kernel='linear')
svc.fit(X_train,y_train)
y_pred=svc.predict(X_test)
print('Accuracy Score:')
print(metrics.accuracy_score(y_test,y_pred))
predictions = (svc.predict(X_test) > 0.5).astype("int32")
cm_wmdft_nn = confusion_matrix(y_test, predictions)
print('Exactitud: %.2f' % accuracy_score(y_test, predictions))
print('Exhaustividad: %.2f' % recall_score(y_test, predictions))
print('Precisión: %.2f' % precision_score(y_test, predictions))
plt.figure(figsize = (10,7))
x_axis_labels = ['No Duplicadas Pred.', 'Duplicadas Pred.']
y_axis_labels = ['No Duplicadas', 'Duplicadas']
sn.set(font_scale=1.5)
sn.heatmap(cm_wmdft_nn, annot=True, xticklabels= x_axis_labels, yticklabels= y_axis_labels, cmap="Blues", fmt="d")
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
###Output
_____no_output_____
###Markdown
Default RBF kernel
###Code
svc=SVC(max_iter=20,kernel='rbf')
svc.fit(X_train,y_train)
y_pred=svc.predict(X_test)
print('Accuracy Score:')
print(metrics.accuracy_score(y_test,y_pred))
predictions = (svc.predict(X_test) > 0.5).astype("int32")
cm_wmdft_nn = confusion_matrix(y_test, predictions)
print('Exactitud: %.2f' % accuracy_score(y_test, predictions))
print('Exhaustividad: %.2f' % recall_score(y_test, predictions))
print('Precisión: %.2f' % precision_score(y_test, predictions))
plt.figure(figsize = (10,7))
x_axis_labels = ['No Duplicadas Pred.', 'Duplicadas Pred.']
y_axis_labels = ['No Duplicadas', 'Duplicadas']
sn.set(font_scale=1.5)
sn.heatmap(cm_wmdft_nn, annot=True, xticklabels= x_axis_labels, yticklabels= y_axis_labels, cmap="Blues", fmt="d")
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
###Output
_____no_output_____
###Markdown
Default Polynomial kernel
###Code
svc_poly=SVC(max_iter=85, kernel='poly')
svc_poly.fit(X_train,y_train)
y_pred=svc_poly.predict(X_test)
print('Accuracy Score:')
print(metrics.accuracy_score(y_test,y_pred))
predictions = (svc_poly.predict(X_test) > 0.5).astype("int32")
cm_wmdft_nn = confusion_matrix(y_test, predictions)
print('Exactitud: %.2f' % accuracy_score(y_test, predictions))
print('Exhaustividad: %.2f' % recall_score(y_test, predictions))
print('Precisión: %.2f' % precision_score(y_test, predictions))
plt.figure(figsize = (10,7))
x_axis_labels = ['No Duplicadas Pred.', 'Duplicadas Pred.']
y_axis_labels = ['No Duplicadas', 'Duplicadas']
sn.set(font_scale=1.5)
sn.heatmap(cm_wmdft_nn, annot=True, xticklabels= x_axis_labels, yticklabels= y_axis_labels, cmap="Blues", fmt="d")
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
###Output
precision recall f1-score support
0 0.94 0.96 0.95 137
1 0.55 0.43 0.48 14
accuracy 0.91 151
macro avg 0.74 0.70 0.72 151
weighted avg 0.91 0.91 0.91 151
###Markdown
**Saving the models**
###Code
import joblib
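# persist the trained models so the test section below can reload them without retraining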
# Keras
model.save(path_model + '/keras.h5')
# Random Forest
joblib.dump(rfc, path_model + '/rfc.joblib')
# Support Vector Machine
joblib.dump(svc_poly, path_model + '/svc_poly.joblib')
joblib.dump(svc_h, path_model + '/svc_h.joblib')
###Output
_____no_output_____
###Markdown
**QUERIES**
###Code
model_path = '/content/drive/MyDrive/DC/SO_vectors_200.bin'
word_vect = KeyedVectors.load_word2vec_format(model_path, binary=True)
word_vect.init_sims(replace=True)  # normalize the vectors
word_vect
###Output
_____no_output_____
###Markdown
**Load the models**
###Code
import joblib
keras_model = keras.models.load_model(path_model +'/keras.h5')
rf_model = joblib.load(path_model + '/rfc.joblib')
svc_poly_model = joblib.load(path_model + '/svc_poly.joblib')
svc_h_model = joblib.load(path_model + '/svc_h.joblib')
###Output
_____no_output_____
###Markdown
**Tests**
###Code
mt_df = pd.read_pickle(path_pkl +'/querys.pkl')
ex = mt_df['bodies_wmd'] < 0.10
mt_df = mt_df[ex]
mt_df.reset_index(drop=True, inplace=True)
mt_df
mt_df.shape
mt_df['body_es'].values[20]
mt_df['tags_es'].values[20]
mt_df['body_en'].values[20]
mt_df['tags_en'].values[20]
body_prueba_es = mt_df['body_es'].values[20]
tag_prueba_es = mt_df['tags_es'].values[20]
body_prueba_en = mt_df['body_en'].values[20]
tag_prueba_en = mt_df['tags_en'].values[20]
bod_wmd = word_vect.wmdistance(prepare(body_prueba_es), prepare(body_prueba_en))
tag_wmd = word_vect.wmdistance(tags(tag_prueba_es), tags(tag_prueba_en))
body_prueba_es = " select country,sum(visits) as visits,sum(visits-1) as repetidos, count(1) as total from stats group by country having count(1) > 1 "
tag_prueba_es = "php mysql mysqli "
body_prueba_en = " select country,sum(visits) as visits,sum(visits-1) as repetidos, count(1) as total from stats group by country having count(1) > 1 "
tag_prueba_en = "mysql "
bod_wmd = word_vect.wmdistance(prepare(body_prueba_es), prepare(body_prueba_en))
tag_wmd = word_vect.wmdistance(tags(tag_prueba_es), tags(tag_prueba_en))
###Output
_____no_output_____
###Markdown
**Keras test**
###Code
X_prueba = [[bod_wmd, tag_wmd]]
X_prueba = np.asarray(X_prueba, dtype=np.float32)
(keras_model.predict(X_prueba) > 0.5).astype("float32")
###Output
_____no_output_____
###Markdown
**Random Forest test**
###Code
X_prueba = [[bod_wmd, tag_wmd]]
X_prueba = np.asarray(X_prueba, dtype=np.float32)
(rf_model.predict(X_prueba) > 0.5).astype("int32")
###Output
_____no_output_____
###Markdown
**SVC Polynomial test**
###Code
X_prueba = [[bod_wmd, tag_wmd]]
X_prueba = np.asarray(X_prueba, dtype=np.float32)
(svc_poly_model.predict(X_prueba) > 0.5).astype("int32")
###Output
_____no_output_____
###Markdown
**SVM (default hyperparameters) test**
###Code
X_prueba = [[bod_wmd, tag_wmd]]
X_prueba = np.asarray(X_prueba, dtype=np.float32)
(svc_h_model.predict(X_prueba) > 0.5).astype("int32")
###Output
_____no_output_____ |
Python Absolute Beginner/Module_2.0_Tutorials_Functions.ipynb | ###Markdown
Module 2: Functions Functions Arguments & Parameters- **Creating a simple Function with a parameter**- Exploring Functions with `return` values - Creating Functions with multiple parameters- Sequence in python -----> Student will be able to - **create functions with a parameter** - create functions with a `return` value - create functions with multiple parameters- use knowledge of sequence in coding tasks - Use coding best practices Before you begin here, watch the Functions video in D2L. Concept Calling Functions with Arguments: print() is a function built in to Python!Functions are used for code tasks that are intended to be reused. For example, you have already used the print() function and passed it **arguments** by putting strings and variables into its parentheses. []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.vtt","srclang":"en","kind":"subtitles","label":"english"}])Python allows us to create **User Defined Functions** and provides many **Built-in Functions** such as **`print()`** - **`print()`** can be called using arguments (or without) and sends text to standard output, such as the console. - **`print()`** uses **Parameters** to define the variable Arguments that can be passed to the Function. - **`print()`** defines multiple string/numbers parameters which means we can send a long list of Arguments to **`print()`**, separated by commas. - **`print()`** can also be called directly with just its name and empty parentheses and it will return a blank line to standard output Examples
###Code
print('Hello World!', 'I am sending string arguments to print ')
student_age = 17
student_name = "Hiroto Yamaguchi"
print(student_name,'will be in the class for',student_age, 'year old students.')
print("line 1")
print("line 2")
# line 3 is an empty return - the default when no arguments
print()
print("line 4")
###Output
line 1
line 2
line 4
###Markdown
Task 1 Passing Arguments to `print()` Many Arguments can be passed to print - update the print statement to use **`print()`** with **8** or more arguments
###Code
#[ ] increase the number of arguments used in print() to 8 or more
student_age = 17
student_name = "Hiroto Yamaguchi"
print(student_name,'will be in the class for',student_age, 'year old students.', student_name,'is going to be in the class for', student_age, 'weeks.','It will be difficult for',student_name,'.')
###Output
Hiroto Yamaguchi will be in the class for 17 year old students. Hiroto Yamaguchi is going to be in the class for 17 weeks. It will be difficult for Hiroto Yamaguchi .
###Markdown
Concept Create a simple FunctionCreating user defined functions is at the core of computer programming. Functions enable code reuse and make code easier to develop and maintain.[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.vtt","srclang":"en","kind":"subtitles","label":"english"}]) basics of a user defined function- define a function with **`def`** - use indentation (4 spaces)- define parameters- optional parameters - **`return`** values (or none)- function scope (basics defaults) `def some_function():`use the **`def`** statement when creating a **function** - use a function name that **starts with a letter** or underscore (usually a lower-case letter)- function names can contain **letters, numbers or underscores**- parenthesis **()** follow the function name- a colon **:** follows the parenthesis- the code for the function is indented under the function definition (use 4 spaces for this course)```pythondef some_function(): code the function tasks indented here ```The **end of the function** is denoted by returning to **no indentation** Examples
###Code
# defines a function named say_hi
def say_hi():
print("Hello there!")
print("goodbye")
# define three_three
def three_three():
print(33)
###Output
_____no_output_____
###Markdown
Concept Call a function by nameCall a simple function using the function name followed by parenthesis. For instance, calling print is **`print()`** Examples
###Code
# Program defines and calls the say_hi & three_three functions
# [ ] review and run the code
def say_hi():
print("Hello there!")
print("goodbye")
# end of indentation ends the function
# define three_three
def three_three():
print(33)
# calling the functions
say_hi()
print()
three_three()
###Output
Hello there!
goodbye

33
###Markdown
Task 2 Define and call a simple function `yell_it()` `yell_it()` prints the phrase with "!" concatenated to the end- takes no arguments- indented function code does the following - define a variable called **`phrase`** and initialize it with a short *phrase* - prints **`phrase`** as all upper-case letters followed by "!"- call `yell_it` at the bottom of the cell after the function **`def`** (**Tip:** no indentation should be used)
###Code
#[ ] define (def) a simple function called yell_it() and call the function
def yell_it():
    phrase = "save the notebook"
    print(phrase.upper() + "!")
yell_it()
###Output
SAVE THE NOTEBOOK!
###Markdown
Concept Functions that have Parameters[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.vtt","srclang":"en","kind":"subtitles","label":"english"}])**`print()`** and **`type()`** are examples of built-in functions that have **Parameters** defined **`type()`** has a parameter for a **Python Object** and sends back the *type* of the object an **Argument** is a value given for a parameter when calling a function - **`type`** is called providing an **Argument** - in this case the string *"Hello"*```pythontype("Hello")``` Defining Function Parameters- Parameters are defined inside of the parenthesis as part of a function **`def`** statement- Parameters are typically copies of objects that are available for use in function code```pythondef say_this(phrase): print(phrase)``` Function can have default Arguments- Default Arguments are used if no argument is supplied- Default arguments are assigned when creating the parameter list```pythondef say_this(phrase = "Hi"): print(phrase)``` Examples
###Code
# yell_this() yells the string Argument provided
def yell_this(phrase):
print(phrase.upper() + "!")
# call function with a string
yell_this("It is time to save the notebook")
# use a default argument
def say_this(phrase = "Hi"):
print(phrase)
say_this()
say_this("Bye")
###Output
Hi
Bye
###Markdown
Task 3 Define `yell_this()` and call with variable argument - define variable **`words_to_yell`** as a string gathered from user `input()`- Call **`yell_this()`** with **`words_to_yell`** as argument- get user input() for the string words_to_yell
###Code
# [ ] get user input in variable words_to_yell
words_to_yell = "I like to yell!"
# [ ] define yell_this()
def yell_this(phrase=words_to_yell):
    print(phrase)
# [ ] call yell_this function with words_to_yell as argument
yell_this(words_to_yell)
###Output
I like to yell!
###Markdown
Module 2 Part 2 Functions Arguments & Parameters- Creating a simple Function with a parameter- **Exploring Functions with `return` values** - **Creating Functions with multiple parameters** - Sequence in python -----> Student will be able to - create functions with a parameter - **create functions with a `return` value**- **create functions with multiple parameters**- use knowledge of sequence in coding tasks - Use coding best practices Concepts Calling a function with a return value []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/db990568-d940-4ede-a063-7e40ed25c978/Unit1_Section3.2-function-return.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/db990568-d940-4ede-a063-7e40ed25c978/Unit1_Section3.2-function-return.vtt","srclang":"en","kind":"subtitles","label":"english"}])- **`type()`** returns an object type- **`type()`** can be called with a float the return value can be stored in a variable```pythonobject_type = type(2.33)``` creating a function with a return value - **`return`** keyword in a function *returns* a value after *exiting* the function ```pythondef msg_double(phrase): double = phrase + " " + phrase return double``` Examples review and run the code
###Code
# Message double returns the string Argument doubled
def msg_double(phrase):
double = phrase + " " + phrase
return double
# save return value in variable
msg_2x = msg_double("let's go")
print(msg_2x)
# example of functions with return values used in functions
def msg_double(phrase):
double = phrase + " " + phrase
return double
# prints the returned object
print(msg_double("Save Now!"))
# echo the type of the returned object
type(msg_double("Save Now!"))
###Output
Save Now! Save Now!
###Markdown
Task 4 Doctor: a function that adds the "Doctor" title to a name- Define function `make_doctor()` that takes a parameter `name`- get user **input** for variable **`full_name`**- call the function using `full_name` as argument- print the return value
###Code
# create and call make_doctor() with full_name argument from user input - then print the return value
def make_doctor(phrase = "John"):
double = phrase + "John"
return double
full_name = (make_doctor("Dr."))
print(full_name)
###Output
Dr.John
###Markdown
Concepts Functions with multiple parametersFunctions can have multiple parameters separated by commas[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d82c3856-61ff-4fa3-9a20-df8f6ea4dd7a/Unit1_Section3.2-MultiParam_Function.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d82c3856-61ff-4fa3-9a20-df8f6ea4dd7a/Unit1_Section3.2-MultiParam_Function.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Example review and run the code
###Code
def make_schedule(period1, period2):
schedule = ("[1st] " + period1.title() + ", [2nd] " + period2.title())
return schedule
student_schedule = make_schedule("mathematics", "history")
print("SCHEDULE:", student_schedule)
###Output
SCHEDULE: [1st] Mathematics, [2nd] History
###Markdown
Task 5 Define `make_schedule()` adding a 3rd period to - Start with the above example code- add a parameter period_3- update function code to add period_3 to the schedule- call **`student_schedule()`** with an additional argument such as 'science'- print the schedule
###Code
# [ ] add a 3rd period parameter to make_schedule
def make_schedule(period1, period2, period3):
schedule = ("[1st] " + period1.title() + ", [2nd] " + period2.title() +", [3rd] " + period3.title())
return schedule
student_schedule = make_schedule("mathematics", "history", "science")
print("SCHEDULE:", student_schedule)
# [ ] Optional - print a schedule for 6 classes (Tip: perhaps let the function make this easy)
###Output
SCHEDULE: [1st] Mathematics, [2nd] History, [3rd] Science
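###Markdown
For the optional six-class schedule, one possible approach (only a sketch, with invented class names) is to loop over a list of class names instead of adding six separate parameters.
###Code
# build a schedule of any length from a list of class names
def make_long_schedule(classes):
    schedule = ""
    period = 1
    for class_name in classes:
        schedule = schedule + "[" + str(period) + "] " + class_name.title() + "  "
        period = period + 1
    return schedule

six_classes = ["math", "history", "science", "art", "music", "gym"]
print("SCHEDULE:", make_long_schedule(six_classes))
###Output
_____no_output_____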
###Markdown
Module 2 Part 3 Functions Arguments & Parameters- Creating a simple Function with parameters- Exploring Functions with `return` values - Creating Functions with multiple parameters- **Sequence in python** -----> Student will be able to - create functions with a parameter - create functions with a `return` value - create functions with multiple parameters- **use knowledge of sequence in coding tasks** - **Use coding best practices** Concept SequenceIn programming, **sequence** refers to the order that code is processed. Objects in Python, such as variables and functions, are not available until they have been processed. Processing sequence flows from the top of a page of code to the bottom. This often means that **Function definitions are placed at the beginning of a page of code.**[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29ebdee3-33e8-487f-9c73-621219e5e6d2/Unit1_Section3.3-Object_Sequence.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29ebdee3-33e8-487f-9c73-621219e5e6d2/Unit1_Section3.3-Object_Sequence.vtt","srclang":"en","kind":"subtitles","label":"english"}])In the sample below, the function **`hat_color`** cannot be accessed since it is initialized after it is called at the bottom of the code. ```pythonhave_hat = hat_available('green') print('hat available is', have_hat)def hat_available(color): hat_colors = 'black, red, blue, green, white, grey, brown, pink' return(color.lower() in hat_colors)``` This results in an error - the code flows from top to bottom is in the incorrect **sequence** ```pythonNameError: name 'hat_available' is not defined```In the statement **`have_hat = hat_available('green')`** the function **`hat_available()`** needs to be called after the function has been defined> **Note:** an argument or variable is said to be **hard coded** when assigned a literal or constant value. It is a good habit to avoid creating hard coded values in functions, such as `hat_colors = 'black, red, blue, green, white, grey, brown, pink'` Examples
###Code
# review and run code - note: fix error in the following "tasks" section
have_hat = hat_available('green')

print('hat available is', have_hat)

def hat_available(color):
    hat_colors = 'black, red, blue, green, white, grey, brown, pink'
    # return Boolean
    return(color.lower() in hat_colors)

###Output
NameError: name 'hat_available' is not defined
###Markdown
Task 6 Change the Sequence to fix the `NameError`- [ ] fix the code **sequence** so the **`hat_available()`** function is available when called and the code runs without error
###Code
# [ ] fix the sequence of the code to remove the NameError
def hat_available(color):
    hat_colors = 'black, red, blue, green, white, grey, brown, pink'
    return(color.lower() in hat_colors)

have_hat = hat_available('green')

print('hat available is', have_hat)

###Output
hat available is True
###Markdown
Concepts Programming Style Tip: Avoid Hard-Coding "Hard-coding" is placing data values directly into codeAn example of hard-coding from above is **`have_hat = hat_available('green')`** where the argument `'green'` is hard-codedA programming best practice is to **avoid hard-coding values when possible**- Use varibles and verse hard-coded - Often preferable to use input such as a configuration file (advanced topic) or user input.These practices allow changing the data without disturbing the main code and makes code more reusable. Task 7 Program: bird_availableThe program should ask for user to "input a bird name to check for availability" and print a statement informing of availability[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/767e4db3-7909-4829-99db-fd6750ea5d54/Unit1_Section3.3-Bird_Available.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/767e4db3-7909-4829-99db-fd6750ea5d54/Unit1_Section3.3-Bird_Available.vtt","srclang":"en","kind":"subtitles","label":"english"}]) create this program with a Boolean function `bird_available()`- has parameter that takes the name of a type of bird- for this exercise the variable `bird_types = 'crow robin parrot eagle sandpiper hawk piegon'`- return `True` or `False` (we are making a Boolean function)- call the function using the name of a bird type from user input- print a sentence that indicates the availablity of the type of bird checked
###Code
# [ ] create function bird_available
# [ ] user input
def bird_available(bird):
bird_types = 'crow, robin, parrot, eagle, sandpiper, hawk, piegon'
return(bird.lower() in bird_types)
# [ ] call bird_available
bird_available('crow')
# [ ] print availability status
print('It is',bird_available('crow'),'we have crows.')
###Output
It is True we have crows.
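###Markdown
A cautionary sketch related to this answer: testing membership in one long string can match partial words, because `in` checks for substrings; splitting the string into a list only matches whole names. The bird names are the ones from the exercise.
###Code
bird_types = 'crow, robin, parrot, eagle, sandpiper, hawk, piegon'

# substring check: 'row' is not a bird, but it is inside 'crow'
print('row' in bird_types)        # prints: True

# list check: only whole names match
bird_list = bird_types.split(", ")
print('row' in bird_list)         # prints: False
print('hawk' in bird_list)        # prints: True
###Output
_____no_output_____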
###Markdown
Task 8 Fix The Error
###Code
# define function how_many
def how_many():
requested = input("enter how many you want: ")
return requested
# get the number_needed
number_needed = how_many()
print(number_needed, "will be ordered")
###Output
2 will be ordered
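###Markdown
A small follow-up sketch (the doubling is just for illustration): `input()` always returns a string, so if the quantity will be used in arithmetic it can be converted with `int()`.
###Code
def how_many():
    requested = input("enter how many you want: ")
    # int() assumes the user typed a whole number
    return int(requested)

number_needed = how_many()
print(number_needed, "will be ordered, and twice that is", number_needed * 2)
###Output
_____no_output_____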
###Markdown
Absolute Beginner Module 2: Functions (P1M2) Functions Arguments & Parameters- **Creating a simple Function with a parameter**- Exploring Functions with `return` values - Creating Functions with multiple parameters- Sequence in python -----> Student will be able to - **create functions with a parameter** - create functions with a `return` value - create functions with multiple parameters- use knowledge of sequence in coding tasks - Use coding best practices Before you begin here, watch the Functions video in D2L. Concept Calling Functions with Arguments: print() is a function built in to Python!Functions are used for code tasks that are intended to be reused. For example, you have already used the print() function and passed it **arguments** by putting strings and variables into its parentheses. [](https://youtu.be/4mE3mL6oQX0)Python allows us to create **User Defined Functions** and provides many **Built-in Functions** such as **`print()`** - **`print()`** can be called using arguments (or without) and sends text to standard output, such as the console. - **`print()`** uses **Parameters** to define the variable Arguments that can be passed to the Function. - **`print()`** defines multiple string/numbers parameters which means we can send a long list of Arguments to **`print()`**, separated by commas. - **`print()`** can also be called directly with just its name and empty parentheses and it will return a blank line to standard output Examples
###Code
print('Hello World!', 'I am sending string arguments to print ')
student_age = 17
student_name = "Hiroto Yamaguchi"
print(student_name,'will be in the class for',student_age,'year old students.')
print("line 1")
print("line 2")
# line 3 is an empty return - the default when no arguments
print()
print("line 4")
###Output
_____no_output_____
###Markdown
Task 1 Passing Arguments to `print()` Many Arguments can be passed to print - update the print statement to use **`print()`** with **8** or more arguments
###Code
#[ ] increase the number of arguments used in print() to 8 or more
student_age = 17
student_name = "Hiroto Yamaguchi"
print(student_name,'will be in the class for',student_age, 'year old students.')
###Output
_____no_output_____
###Markdown
Concept Create a simple FunctionCreating user defined functions is at the core of computer programming. Functions enable code reuse and make code easier to develop and maintain. basics of a user defined function- define a function with **`def`** - use indentation (4 spaces)- define parameters- optional parameters - **`return`** values (or none)- function scope (basics defaults) `def some_function():`use the **`def`** statement when creating a **function** - use a function name that **starts with a letter** or underscore (usually a lower-case letter)- function names can contain **letters, numbers or underscores**- parenthesis **()** follow the function name- a colon **:** follows the parenthesis- the code for the function is indented under the function definition (use 4 spaces for this course)```pythondef some_function(): code the function tasks indented here ```The **end of the function** is denoted by returning to **no indentation** Examples
###Code
# defines a function named say_hi
def say_hi(): #defining the function
print("Hello there!")
print("goodbye")
say_hi() #calling the function
# define three_three
def three_three():
print(33)
three_three()
###Output
_____no_output_____
###Markdown
Concept Call a function by nameCall a simple function using the function name followed by parenthesis. For instance, calling print is **`print()`** Examples
###Code
# Program defines and calls the say_hi & three_three functions
# [ ] review and run the code
#def say_hi():
# print("Hello there!")
# print("goodbye")
# end of indentation ends the function
# define three_three
#def three_three():
# print(33)
# calling the functions
say_hi()
print()
three_three()
###Output
_____no_output_____
###Markdown
Task 2 Define and call a simple function `yell_it()` `yell_it()` prints the phrase with "!" concatenated to the end- takes no arguments- indented function code does the following - define a variable called **`phrase`** and initialize it with a short *phrase* - prints **`phrase`** as all upper-case letters followed by "!"- call `yell_it` at the bottom of the cell after the function **`def`** (**Tip:** no indentation should be used)
###Code
#[ ] define (def) a simple function called yell_it() and call the function
###Output
_____no_output_____
###Markdown
Module 2: Functions Functions Arguments & Parameters- **Creating a simple Function with a parameter**- Exploring Functions with `return` values - Creating Functions with multiple parameters- Sequence in python -----> Student will be able to - **create functions with a parameter** - create functions with a `return` value - create functions with multiple parameters- use knowledge of sequence in coding tasks - Use coding best practices Before you begin here, watch the Functions video in D2L. Concept Calling Functions with Arguments: print() is a function built in to Python!Functions are used for code tasks that are intended to be reused. For example, you have already used the print() function and passed it **arguments** by putting strings and variables into its parentheses. []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.vtt","srclang":"en","kind":"subtitles","label":"english"}])Python allows us to create **User Defined Functions** and provides many **Built-in Functions** such as **`print()`** - **`print()`** can be called using arguments (or without) and sends text to standard output, such as the console. - **`print()`** uses **Parameters** to define the variable Arguments that can be passed to the Function. - **`print()`** defines multiple string/numbers parameters which means we can send a long list of Arguments to **`print()`**, separated by commas. - **`print()`** can also be called directly with just its name and empty parentheses and it will return a blank line to standard output Examples
###Code
print('Hello World!', 'I am sending string arguments to print ')
student_age = 17
student_name = "Hiroto Yamaguchi"
print(student_name,'will be in the class for',student_age, 'year old students.')
print("line 1")
print("line 2")
# line 3 is an empty return - the default when no arguments
print()
print("line 4")
###Output
line 1
line 2
line 4
###Markdown
Task 1 Passing Arguments to `print()` Many Arguments can be passed to print - update the print statement to use **`print()`** with **8** or more arguments
###Code
#[ ] increase the number of arguments used in print() to 8 or more
student_age = 17
student_name = "Hiroto Yamaguchi"
print(student_name,'will be in the class for',student_age, 'year old students.')
###Output
Hiroto Yamaguchi will be in the class for 17 year old students.
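###Markdown
An extra sketch on `print()` itself (the separator text is arbitrary): besides accepting many arguments, `print()` has optional `sep` and `end` parameters that control the separator between arguments and the text printed at the end of the line.
###Code
student_age = 17
student_name = "Hiroto Yamaguchi"

# sep controls what goes between arguments, end controls what is printed last
print(student_name, student_age, sep=" | ")
print("no newline here", end=" -> ")
print("so this continues the same line")
###Output
_____no_output_____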
###Markdown
Concept Create a simple FunctionCreating user defined functions is at the core of computer programming. Functions enable code reuse and make code easier to develop and maintain.[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.vtt","srclang":"en","kind":"subtitles","label":"english"}]) basics of a user defined function- define a function with **`def`** - use indentation (4 spaces)- define parameters- optional parameters - **`return`** values (or none)- function scope (basics defaults) `def some_function():`use the **`def`** statement when creating a **function** - use a function name that **starts with a letter** or underscore (usually a lower-case letter)- function names can contain **letters, numbers or underscores**- parenthesis **()** follow the function name- a colon **:** follows the parenthesis- the code for the function is indented under the function definition (use 4 spaces for this course)```pythondef some_function(): code the function tasks indented here ```The **end of the function** is denoted by returning to **no indentation** Examples
###Code
# defines a function named say_hi
def say_hi():
print("Hello there!")
print("goodbye")
say_hi()
# define three_three
def three_three():
print(33)
three_three()
###Output
33
###Markdown
Concept Call a function by nameCall a simple function using the function name followed by parenthesis. For instance, calling print is **`print()`** Examples
###Code
# Program defines and calls the say_hi & three_three functions
# [ ] review and run the code
def say_hi():
print("Hello there!")
print("goodbye")
# end of indentation ends the function
# define three_three
def three_three():
print(33)
# calling the functions
say_hi()
print()
three_three()
###Output
Hello there!
goodbye
33
###Markdown
Task 2 Define and call a simple function `yell_it()` `yell_it()` prints the phrase with "!" concatenated to the end- takes no arguments- indented function code does the following - define a variable called **`phrase`** and initialize it with a short *phrase* - prints **`phrase`** as all upper-case letters followed by "!"- call `yell_it` at the bottom of the cell after the function **`def`** (**Tip:** no indentation should be used)
###Code
#[ ] define (def) a simple function called yell_it() and call the function
def yell_it():
phrase = "hi"
print(phrase+"!")
yell_it()
###Output
hi!
###Markdown
Concept Functions that have Parameters[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.vtt","srclang":"en","kind":"subtitles","label":"english"}])**`print()`** and **`type()`** are examples of built-in functions that have **Parameters** defined **`type()`** has a parameter for a **Python Object** and sends back the *type* of the object an **Argument** is a value given for a parameter when calling a function - **`type`** is called providing an **Argument** - in this case the string *"Hello"*```pythontype("Hello")``` Defining Function Parameters- Parameters are defined inside of the parenthesis as part of a function **`def`** statement- Parameters are typically copies of objects that are available for use in function code```pythondef say_this(phrase): print(phrase)``` Function can have default Arguments- Default Arguments are used if no argument is supplied- Default arguments are assigned when creating the parameter list```pythondef say_this(phrase = "Hi"): print(phrase)``` Examples
###Code
# yell_this() yells the string Argument provided
def yell_this(phrase):
print(phrase.upper() + "!")
# call function with a string
yell_this("It is time to save the notebook")
# use a default argument
def say_this(phrase = "Hi"):
print(phrase)
say_this()
say_this("Bye")
###Output
Hi
Bye
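###Markdown
A short optional sketch building on the default-argument example (the phrases are arbitrary): an argument can also be passed by parameter name, called a keyword argument, which can make a call easier to read.
###Code
def say_this(phrase = "Hi"):
    print(phrase)

say_this()                       # uses the default "Hi"
say_this("Bye")                  # positional argument
say_this(phrase = "See you!")    # keyword argument, same effect
###Output
_____no_output_____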
###Markdown
Task 3 Define `yell_this()` and call with variable argument - define variable **`words_to_yell`** as a string gathered from user `input()`- Call **`yell_this()`** with **`words_to_yell`** as argument- get user input() for the string words_to_yell
###Code
# [ ] define yell_this()
def yell_this(phrase):
print(phrase.upper())
# [ ] get user input in variable words_to_yell
words_to_yell = input("Give me some words to yell: ")
# [ ] call yell_this function with words_to_yell as argument
yell_this(words_to_yell)
###Output
Give me some words to yell: hey my name is vlad
HEY MY NAME IS VLAD
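###Markdown
A variation that previews the next part of the module (the sample text is invented): instead of printing inside the function, the yelled string can be returned so the caller decides what to do with it.
###Code
# returning the result instead of printing it makes the function easier to reuse
def yell_this(phrase):
    return phrase.upper() + "!"

yelled = yell_this("time for a break")
print(yelled)            # prints: TIME FOR A BREAK!
print(len(yelled))       # prints: 17
###Output
_____no_output_____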
###Markdown
Module 2 Part 2 Functions Arguments & Parameters- Creating a simple Function with a parameter- **Exploring Functions with `return` values** - **Creating Functions with multiple parameters** - Sequence in python -----> Student will be able to - create functions with a parameter - **create functions with a `return` value**- **create functions with multiple parameters**- use knowledge of sequence in coding tasks - Use coding best practices Concepts Calling a function with a return value []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/db990568-d940-4ede-a063-7e40ed25c978/Unit1_Section3.2-function-return.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/db990568-d940-4ede-a063-7e40ed25c978/Unit1_Section3.2-function-return.vtt","srclang":"en","kind":"subtitles","label":"english"}])- **`type()`** returns an object type- **`type()`** can be called with a float the return value can be stored in a variable```pythonobject_type = type(2.33)``` creating a function with a return value - **`return`** keyword in a function *returns* a value after *exiting* the function ```pythondef msg_double(phrase): double = phrase + " " + phrase return double``` Examples review and run the code
###Code
# Message double returns the string Argument doubled
def msg_double(phrase):
double = phrase + " " + phrase
return double
# save return value in variable
msg_2x = msg_double("let's go")
print(msg_2x)
# example of functions with return values used in functions
def msg_double(phrase):
double = phrase + " " + phrase
return double
# prints the returned object
print(msg_double("Save Now!"))
# echo the type of the returned object
type(msg_double("Save Now!"))
###Output
Save Now! Save Now!
###Markdown
Task 4 Doctor: a function that adds the "Doctor" title to a name- Define function `make_doctor()` that takes a parameter `name`- get user **input** for variable **`full_name`**- call the function using `full_name` as argument- print the return value
###Code
# create and call make_doctor() with full_name argument from user input - then print the return value
def make_doctor(name):
    doctor = "Doctor " + name
    return doctor

full_name = input("Insert Full Name: ")
print(make_doctor(full_name))
###Output
_____no_output_____
###Markdown
Concepts Functions with multiple parametersFunctions can have multiple parameters separated by commas[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d82c3856-61ff-4fa3-9a20-df8f6ea4dd7a/Unit1_Section3.2-MultiParam_Function.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d82c3856-61ff-4fa3-9a20-df8f6ea4dd7a/Unit1_Section3.2-MultiParam_Function.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Example review and run the code
###Code
def make_schedule(period1, period2):
schedule = ("[1st] " + period1.title() + ", [2nd] " + period2.title())
return schedule
student_schedule = make_schedule("mathematics", "history")
print("SCHEDULE:", student_schedule)
###Output
SCHEDULE: [1st] Mathematics, [2nd] History
###Markdown
Task 5 Define `make_schedule()` adding a 3rd period - Start with the above example code- add a parameter period_3- update function code to add period_3 to the schedule- call **`make_schedule()`** with an additional argument such as 'science' and store the result in **`student_schedule`**- print the schedule
###Code
# [ ] add a 3rd period parameter to make_schedule
# [ ] Optional - print a schedule for 6 classes (Tip: perhaps let the function make this easy)
def make_schedule(period1, period2, period3):
schedule = ("[1st] " + period1.title() + ", [2nd] " + period2.title() + ", [3rd] " + period3.title())
return schedule
student_schedule = make_schedule("mathematics", "history","science")
print("SCHEDULE:", student_schedule)
###Output
SCHEDULE: [1st] Mathematics, [2nd] History, [3rd] Science
###Markdown
Module 2 Part 3 Functions Arguments & Parameters- Creating a simple Function with parameters- Exploring Functions with `return` values - Creating Functions with multiple parameters- **Sequence in python** -----> Student will be able to - create functions with a parameter - create functions with a `return` value - create functions with multiple parameters- **use knowledge of sequence in coding tasks** - **Use coding best practices** Concept SequenceIn programming, **sequence** refers to the order that code is processed. Objects in Python, such as variables and functions, are not available until they have been processed. Processing sequence flows from the top of a page of code to the bottom. This often means that **Function definitions are placed at the beginning of a page of code.**[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29ebdee3-33e8-487f-9c73-621219e5e6d2/Unit1_Section3.3-Object_Sequence.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29ebdee3-33e8-487f-9c73-621219e5e6d2/Unit1_Section3.3-Object_Sequence.vtt","srclang":"en","kind":"subtitles","label":"english"}])In the sample below, the function **`hat_color`** cannot be accessed since it is initialized after it is called at the bottom of the code. ```pythonhave_hat = hat_available('green') print('hat available is', have_hat)def hat_available(color): hat_colors = 'black, red, blue, green, white, grey, brown, pink' return(color.lower() in hat_colors)``` This results in an error - the code flows from top to bottom is in the incorrect **sequence** ```pythonNameError: name 'hat_available' is not defined```In the statement **`have_hat = hat_available('green')`** the function **`hat_available()`** needs to be called after the function has been defined> **Note:** an argument or variable is said to be **hard coded** when assigned a literal or constant value. It is a good habit to avoid creating hard coded values in functions, such as `hat_colors = 'black, red, blue, green, white, grey, brown, pink'` Examples
###Code
# review and run code - note: fix error in the following "tasks" section
def hat_available(color):
hat_colors = 'black, red, blue, green, white, grey, brown, pink'
# return Boolean
return(color.lower() in hat_colors)
have_hat = hat_available('green')
print('hat available is', have_hat)
###Output
hat available is True
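###Markdown
A follow-on sketch about sequence (the helper names are made up): a function body may mention another function that is defined later, because the name is only looked up when the function is actually called.
###Code
# greet() refers to make_name() before it exists, but that is fine:
# the lookup only happens when greet() is called, after both defs have run
def greet():
    print("Hello,", make_name())

def make_name():
    return "Kai"

greet()        # prints: Hello, Kai
###Output
_____no_output_____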
###Markdown
Task 6 Change the Sequence to fix the `NameError`- [ ] fix the code **sequence** so the **`hat_available()`** function is available when called and the code runs without error
###Code
# [ ] fix the sequence of the code to remove the NameError
def hat_available(color):
hat_colors = 'black, red, blue, green, white, grey, brown, pink'
return(color.lower() in hat_colors)
have_hat = hat_available('RED')
print('hat available is', have_hat)
###Output
hat available is True
###Markdown
Concepts Programming Style Tip: Avoid Hard-Coding "Hard-coding" is placing data values directly into codeAn example of hard-coding from above is **`have_hat = hat_available('green')`** where the argument `'green'` is hard-codedA programming best practice is to **avoid hard-coding values when possible**- Use varibles and verse hard-coded - Often preferable to use input such as a configuration file (advanced topic) or user input.These practices allow changing the data without disturbing the main code and makes code more reusable. Task 7 Program: bird_availableThe program should ask for user to "input a bird name to check for availability" and print a statement informing of availability[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/767e4db3-7909-4829-99db-fd6750ea5d54/Unit1_Section3.3-Bird_Available.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/767e4db3-7909-4829-99db-fd6750ea5d54/Unit1_Section3.3-Bird_Available.vtt","srclang":"en","kind":"subtitles","label":"english"}]) create this program with a Boolean function `bird_available()`- has parameter that takes the name of a type of bird- for this exercise the variable `bird_types = 'crow robin parrot eagle sandpiper hawk piegon'`- return `True` or `False` (we are making a Boolean function)- call the function using the name of a bird type from user input- print a sentence that indicates the availablity of the type of bird checked
###Code
# [ ] create function bird_available
def bird_available(types):
bird_types = "crow, robin, parrot, eagle, sandpiper, hawk, pigeon"
return (types.lower() in bird_types)
# [ ] user input
# [ ] call bird_available
bird_wanted = bird_available(input("What bird do you want: "))
# [ ] print availability status
print("Is your bird available: ", bird_wanted)
###Output
What bird do you want: hawk
Is your bird available: True
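###Markdown
One more optional sketch tied to the hard-coding tip (the alternate stock list is invented): the list of birds can be passed in as a parameter with a default value, so the same function works with different stock lists.
###Code
def bird_available(bird, bird_types = "crow robin parrot eagle sandpiper hawk pigeon"):
    return bird.lower() in bird_types.split()

print(bird_available("hawk"))                           # checks the default stock list
print(bird_available("hawk", "penguin ostrich emu"))    # checks a different stock list
###Output
_____no_output_____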
###Markdown
Task 8 Fix The Error
###Code
# define function how_many
def how_many():
requested = input("enter how many you want: ")
return requested
# get the number_needed
number_needed = how_many()
print(number_needed, "will be ordered")
###Output
enter how many you want: 8
8 will be ordered
###Markdown
Concept Functions that have Parameters**`print()`** and **`type()`** are examples of built-in functions that have **Parameters** defined **`type()`** has a parameter for a **Python Object** and sends back the *type* of the object an **Argument** is a value given for a parameter when calling a function - **`type`** is called providing an **Argument** - in this case the string *"Hello"*```pythontype("Hello")``` Defining Function Parameters- Parameters are defined inside of the parenthesis as part of a function **`def`** statement- Parameters are typically copies of objects that are available for use in function code```pythondef say_this(phrase): print(phrase)``` Function can have default Arguments- Default Arguments are used if no argument is supplied- Default arguments are assigned when creating the parameter list```pythondef say_this(phrase = "Hi"): print(phrase)``` Examples
###Code
# yell_this() yells the string Argument provided
def yell_this(phrase):
print(phrase.upper() + "!")
# call function with a string
yell_this("It is time to save the notebook")
# use a default argument
def say_this(phrase = "Hi"):
print(phrase)
say_this()
say_this("Bye")
###Output
_____no_output_____
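###Markdown
A brief sketch on parameter order (the function name is made up): in a `def` statement, parameters with defaults must come after the ones without defaults, otherwise Python raises a SyntaxError.
###Code
# valid: the required parameter comes first, the default one second
def label_item(name, prefix = "item:"):
    print(prefix, name)

label_item("notebook")           # prints: item: notebook
label_item("pen", "supply:")     # prints: supply: pen

# invalid - uncommenting this def would raise a SyntaxError:
# def label_item(prefix = "item:", name):
#     print(prefix, name)
###Output
_____no_output_____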
###Markdown
Task 3 Define `yell_this()` and call with variable argument - define variable **`words_to_yell`** as a string gathered from user `input()`- Call **`yell_this()`** with **`words_to_yell`** as argument- get user input() for the string words_to_yell
###Code
# [ ] define yell_this()
# [ ] get user input in variable words_to_yell
# [ ] call yell_this function with words_to_yell as argument
###Output
_____no_output_____
###Markdown
Module 2 Part 2 Functions Arguments & Parameters- Creating a simple Function with a parameter- **Exploring Functions with `return` values** - **Creating Functions with multiple parameters** - Sequence in python -----> Student will be able to - create functions with a parameter - **create functions with a `return` value**- **create functions with multiple parameters**- use knowledge of sequence in coding tasks - Use coding best practices Concepts Calling a function with a return value - **`type()`** returns an object type- **`type()`** can be called with a float the return value can be stored in a variable```pythonobject_type = type(2.33)``` creating a function with a return value - **`return`** keyword in a function *returns* a value after *exiting* the function ```pythondef msg_double(phrase): double = phrase + " " + phrase return double``` Examples review and run the code
###Code
# Message double returns the string Argument doubled
def msg_double(phrase):
double = phrase + " " + phrase
return double
# save return value in variable
msg_2x = msg_double("let's go")
print(msg_2x)
# example of functions with return values used in functions
def msg_double(phrase):
double = phrase + " " + phrase
return double
# prints the returned object
print(msg_double("Save Now!"))
# echo the type of the returned object
type(msg_double("Save Now!"))
###Output
_____no_output_____
###Markdown
Task 4 Doctor: a function that adds the "Doctor" title to a name- Define function `make_doctor()` that takes a parameter `name`- get user **input** for variable **`full_name`**- call the function using `full_name` as argument- print the return value
###Code
# create and call make_doctor() with full_name argument from user input - then print the return value
###Output
_____no_output_____
###Markdown
Concepts Functions with multiple parametersFunctions can have multiple parameters separated by commas Example review and run the code
###Code
def make_schedule(period1, period2):
schedule = ("[1st] " + period1.title() + ", [2nd] " + period2.title())
return schedule
student_schedule = make_schedule("mathematics", "history")
print("SCHEDULE:", student_schedule)
###Output
_____no_output_____
###Markdown
Task 5 Define `make_schedule()` adding a 3rd period - Start with the above example code- add a parameter period_3- update function code to add period_3 to the schedule- call **`make_schedule()`** with an additional argument such as 'science' and store the result in **`student_schedule`**- print the schedule
###Code
# [ ] add a 3rd period parameter to make_schedule
# [ ] Optional - print a schedule for 6 classes (Tip: perhaps let the function make this easy)
###Output
_____no_output_____
###Markdown
Module 2 Part 3 Functions Arguments & Parameters- Creating a simple Function with parameters- Exploring Functions with `return` values - Creating Functions with multiple parameters- **Sequence in python** -----> Student will be able to - create functions with a parameter - create functions with a `return` value - create functions with multiple parameters- **use knowledge of sequence in coding tasks** - **Use coding best practices** Concept SequenceIn programming, **sequence** refers to the order that code is processed. Objects in Python, such as variables and functions, are not available until they have been processed. Processing sequence flows from the top of a page of code to the bottom. This often means that **Function definitions are placed at the beginning of a page of code.**In the sample below, the function **`hat_color`** cannot be accessed since it is initialized after it is called at the bottom of the code. ```pythonhave_hat = hat_available('green') print('hat available is', have_hat)def hat_available(color): hat_colors = 'black, red, blue, green, white, grey, brown, pink' return(color.lower() in hat_colors)``` This results in an error - the code flows from top to bottom is in the incorrect **sequence** ```pythonNameError: name 'hat_available' is not defined```In the statement **`have_hat = hat_available('green')`** the function **`hat_available()`** needs to be called after the function has been defined> **Note:** an argument or variable is said to be **hard coded** when assigned a literal or constant value. It is a good habit to avoid creating hard coded values in functions, such as `hat_colors = 'black, red, blue, green, white, grey, brown, pink'` Examples
###Code
# review and run code - note: fix error in the following "tasks" section
def hat_available(color):
hat_colors = 'black, red, blue, green, white, grey, brown, pink'
# return Boolean
return(color.lower() in hat_colors)
have_hat = hat_available('green')
print('hat available is', have_hat)
###Output
_____no_output_____
###Markdown
Task 6 Change the Sequence to fix the `NameError`- [ ] fix the code **sequence** so the **`hat_available()`** function is available when called and the code runs without error
###Code
# [ ] fix the sequence of the code to remove the NameError
have_hat = hat_available('green')
print('hat available is', have_hat)
def hat_available(color):
hat_colors = 'black, red, blue, green, white, grey, brown, pink'
return(color.lower() in hat_colors)
###Output
_____no_output_____
###Markdown
Concepts Programming Style Tip: Avoid Hard-Coding "Hard-coding" is placing data values directly into code. An example of hard-coding from above is **`have_hat = hat_available('green')`**, where the argument `'green'` is hard-coded. A programming best practice is to **avoid hard-coding values when possible**- use variables instead of hard-coded values - often it is preferable to use input, such as a configuration file (advanced topic) or user input.These practices allow changing the data without disturbing the main code and make code more reusable. Task 7 Program: bird_availableThe program should ask the user to "input a bird name to check for availability" and print a statement informing of availability create this program with a Boolean function `bird_available()`- has a parameter that takes the name of a type of bird- for this exercise the variable `bird_types = 'crow robin parrot eagle sandpiper hawk pigeon'`- return `True` or `False` (we are making a Boolean function)- call the function using the name of a bird type from user input- print a sentence that indicates the availability of the type of bird checked
###Code
# [ ] create function bird_available
# [ ] user input
# [ ] call bird_available
# [ ] print availability status
###Output
_____no_output_____
###Markdown
Task 8 Fix The Error
###Code
# define function how_many
how_many():
requested = input("enter how many you want: ")
return requested
# get the number_needed
number_needed = how_many()
print(number_needed, "will be ordered")
###Output
_____no_output_____
###Markdown
Module 2: Functions Functions Arguments & Parameters- **Creating a simple Function with a parameter**- Exploring Functions with `return` values - Creating Functions with multiple parameters- Sequence in python -----> Student will be able to - **create functions with a parameter** - create functions with a `return` value - create functions with multiple parameters- use knowledge of sequence in coding tasks - Use coding best practices Before you begin here, watch the Functions video in D2L. Concept Calling Functions with Arguments: print() is a function built in to Python!Functions are used for code tasks that are intended to be reused. For example, you have already used the print() function and passed it **arguments** by putting strings and variables into its parentheses. []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.vtt","srclang":"en","kind":"subtitles","label":"english"}])Python allows us to create **User Defined Functions** and provides many **Built-in Functions** such as **`print()`** - **`print()`** can be called using arguments (or without) and sends text to standard output, such as the console. - **`print()`** uses **Parameters** to define the variable Arguments that can be passed to the Function. - **`print()`** defines multiple string/numbers parameters which means we can send a long list of Arguments to **`print()`**, separated by commas. - **`print()`** can also be called directly with just its name and empty parentheses and it will return a blank line to standard output Examples
###Code
print('Hello World!', 'I am sending string arguments to print ')
student_age = 17
student_name = "Hiroto Yamaguchi"
print(student_name,'will be in the class for',student_age, 'year old students.')
print("line 1")
print("line 2")
# line 3 is an empty return - the default when no arguments
print()
print("line 4")
###Output
_____no_output_____
###Markdown
Task 1 Passing Arguments to `print()` Many Arguments can be passed to print - update the print statement to use **`print()`** with **8** or more arguments
###Code
#[ ] increase the number of arguments used in print() to 8 or more
student_age = 17
student_name = "Hiroto Yamaguchi"
print(student_name,'will be in the class for',student_age, 'year old students.')
###Output
_____no_output_____
###Markdown
Concept Create a simple FunctionCreating user defined functions is at the core of computer programming. Functions enable code reuse and make code easier to develop and maintain.[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.vtt","srclang":"en","kind":"subtitles","label":"english"}]) basics of a user defined function- define a function with **`def`** - use indentation (4 spaces)- define parameters- optional parameters - **`return`** values (or none)- function scope (basics defaults) `def some_function():`use the **`def`** statement when creating a **function** - use a function name that **starts with a letter** or underscore (usually a lower-case letter)- function names can contain **letters, numbers or underscores**- parenthesis **()** follow the function name- a colon **:** follows the parenthesis- the code for the function is indented under the function definition (use 4 spaces for this course)```pythondef some_function(): code the function tasks indented here ```The **end of the function** is denoted by returning to **no indentation** Examples
###Code
# defines a function named say_hi
def say_hi():
print("Hello there!")
print("goodbye")
# define three_three
def three_three():
print(33)
###Output
_____no_output_____
###Markdown
Concept Call a function by nameCall a simple function using the function name followed by parenthesis. For instance, calling print is **`print()`** Examples
###Code
# Program defines and calls the say_hi & three_three functions
# [ ] review and run the code
def say_hi():
print("Hello there!")
print("goodbye")
# end of indentation ends the function
# define three_three
def three_three():
print(33)
# calling the functions
say_hi()
print()
three_three()
###Output
_____no_output_____
###Markdown
Task 2 Define and call a simple function `yell_it()` `yell_it()` prints the phrase with "!" concatenated to the end- takes no arguments- indented function code does the following - define a variable called **`phrase`** and initialize it with a short *phrase* - prints **`phrase`** as all upper-case letters followed by "!"- call `yell_it` at the bottom of the cell after the function **`def`** (**Tip:** no indentation should be used)
###Code
#[ ] define (def) a simple function called yell_it() and call the function
###Output
_____no_output_____
###Markdown
Concept Functions that have Parameters[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.vtt","srclang":"en","kind":"subtitles","label":"english"}])**`print()`** and **`type()`** are examples of built-in functions that have **Parameters** defined **`type()`** has a parameter for a **Python Object** and sends back the *type* of the object an **Argument** is a value given for a parameter when calling a function - **`type`** is called providing an **Argument** - in this case the string *"Hello"*```pythontype("Hello")``` Defining Function Parameters- Parameters are defined inside of the parenthesis as part of a function **`def`** statement- Parameters are typically copies of objects that are available for use in function code```pythondef say_this(phrase): print(phrase)``` Function can have default Arguments- Default Arguments are used if no argument is supplied- Default arguments are assigned when creating the parameter list```pythondef say_this(phrase = "Hi"): print(phrase)``` Examples
###Code
# yell_this() yells the string Argument provided
def yell_this(phrase):
print(phrase.upper() + "!")
# call function with a string
yell_this("It is time to save the notebook")
# use a default argument
def say_this(phrase = "Hi"):
print(phrase)
say_this()
say_this("Bye")
###Output
_____no_output_____
###Markdown
Task 3 Define `yell_this()` and call with variable argument - define variable **`words_to_yell`** as a string gathered from user `input()`- Call **`yell_this()`** with **`words_to_yell`** as argument- get user input() for the string words_to_yell
###Code
# [ ] define yell_this()
# [ ] get user input in variable words_to_yell
# [ ] call yell_this function with words_to_yell as argument
###Output
_____no_output_____
###Markdown
Module 2 Part 2 Functions Arguments & Parameters- Creating a simple Function with a parameter- **Exploring Functions with `return` values** - **Creating Functions with multiple parameters** - Sequence in python -----> Student will be able to - create functions with a parameter - **create functions with a `return` value**- **create functions with multiple parameters**- use knowledge of sequence in coding tasks - Use coding best practices Concepts Calling a function with a return value []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/db990568-d940-4ede-a063-7e40ed25c978/Unit1_Section3.2-function-return.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/db990568-d940-4ede-a063-7e40ed25c978/Unit1_Section3.2-function-return.vtt","srclang":"en","kind":"subtitles","label":"english"}])- **`type()`** returns an object type- **`type()`** can be called with a float the return value can be stored in a variable```pythonobject_type = type(2.33)``` creating a function with a return value - **`return`** keyword in a function *returns* a value after *exiting* the function ```pythondef msg_double(phrase): double = phrase + " " + phrase return double``` Examples review and run the code
###Code
# Message double returns the string Argument doubled
def msg_double(phrase):
double = phrase + " " + phrase
return double
# save return value in variable
msg_2x = msg_double("let's go")
print(msg_2x)
# example of functions with return values used in functions
def msg_double(phrase):
double = phrase + " " + phrase
return double
# prints the returned object
print(msg_double("Save Now!"))
# echo the type of the returned object
type(msg_double("Save Now!"))
###Output
_____no_output_____
###Markdown
Task 4 Doctor: a function that adds the "Doctor" title to a name- Define function `make_doctor()` that takes a parameter `name`- get user **input** for variable **`full_name`**- call the function using `full_name` as argument- print the return value
###Code
# create and call make_doctor() with full_name argument from user input - then print the return value
###Output
_____no_output_____
###Markdown
Concepts Functions with multiple parametersFunctions can have multiple parameters separated by commas[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d82c3856-61ff-4fa3-9a20-df8f6ea4dd7a/Unit1_Section3.2-MultiParam_Function.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d82c3856-61ff-4fa3-9a20-df8f6ea4dd7a/Unit1_Section3.2-MultiParam_Function.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Example review and run the code
###Code
def make_schedule(period1, period2):
schedule = ("[1st] " + period1.title() + ", [2nd] " + period2.title())
return schedule
student_schedule = make_schedule("mathematics", "history")
print("SCHEDULE:", student_schedule)
###Output
_____no_output_____
###Markdown
Task 5 Define `make_schedule()` adding a 3rd period - Start with the above example code- add a parameter period_3- update function code to add period_3 to the schedule- call **`make_schedule()`** with an additional argument such as 'science' and store the result in **`student_schedule`**- print the schedule
###Code
# [ ] add a 3rd period parameter to make_schedule
# [ ] Optional - print a schedule for 6 classes (Tip: perhaps let the function make this easy)
###Output
_____no_output_____
###Markdown
Module 2 Part 3 Functions Arguments & Parameters- Creating a simple Function with parameters- Exploring Functions with `return` values - Creating Functions with multiple parameters- **Sequence in python** -----> Student will be able to - create functions with a parameter - create functions with a `return` value - create functions with multiple parameters- **use knowledge of sequence in coding tasks** - **Use coding best practices** Concept SequenceIn programming, **sequence** refers to the order that code is processed. Objects in Python, such as variables and functions, are not available until they have been processed. Processing sequence flows from the top of a page of code to the bottom. This often means that **Function definitions are placed at the beginning of a page of code.**[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29ebdee3-33e8-487f-9c73-621219e5e6d2/Unit1_Section3.3-Object_Sequence.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29ebdee3-33e8-487f-9c73-621219e5e6d2/Unit1_Section3.3-Object_Sequence.vtt","srclang":"en","kind":"subtitles","label":"english"}])In the sample below, the function **`hat_color`** cannot be accessed since it is initialized after it is called at the bottom of the code. ```pythonhave_hat = hat_available('green') print('hat available is', have_hat)def hat_available(color): hat_colors = 'black, red, blue, green, white, grey, brown, pink' return(color.lower() in hat_colors)``` This results in an error - the code flows from top to bottom is in the incorrect **sequence** ```pythonNameError: name 'hat_available' is not defined```In the statement **`have_hat = hat_available('green')`** the function **`hat_available()`** needs to be called after the function has been defined> **Note:** an argument or variable is said to be **hard coded** when assigned a literal or constant value. It is a good habit to avoid creating hard coded values in functions, such as `hat_colors = 'black, red, blue, green, white, grey, brown, pink'` Examples
###Code
# review and run code - note: fix error in the following "tasks" section
have_hat = hat_available('green')
print('hat available is', have_hat)
def hat_available(color):
hat_colors = 'black, red, blue, green, white, grey, brown, pink'
# return Boolean
return(color.lower() in hat_colors)
###Output
_____no_output_____
###Markdown
Task 6 Change the Sequence to fix the `NameError`- [ ] fix the code **sequence** so the **`hat_available()`** function is available when called and the code runs without error
###Code
# [ ] fix the sequence of the code to remove the NameError
have_hat = hat_available('green')
print('hat available is', have_hat)
def hat_available(color):
hat_colors = 'black, red, blue, green, white, grey, brown, pink'
return(color.lower() in hat_colors)
###Output
_____no_output_____
###Markdown
Concepts Programming Style Tip: Avoid Hard-Coding "Hard-coding" is placing data values directly into codeAn example of hard-coding from above is **`have_hat = hat_available('green')`** where the argument `'green'` is hard-codedA programming best practice is to **avoid hard-coding values when possible**- Use varibles and verse hard-coded - Often preferable to use input such as a configuration file (advanced topic) or user input.These practices allow changing the data without disturbing the main code and makes code more reusable. Task 7 Program: bird_availableThe program should ask for user to "input a bird name to check for availability" and print a statement informing of availability[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/767e4db3-7909-4829-99db-fd6750ea5d54/Unit1_Section3.3-Bird_Available.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/767e4db3-7909-4829-99db-fd6750ea5d54/Unit1_Section3.3-Bird_Available.vtt","srclang":"en","kind":"subtitles","label":"english"}]) create this program with a Boolean function `bird_available()`- has parameter that takes the name of a type of bird- for this exercise the variable `bird_types = 'crow robin parrot eagle sandpiper hawk piegon'`- return `True` or `False` (we are making a Boolean function)- call the function using the name of a bird type from user input- print a sentence that indicates the availablity of the type of bird checked
###Code
# [ ] create function bird_available
# [ ] user input
# [ ] call bird_available
# [ ] print availability status
###Output
_____no_output_____
###Markdown
Task 8 Fix The Error
###Code
# define function how_many
how_many():
requested = input("enter how many you want: ")
return requested
# get the number_needed
number_needed = how_many()
print(number_needed, "will be ordered")
###Output
_____no_output_____
###Markdown
Module 2: Functions Functions Arguments & Parameters- **Creating a simple Function with a parameter**- Exploring Functions with `return` values - Creating Functions with multiple parameters- Sequence in python -----> Student will be able to - **create functions with a parameter** - create functions with a `return` value - create functions with multiple parameters- use knowledge of sequence in coding tasks - Use coding best practices Before you begin here, watch the Functions video in D2L. Concept Calling Functions with Arguments: print() is a function built in to Python!Functions are used for code tasks that are intended to be reused. For example, you have already used the print() function and passed it **arguments** by putting strings and variables into its parentheses. []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.vtt","srclang":"en","kind":"subtitles","label":"english"}])Python allows us to create **User Defined Functions** and provides many **Built-in Functions** such as **`print()`** - **`print()`** can be called using arguments (or without) and sends text to standard output, such as the console. - **`print()`** uses **Parameters** to define the variable Arguments that can be passed to the Function. - **`print()`** defines multiple string/numbers parameters which means we can send a long list of Arguments to **`print()`**, separated by commas. - **`print()`** can also be called directly with just its name and empty parentheses and it will return a blank line to standard output Examples
###Code
print('Hello World!', 'I am sending string arguments to print ')
student_age = 17
student_name = "Hiroto Yamaguchi"
print(student_name,'will be in the class for',student_age, 'year old students.')
print("line 1")
print("line 2")
# line 3 is an empty return - the default when no arguments
print()
print("line 4")
###Output
line 1
line 2
line 4
###Markdown
Task 1 Passing Arguments to `print()` Many Arguments can be passed to print - update the print statement to use **`print()`** with **8** or more arguments
###Code
#[ ] increase the number of arguments used in print() to 8 or more
student_age = 17
student_name = "Hiroto Yamaguchi"
print(student_name,'will be in the class for',student_age, 'year old students.', ' this is the class for juniors.', ' This is an additonal argument', ' time for another one.', 'what happens with 8 or more arguments?')
###Output
Hiroto Yamaguchi will be in the class for 17 year old students. this is the class for juniors. This is an additonal argument time for another one. what happens with 8 or more arguments?
###Markdown
Concept Create a simple FunctionCreating user defined functions is at the core of computer programming. Functions enable code reuse and make code easier to develop and maintain.[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.vtt","srclang":"en","kind":"subtitles","label":"english"}]) basics of a user defined function- define a function with **`def`** - use indentation (4 spaces)- define parameters- optional parameters - **`return`** values (or none)- function scope (basics defaults) `def some_function():`use the **`def`** statement when creating a **function** - use a function name that **starts with a letter** or underscore (usually a lower-case letter)- function names can contain **letters, numbers or underscores**- parenthesis **()** follow the function name- a colon **:** follows the parenthesis- the code for the function is indented under the function definition (use 4 spaces for this course)```pythondef some_function(): code the function tasks indented here ```The **end of the function** is denoted by returning to **no indentation** Examples
###Code
# defines a function named say_hi
def say_hi():
print("Hello there!")
print("goodbye")
# define three_three
def three_three():
print(33)
###Output
_____no_output_____
###Markdown
Concept Call a function by nameCall a simple function using the function name followed by parenthesis. For instance, calling print is **`print()`** Examples
###Code
# Program defines and calls the say_hi & three_three functions
# [ ] review and run the code
def say_hi():
print("Hello there!")
print("goodbye")
# end of indentation ends the function
# define three_three
def three_three():
print(33)
# calling the functions
say_hi()
print()
three_three()
###Output
Hello there!
goodbye
33
###Markdown
Task 2 Define and call a simple function `yell_it()` `yell_it()` prints the phrase with "!" concatenated to the end- takes no arguments- indented function code does the following - define a variable called **`phrase`** and initialize it with a short *phrase* - prints **`phrase`** as all upper-case letters followed by "!"- call `yell_it` at the bottom of the cell after the function **`def`** (**Tip:** no indentation should be used)
###Code
#[ ] define (def) a simple function called yell_it() and call the function
def yell_it():
print(" This is my loud voice!".upper())
yell_it()
###Output
THIS IS MY LOUD VOICE!
###Markdown
Concept Functions that have Parameters[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.vtt","srclang":"en","kind":"subtitles","label":"english"}])**`print()`** and **`type()`** are examples of built-in functions that have **Parameters** defined **`type()`** has a parameter for a **Python Object** and sends back the *type* of the object an **Argument** is a value given for a parameter when calling a function - **`type`** is called providing an **Argument** - in this case the string *"Hello"*```pythontype("Hello")``` Defining Function Parameters- Parameters are defined inside of the parenthesis as part of a function **`def`** statement- Parameters are typically copies of objects that are available for use in function code```pythondef say_this(phrase): print(phrase)``` Function can have default Arguments- Default Arguments are used if no argument is supplied- Default arguments are assigned when creating the parameter list```pythondef say_this(phrase = "Hi"): print(phrase)``` Examples
###Code
# yell_this() yells the string Argument provided
def yell_this(phrase):
print(phrase.upper() + "!")
# call function with a string
yell_this("It is time to save the notebook")
# use a default argument
def say_this(phrase = "Hi"):
print(phrase)
say_this()
say_this("Bye")
###Output
Hi
Bye
###Markdown
Task 3 Define `yell_this()` and call with variable argument - define variable **`words_to_yell`** as a string gathered from user `input()`- Call **`yell_this()`** with **`words_to_yell`** as argument- get user input() for the string words_to_yell
###Code
# [ ] define yell_this()
def yell_this(phrase):
print(phrase.upper() + ' OWWW')
# [ ] get user input in variable words_to_yell
words_to_yell = input("What words do you want to yell?").upper()
# [ ] call yell_this function with words_to_yell as argument
yell_this(words_to_yell)
###Output
What words do you want to yell?noo
NOO OWWW
###Markdown
Module 2 Part 2 Functions Arguments & Parameters- Creating a simple Function with a parameter- **Exploring Functions with `return` values** - **Creating Functions with multiple parameters** - Sequence in python -----> Student will be able to - create functions with a parameter - **create functions with a `return` value**- **create functions with multiple parameters**- use knowledge of sequence in coding tasks - Use coding best practices Concepts Calling a function with a return value []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/db990568-d940-4ede-a063-7e40ed25c978/Unit1_Section3.2-function-return.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/db990568-d940-4ede-a063-7e40ed25c978/Unit1_Section3.2-function-return.vtt","srclang":"en","kind":"subtitles","label":"english"}])- **`type()`** returns an object type- **`type()`** can be called with a float the return value can be stored in a variable```pythonobject_type = type(2.33)``` creating a function with a return value - **`return`** keyword in a function *returns* a value after *exiting* the function ```pythondef msg_double(phrase): double = phrase + " " + phrase return double``` Examples review and run the code
###Code
# Message double returns the string Argument doubled
def msg_double(phrase):
double = phrase + " " + phrase
return double
# save return value in variable
msg_2x = msg_double("let's go")
print(msg_2x)
# example of functions with return values used in functions
def msg_double(phrase):
double = phrase + " " + phrase
return double
# prints the returned object
print(msg_double("Save Now!"))
# echo the type of the returned object
type(msg_double("Save Now!"))
###Output
Save Now! Save Now!
###Markdown
Task 4 Doctor: a function that adds the "Doctor" title to a name- Define function `make_doctor()` that takes a parameter `name`- get user **input** for variable **`full_name`**- call the function using `full_name` as argument- print the return value
###Code
# create and call make_doctor() with full_name argument from user input - then print the return value
def make_doctor(phrase):
    return 'Doctor ' + phrase
full_name = input("What is your name?")
print(make_doctor(full_name))
###Output
What is your name?Logan
Doctor Logan
###Markdown
Concepts Functions with multiple parametersFunctions can have multiple parameters separated by commas[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d82c3856-61ff-4fa3-9a20-df8f6ea4dd7a/Unit1_Section3.2-MultiParam_Function.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d82c3856-61ff-4fa3-9a20-df8f6ea4dd7a/Unit1_Section3.2-MultiParam_Function.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Example review and run the code
###Code
def make_schedule(period1, period2):
schedule = ("[1st] " + period1.title() + ", [2nd] " + period2.title())
return schedule
student_schedule = make_schedule("mathematics", "history")
print("SCHEDULE:", student_schedule)
###Output
SCHEDULE: [1st] Mathematics, [2nd] History
###Markdown
Task 5 Define `make_schedule()` adding a 3rd period - Start with the above example code- add a parameter period_3- update function code to add period_3 to the schedule- call **`make_schedule()`** with an additional argument such as 'science'- print the schedule
###Code
# [ ] add a 3rd period parameter to make_schedule
def make_schedule(period1, period2, period3):
schedule = ("[1st] " + period1.title() + ", [2nd] " + period2.title() + ", [3rd] " + period3.title())
return schedule
student_schedule = make_schedule("mathematics", "history", "Art")
print("Schedule:", student_schedule)
# [ ] Optional - print a schedule for 6 classes (Tip: perhaps let the function make this easy)
###Output
Schedule: [1st] Mathematics, [2nd] History, [3rd] Art
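For the optional 6-class schedule, one possible sketch (this is not the course's three-parameter version; it assumes a variable number of period arguments is acceptable):

```python
# sketch: accept any number of periods instead of a fixed three
def make_schedule(*periods):
    ordinals = ["1st", "2nd", "3rd", "4th", "5th", "6th"]
    parts = []
    for i, period in enumerate(periods):
        parts.append("[" + ordinals[i] + "] " + period.title())
    return ", ".join(parts)

print("SCHEDULE:", make_schedule("math", "history", "art", "science", "music", "gym"))
```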
###Markdown
Module 2 Part 3 Functions Arguments & Parameters- Creating a simple Function with parameters- Exploring Functions with `return` values - Creating Functions with multiple parameters- **Sequence in python** -----> Student will be able to - create functions with a parameter - create functions with a `return` value - create functions with multiple parameters- **use knowledge of sequence in coding tasks** - **Use coding best practices** Concept SequenceIn programming, **sequence** refers to the order that code is processed. Objects in Python, such as variables and functions, are not available until they have been processed. Processing sequence flows from the top of a page of code to the bottom. This often means that **Function definitions are placed at the beginning of a page of code.**[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29ebdee3-33e8-487f-9c73-621219e5e6d2/Unit1_Section3.3-Object_Sequence.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29ebdee3-33e8-487f-9c73-621219e5e6d2/Unit1_Section3.3-Object_Sequence.vtt","srclang":"en","kind":"subtitles","label":"english"}])In the sample below, the function **`hat_available`** cannot be accessed since it is initialized after it is called at the bottom of the code. ```pythonhave_hat = hat_available('green') print('hat available is', have_hat)def hat_available(color): hat_colors = 'black, red, blue, green, white, grey, brown, pink' return(color.lower() in hat_colors)``` This results in an error - the code flow from top to bottom is in the incorrect **sequence** ```pythonNameError: name 'hat_available' is not defined```In the statement **`have_hat = hat_available('green')`** the function **`hat_available()`** needs to be called after the function has been defined> **Note:** an argument or variable is said to be **hard coded** when assigned a literal or constant value. It is a good habit to avoid creating hard coded values in functions, such as `hat_colors = 'black, red, blue, green, white, grey, brown, pink'` Examples
###Code
# review and run code - note: fix error in the following "tasks" section
def hat_available(color):
hat_colors = 'black, red, blue, green, white, grey, brown, pink'
# return Boolean
return(color.lower() in hat_colors)
have_hat = hat_available('green')
print('hat available is', have_hat)
###Output
hat available is True
###Markdown
Task 6 Change the Sequence to fix the `NameError`- [ ] fix the code **sequence** so the **`hat_available()`** function is available when called and the code runs without error
###Code
# [ ] fix the sequence of the code to remove the NameError
def hat_available(color):
hat_colors = 'black, red, blue, green, white, grey, brown, pink'
return(color.lower() in hat_colors)
have_hat = hat_available('green')
print('hat available is', have_hat)
###Output
hat available is True
###Markdown
Concepts Programming Style Tip: Avoid Hard-Coding "Hard-coding" is placing data values directly into codeAn example of hard-coding from above is **`have_hat = hat_available('green')`** where the argument `'green'` is hard-codedA programming best practice is to **avoid hard-coding values when possible**- Use variables instead of hard-coded values - Often it is preferable to use input such as a configuration file (advanced topic) or user input.These practices allow changing the data without disturbing the main code and make code more reusable. Task 7 Program: bird_availableThe program should ask the user to "input a bird name to check for availability" and print a statement informing of availability[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/767e4db3-7909-4829-99db-fd6750ea5d54/Unit1_Section3.3-Bird_Available.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/767e4db3-7909-4829-99db-fd6750ea5d54/Unit1_Section3.3-Bird_Available.vtt","srclang":"en","kind":"subtitles","label":"english"}]) create this program with a Boolean function `bird_available()`- has a parameter that takes the name of a type of bird- for this exercise the variable `bird_types = 'crow robin parrot eagle sandpiper hawk piegon'`- return `True` or `False` (we are making a Boolean function)- call the function using the name of a bird type from user input- print a sentence that indicates the availability of the type of bird checked
###Code
# [ ] create function bird_available
def bird_available(bird_name):
    bird_types = 'crow robin parrot eagle sandpiper hawk piegon'
    return bird_name.lower() in bird_types
# [ ] user input
bird_input = input("Is the bird on the list? ").lower()
# [ ] call bird_available
bird_input = bird_available(bird_input)
# [ ] print availability status
print('is the bird available? ', bird_input)
###Output
Is the bird on the list? crow
is the bird available?  True
###Markdown
Task 8 Fix The Error
###Code
# define function how_many
def how_many():
requested = input("enter how many you want: ")
return requested
# get the number_needed
number_needed = how_many()
print(number_needed, "will be ordered")
###Output
enter how many you want: 7
7 will be ordered
###Markdown
Module 2: Functions Functions Arguments & Parameters- **Creating a simple Function with a parameter**- Exploring Functions with `return` values - Creating Functions with multiple parameters- Sequence in python -----> Student will be able to - **create functions with a parameter** - create functions with a `return` value - create functions with multiple parameters- use knowledge of sequence in coding tasks - Use coding best practices Before you begin here, watch the Functions video in D2L. Concept Calling Functions with Arguments: print() is a function built in to Python!Functions are used for code tasks that are intended to be reused. For example, you have already used the print() function and passed it **arguments** by putting strings and variables into its parentheses. []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.vtt","srclang":"en","kind":"subtitles","label":"english"}])Python allows us to create **User Defined Functions** and provides many **Built-in Functions** such as **`print()`** - **`print()`** can be called using arguments (or without) and sends text to standard output, such as the console. - **`print()`** uses **Parameters** to define the variable Arguments that can be passed to the Function. - **`print()`** defines multiple string/numbers parameters which means we can send a long list of Arguments to **`print()`**, separated by commas. - **`print()`** can also be called directly with just its name and empty parentheses and it will return a blank line to standard output Examples
###Code
print('Hello World!', 'I am sending string arguments to print ')
student_age = 17
student_name = "Hiroto Yamaguchi"
print(student_name,'will be in the class for',student_age, 'year old students.')
print("line 1")
print("line 2")
# line 3 is an empty return - the default when no arguments
print()
print("line 4")
###Output
line 1
line 2
line 4
###Markdown
Task 1 Passing Arguments to `print()` Many Arguments can be passed to print - update the print statement to use **`print()`** with **8** or more arguments
###Code
#[ ] increase the number of arguments used in print() to 8 or more
student_age = 17
student_name = "Hiroto Yamaguchi"
print(student_name,'will be in the class for',student_age, 'year old students.')
###Output
Hiroto Yamaguchi will be in the class for 17 year old students.
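The cell above keeps the original four arguments; a sketch of the requested call with eight or more arguments follows (the extra strings are placeholder arguments, not part of the original exercise):

```python
student_age = 17
student_name = "Hiroto Yamaguchi"
# eight separate arguments passed to print(), separated by commas
print(student_name, 'will be in the class for', student_age, 'year old students.',
      'Class list updated.', 'Books issued.', 'Locker assigned.', 'Welcome!')
```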
###Markdown
Concept Create a simple FunctionCreating user defined functions is at the core of computer programming. Functions enable code reuse and make code easier to develop and maintain.[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.vtt","srclang":"en","kind":"subtitles","label":"english"}]) basics of a user defined function- define a function with **`def`** - use indentation (4 spaces)- define parameters- optional parameters - **`return`** values (or none)- function scope (basics defaults) `def some_function():`use the **`def`** statement when creating a **function** - use a function name that **starts with a letter** or underscore (usually a lower-case letter)- function names can contain **letters, numbers or underscores**- parenthesis **()** follow the function name- a colon **:** follows the parenthesis- the code for the function is indented under the function definition (use 4 spaces for this course)```pythondef some_function(): code the function tasks indented here ```The **end of the function** is denoted by returning to **no indentation** Examples
###Code
# defines a function named say_hi
def say_hi():
print("Hello there!")
print("goodbye")
say_hi()
# define three_three
def three_three():
print(33)
three_three()
###Output
33
###Markdown
Concept Call a function by nameCall a simple function using the function name followed by parenthesis. For instance, calling print is **`print()`** Examples
###Code
# Program defines and calls the say_hi & three_three functions
# [ ] review and run the code
def say_hi():
print("Hello there!")
print("goodbye")
# end of indentation ends the function
# define three_three
def three_three():
print(33)
# calling the functions
say_hi()
print()
three_three()
###Output
Hello there!
goodbye
33
###Markdown
Task 2 Define and call a simple function `yell_it()` `yell_it()` prints the phrase with "!" concatenated to the end- takes no arguments- indented function code does the following - define a variable called **`phrase`** and initialize it with a short *phrase* - prints **`phrase`** as all upper-case letters followed by "!"- call `yell_it` at the bottom of the cell after the function **`def`** (**Tip:** no indentation should be used)
###Code
#[ ] define (def) a simple function called yell_it() and call the function
def yell_it():
print("blue")
yell_it()
###Output
blue
###Markdown
Concept Functions that have Parameters[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.vtt","srclang":"en","kind":"subtitles","label":"english"}])**`print()`** and **`type()`** are examples of built-in functions that have **Parameters** defined **`type()`** has a parameter for a **Python Object** and sends back the *type* of the object an **Argument** is a value given for a parameter when calling a function - **`type`** is called providing an **Argument** - in this case the string *"Hello"*```pythontype("Hello")``` Defining Function Parameters- Parameters are defined inside of the parenthesis as part of a function **`def`** statement- Parameters are typically copies of objects that are available for use in function code```pythondef say_this(phrase): print(phrase)``` Function can have default Arguments- Default Arguments are used if no argument is supplied- Default arguments are assigned when creating the parameter list```pythondef say_this(phrase = "Hi"): print(phrase)``` Examples
###Code
# yell_this() yells the string Argument provided
def yell_this(phrase):
print(phrase.upper() + "!")
# call function with a string
yell_this("It is time to save the notebook")
# use a default argument
def say_this(phrase = "Hi"):
print(phrase)
say_this()
say_this("Bye")
###Output
_____no_output_____
###Markdown
Task 3 Define `yell_this()` and call with variable argument - define variable **`words_to_yell`** as a string gathered from user `input()`- Call **`yell_this()`** with **`words_to_yell`** as argument- get user input() for the string words_to_yell
###Code
# [ ] define yell_this()
# [ ] get user input in variable words_to_yell
# [ ] call yell_this function with words_to_yell as argument
###Output
_____no_output_____
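One possible sketch for this task (the prompt wording is an assumption):

```python
# define yell_this() with a phrase parameter
def yell_this(phrase):
    print(phrase.upper() + "!")

# get user input in variable words_to_yell
words_to_yell = input("What words do you want to yell? ")

# call yell_this with words_to_yell as the argument
yell_this(words_to_yell)
```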
###Markdown
Module 2 Part 2 Functions Arguments & Parameters- Creating a simple Function with a parameter- **Exploring Functions with `return` values** - **Creating Functions with multiple parameters** - Sequence in python -----> Student will be able to - create functions with a parameter - **create functions with a `return` value**- **create functions with multiple parameters**- use knowledge of sequence in coding tasks - Use coding best practices Concepts Calling a function with a return value []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/db990568-d940-4ede-a063-7e40ed25c978/Unit1_Section3.2-function-return.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/db990568-d940-4ede-a063-7e40ed25c978/Unit1_Section3.2-function-return.vtt","srclang":"en","kind":"subtitles","label":"english"}])- **`type()`** returns an object type- **`type()`** can be called with a float the return value can be stored in a variable```pythonobject_type = type(2.33)``` creating a function with a return value - **`return`** keyword in a function *returns* a value after *exiting* the function ```pythondef msg_double(phrase): double = phrase + " " + phrase return double``` Examples review and run the code
###Code
# Message double returns the string Argument doubled
def msg_double(phrase):
double = phrase + " " + phrase
return double
# save return value in variable
msg_2x = msg_double("let's go")
print(msg_2x)
# example of functions with return values used in functions
def msg_double(phrase):
double = phrase + " " + phrase
return double
# prints the returned object
print(msg_double("Save Now!"))
# echo the type of the returned object
type(msg_double("Save Now!"))
###Output
_____no_output_____
###Markdown
Task 4 Doctor: a function that adds the "Doctor" title to a name- Define function `make_doctor()` that takes a parameter `name`- get user **input** for variable **`full_name`**- call the function using `full_name` as argument- print the return value
###Code
# create and call make_doctor() with full_name argument from user input - then print the return value
###Output
_____no_output_____
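One possible sketch for this task (the prompt text is an assumption):

```python
# make_doctor() returns the name with the "Doctor" title added
def make_doctor(name):
    return "Doctor " + name

full_name = input("Enter your full name: ")
print(make_doctor(full_name))
```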
###Markdown
Concepts Functions with multiple parametersFunctions can have multiple parameters separated by commas[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d82c3856-61ff-4fa3-9a20-df8f6ea4dd7a/Unit1_Section3.2-MultiParam_Function.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d82c3856-61ff-4fa3-9a20-df8f6ea4dd7a/Unit1_Section3.2-MultiParam_Function.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Example review and run the code
###Code
def make_schedule(period1, period2):
schedule = ("[1st] " + period1.title() + ", [2nd] " + period2.title())
return schedule
student_schedule = make_schedule("mathematics", "history")
print("SCHEDULE:", student_schedule)
###Output
_____no_output_____
###Markdown
Task 5 Define `make_schedule()` adding a 3rd period - Start with the above example code- add a parameter period_3- update function code to add period_3 to the schedule- call **`make_schedule()`** with an additional argument such as 'science'- print the schedule
###Code
# [ ] add a 3rd period parameter to make_schedule
# [ ] Optional - print a schedule for 6 classes (Tip: perhaps let the function make this easy)
###Output
_____no_output_____
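One possible sketch for this task (the third class, 'science', is an arbitrary choice):

```python
# add a 3rd period parameter to make_schedule
def make_schedule(period1, period2, period3):
    schedule = ("[1st] " + period1.title() +
                ", [2nd] " + period2.title() +
                ", [3rd] " + period3.title())
    return schedule

student_schedule = make_schedule("mathematics", "history", "science")
print("SCHEDULE:", student_schedule)
```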
###Markdown
Module 2 Part 3 Functions Arguments & Parameters- Creating a simple Function with parameters- Exploring Functions with `return` values - Creating Functions with multiple parameters- **Sequence in python** -----> Student will be able to - create functions with a parameter - create functions with a `return` value - create functions with multiple parameters- **use knowledge of sequence in coding tasks** - **Use coding best practices** Concept SequenceIn programming, **sequence** refers to the order that code is processed. Objects in Python, such as variables and functions, are not available until they have been processed. Processing sequence flows from the top of a page of code to the bottom. This often means that **Function definitions are placed at the beginning of a page of code.**[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29ebdee3-33e8-487f-9c73-621219e5e6d2/Unit1_Section3.3-Object_Sequence.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29ebdee3-33e8-487f-9c73-621219e5e6d2/Unit1_Section3.3-Object_Sequence.vtt","srclang":"en","kind":"subtitles","label":"english"}])In the sample below, the function **`hat_available`** cannot be accessed since it is initialized after it is called at the bottom of the code. ```pythonhave_hat = hat_available('green') print('hat available is', have_hat)def hat_available(color): hat_colors = 'black, red, blue, green, white, grey, brown, pink' return(color.lower() in hat_colors)``` This results in an error - the code flow from top to bottom is in the incorrect **sequence** ```pythonNameError: name 'hat_available' is not defined```In the statement **`have_hat = hat_available('green')`** the function **`hat_available()`** needs to be called after the function has been defined> **Note:** an argument or variable is said to be **hard coded** when assigned a literal or constant value. It is a good habit to avoid creating hard coded values in functions, such as `hat_colors = 'black, red, blue, green, white, grey, brown, pink'` Examples
###Code
# review and run code - note: fix error in the following "tasks" section
have_hat = hat_available('green')
print('hat available is', have_hat)
def hat_available(color):
hat_colors = 'black, red, blue, green, white, grey, brown, pink'
# return Boolean
return(color.lower() in hat_colors)
###Output
_____no_output_____
###Markdown
Task 6 Change the Sequence to fix the `NameError`- [ ] fix the code **sequence** so the **`hat_available()`** function is available when called and the code runs without error
###Code
# [ ] fix the sequence of the code to remove the NameError
def hat_available(color):
hat_colors = 'black, red, blue, green, white, grey, brown, pink'
return(color.lower() in hat_colors)
have_hat = hat_available('green')
print('hat available is', have_hat)
###Output
hat available is True
###Markdown
Concepts Programming Style Tip: Avoid Hard-Coding "Hard-coding" is placing data values directly into codeAn example of hard-coding from above is **`have_hat = hat_available('green')`** where the argument `'green'` is hard-codedA programming best practice is to **avoid hard-coding values when possible**- Use variables instead of hard-coded values - Often it is preferable to use input such as a configuration file (advanced topic) or user input.These practices allow changing the data without disturbing the main code and make code more reusable. Task 7 Program: bird_availableThe program should ask the user to "input a bird name to check for availability" and print a statement informing of availability[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/767e4db3-7909-4829-99db-fd6750ea5d54/Unit1_Section3.3-Bird_Available.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/767e4db3-7909-4829-99db-fd6750ea5d54/Unit1_Section3.3-Bird_Available.vtt","srclang":"en","kind":"subtitles","label":"english"}]) create this program with a Boolean function `bird_available()`- has a parameter that takes the name of a type of bird- for this exercise the variable `bird_types = 'crow robin parrot eagle sandpiper hawk piegon'`- return `True` or `False` (we are making a Boolean function)- call the function using the name of a bird type from user input- print a sentence that indicates the availability of the type of bird checked
###Code
# [ ] create function bird_available
# [ ] user input
# [ ] call bird_available
# [ ] print availability status
###Output
_____no_output_____
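One possible sketch for this task (the prompt wording and the sentence format are assumptions):

```python
# Boolean function: is this type of bird in the available list?
def bird_available(bird_name):
    bird_types = 'crow robin parrot eagle sandpiper hawk piegon'
    return bird_name.lower() in bird_types

# user input
bird_to_check = input("Input a bird name to check for availability: ")

# call bird_available and print the availability status
if bird_available(bird_to_check):
    print(bird_to_check, "is available.")
else:
    print(bird_to_check, "is not available.")
```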
###Markdown
Task 8 Fix The Error
###Code
# define function how_many
def how_many():
requested = input("enter how many you want: ")
return requested
# get the number_needed
number_needed = how_many()
print(number_needed, "will be ordered")
###Output
8 will be ordered
###Markdown
Module 2: Functions Functions Arguments & Parameters- **Creating a simple Function with a parameter**- Exploring Functions with `return` values - Creating Functions with multiple parameters- Sequence in python -----> Student will be able to - **create functions with a parameter** - create functions with a `return` value - create functions with multiple parameters- use knowledge of sequence in coding tasks - Use coding best practices Before you begin here, watch the Functions video in D2L. Concept Calling Functions with Arguments: print() is a function built in to Python!Functions are used for code tasks that are intended to be reused. For example, you have already used the print() function and passed it **arguments** by putting strings and variables into its parentheses. []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.vtt","srclang":"en","kind":"subtitles","label":"english"}])Python allows us to create **User Defined Functions** and provides many **Built-in Functions** such as **`print()`** - **`print()`** can be called using arguments (or without) and sends text to standard output, such as the console. - **`print()`** uses **Parameters** to define the variable Arguments that can be passed to the Function. - **`print()`** defines multiple string/numbers parameters which means we can send a long list of Arguments to **`print()`**, separated by commas. - **`print()`** can also be called directly with just its name and empty parentheses and it will return a blank line to standard output Examples
###Code
print('Hello World!', 'I am sending string arguments to print ')
student_age = 17
student_name = "Hiroto Yamaguchi"
print(student_name,'will be in the class for',student_age, 'year old students.')
print("line 1")
print("line 2")
# line 3 is an empty return - the default when no arguments
print()
print("line 4")
###Output
line 1
line 2
line 4
###Markdown
Task 1 Passing Arguments to `print()` Many Arguments can be passed to print - update the print statement to use **`print()`** with **8** or more arguments
###Code
#[ ] increase the number of arguments used in print() to 8 or more
student_age = 17
student_name = "Hiroto Yamaguchi"
print(student_name,'will be in the class for',student_age, 'year old students.' + ' She is pretty selfish, ' + "but she doesn't care. " + 'She will have to learn one day, ' + 'maybe.')
###Output
Hiroto Yamaguchi will be in the class for 17 year old students. She is pretty selfish, but she doesn't care. She will have to learn one day, maybe.
###Markdown
Concept Create a simple FunctionCreating user defined functions is at the core of computer programming. Functions enable code reuse and make code easier to develop and maintain.[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.vtt","srclang":"en","kind":"subtitles","label":"english"}]) basics of a user defined function- define a function with **`def`** - use indentation (4 spaces)- define parameters- optional parameters - **`return`** values (or none)- function scope (basics defaults) `def some_function():`use the **`def`** statement when creating a **function** - use a function name that **starts with a letter** or underscore (usually a lower-case letter)- function names can contain **letters, numbers or underscores**- parenthesis **()** follow the function name- a colon **:** follows the parenthesis- the code for the function is indented under the function definition (use 4 spaces for this course)```pythondef some_function(): code the function tasks indented here ```The **end of the function** is denoted by returning to **no indentation** Examples
###Code
# defines a function named say_hi
def say_hi():
print("Hello there!")
print("goodbye")
# define three_three
def three_three():
print(33)
###Output
_____no_output_____
###Markdown
Concept Call a function by nameCall a simple function using the function name followed by parenthesis. For instance, calling print is **`print()`** Examples
###Code
# Program defines and calls the say_hi & three_three functions
# [ ] review and run the code
def say_hi():
print("Hello there!")
print("goodbye")
# end of indentation ends the function
# define three_three
def three_three():
print(33)
# calling the functions
say_hi()
three_three()
###Output
Hello there!
goodbye
33
###Markdown
Task 2 Define and call a simple function `yell_it()` `yell_it()` prints the phrase with "!" concatenated to the end- takes no arguments- indented function code does the following - define a variable called **`phrase`** and initialize it with a short *phrase* - prints **`phrase`** as all upper-case letters followed by "!"- call `yell_it` at the bottom of the cell after the function **`def`** (**Tip:** no indentation should be used)
###Code
#[ ] define (def) a simple function called yell_it() and call the function
def yell_it():
phrase = "you crazy bruh"
print(phrase.upper() + '!')
yell_it()
###Output
YOU CRAZY BRUH!
###Markdown
Concept Functions that have Parameters[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.vtt","srclang":"en","kind":"subtitles","label":"english"}])**`print()`** and **`type()`** are examples of built-in functions that have **Parameters** defined **`type()`** has a parameter for a **Python Object** and sends back the *type* of the object an **Argument** is a value given for a parameter when calling a function - **`type`** is called providing an **Argument** - in this case the string *"Hello"*```pythontype("Hello")``` Defining Function Parameters- Parameters are defined inside of the parenthesis as part of a function **`def`** statement- Parameters are typically copies of objects that are available for use in function code```pythondef say_this(phrase): print(phrase)``` Function can have default Arguments- Default Arguments are used if no argument is supplied- Default arguments are assigned when creating the parameter list```pythondef say_this(phrase = "Hi"): print(phrase)``` Examples
###Code
# yell_this() yells the string Argument provided
def yell_this(phrase):
print(phrase.upper() + "!")
# call function with a string
yell_this("It is time to save the notebook")
# use a default argument
def say_this(phrase = "haha"):
print(phrase)
say_this()
say_this("no, you")
###Output
haha
no, you
###Markdown
Task 3 Define `yell_this()` and call with variable argument - define variable **`words_to_yell`** as a string gathered from user `input()`- Call **`yell_this()`** with **`words_to_yell`** as argument- get user input() for the string words_to_yell
###Code
# [ ] define yell_this()
def yell_this():
# [ ] get user input in variable words_to_yell
words_to_yell = input()
print(words_to_yell)
# [ ] call yell_this function with words_to_yell as argument
yell_this()
###Output
I hate you
###Markdown
Module 2 Part 2 Functions Arguments & Parameters- Creating a simple Function with a parameter- **Exploring Functions with `return` values** - **Creating Functions with multiple parameters** - Sequence in python -----> Student will be able to - create functions with a parameter - **create functions with a `return` value**- **create functions with multiple parameters**- use knowledge of sequence in coding tasks - Use coding best practices Concepts Calling a function with a return value []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/db990568-d940-4ede-a063-7e40ed25c978/Unit1_Section3.2-function-return.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/db990568-d940-4ede-a063-7e40ed25c978/Unit1_Section3.2-function-return.vtt","srclang":"en","kind":"subtitles","label":"english"}])- **`type()`** returns an object type- **`type()`** can be called with a float the return value can be stored in a variable```pythonobject_type = type(2.33)``` creating a function with a return value - **`return`** keyword in a function *returns* a value after *exiting* the function ```pythondef msg_double(phrase): double = phrase + " " + phrase return double``` Examples review and run the code
###Code
# Message double returns the string Argument doubled
def msg_double(phrase):
double = phrase + " " + phrase
return double
# save return value in variable
msg_2x = msg_double("let's go")
print(msg_2x)
# example of functions with return values used in functions
def msg_double(phrase):
double = phrase + " " + phrase
return double
# prints the returned object
print(msg_double("Save Now!"))
# echo the type of the returned object
type(msg_double("Save Now!"))
###Output
Save Now! Save Now!
###Markdown
Task 4 Doctor: a function that adds the "Doctor" title to a name- Define function `make_doctor()` that takes a parameter `name`- get user **input** for variable **`full_name`**- call the function using `full_name` as argument- print the return value
###Code
# create and call make_doctor() with full_name argument from user input - then print the return value
def make_doctor(name):
full_name = input(name)
return full_name
print(make_doctor("Enter title here"))
###Output
Enter title here you
###Markdown
Concepts Functions with multiple parametersFunctions can have multiple parameters separated by commas[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d82c3856-61ff-4fa3-9a20-df8f6ea4dd7a/Unit1_Section3.2-MultiParam_Function.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d82c3856-61ff-4fa3-9a20-df8f6ea4dd7a/Unit1_Section3.2-MultiParam_Function.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Example review and run the code
###Code
def make_schedule(period1, period2):
schedule = ("[1st] " + period1.title() + ", [2nd] " + period2.title())
return schedule
student_schedule = make_schedule("mathematics", "history")
print("SCHEDULE:", student_schedule)
###Output
SCHEDULE: [1st] Mathematics, [2nd] History
###Markdown
Task 5 Define `make_schedule()` adding a 3rd period - Start with the above example code- add a parameter period_3- update function code to add period_3 to the schedule- call **`make_schedule()`** with an additional argument such as 'science'- print the schedule
###Code
# [ ] add a 3rd period parameter to make_schedule
# [ ] Optional - print a schedule for 6 classes (Tip: perhaps let the function make this easy)
def make_schedule(period1, period2, period3):
schedule = ("[1st] " + period1.title() + ", [2nd] " + period2.title() + ", [3rd] " + period3.title())
return schedule
student_schedule = make_schedule("mathematics", "history", "dumb class")
print("SCHEDULE:", student_schedule)
###Output
SCHEDULE: [1st] Mathematics, [2nd] History, [3rd] Dumb Class
###Markdown
Module 2 Part 3 Functions Arguments & Parameters- Creating a simple Function with parameters- Exploring Functions with `return` values - Creating Functions with multiple parameters- **Sequence in python** -----> Student will be able to - create functions with a parameter - create functions with a `return` value - create functions with multiple parameters- **use knowledge of sequence in coding tasks** - **Use coding best practices** Concept SequenceIn programming, **sequence** refers to the order that code is processed. Objects in Python, such as variables and functions, are not available until they have been processed. Processing sequence flows from the top of a page of code to the bottom. This often means that **Function definitions are placed at the beginning of a page of code.**[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29ebdee3-33e8-487f-9c73-621219e5e6d2/Unit1_Section3.3-Object_Sequence.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29ebdee3-33e8-487f-9c73-621219e5e6d2/Unit1_Section3.3-Object_Sequence.vtt","srclang":"en","kind":"subtitles","label":"english"}])In the sample below, the function **`hat_available`** cannot be accessed since it is initialized after it is called at the bottom of the code. ```pythonhave_hat = hat_available('green') print('hat available is', have_hat)def hat_available(color): hat_colors = 'black, red, blue, green, white, grey, brown, pink' return(color.lower() in hat_colors)``` This results in an error - the code flow from top to bottom is in the incorrect **sequence** ```pythonNameError: name 'hat_available' is not defined```In the statement **`have_hat = hat_available('green')`** the function **`hat_available()`** needs to be called after the function has been defined> **Note:** an argument or variable is said to be **hard coded** when assigned a literal or constant value. It is a good habit to avoid creating hard coded values in functions, such as `hat_colors = 'black, red, blue, green, white, grey, brown, pink'` Examples
###Code
# review and run code - note: fix error in the following "tasks" section
have_hat = hat_available('green')
print('hat available is', have_hat)
def hat_available(color):
hat_colors = 'black, red, blue, green, white, grey, brown, pink'
# return Boolean
return(color.lower() in hat_colors)
###Output
_____no_output_____
###Markdown
Task 6 Change the Sequence to fix the `NameError`- [ ] fix the code **sequence** so the **`hat_available()`** function is available when called and the code runs without error
###Code
# [ ] fix the sequence of the code to remove the NameError
def hat_available(color):
hat_colors = 'black, red, blue, green, white, grey, brown, pink'
return(color.lower() in hat_colors)
have_hat = hat_available('green')
print('hat available is', have_hat)
###Output
hat available is True
###Markdown
Concepts Programming Style Tip: Avoid Hard-Coding "Hard-coding" is placing data values directly into codeAn example of hard-coding from above is **`have_hat = hat_available('green')`** where the argument `'green'` is hard-codedA programming best practice is to **avoid hard-coding values when possible**- Use variables instead of hard-coded values - Often it is preferable to use input such as a configuration file (advanced topic) or user input.These practices allow changing the data without disturbing the main code and make code more reusable. Task 7 Program: bird_availableThe program should ask the user to "input a bird name to check for availability" and print a statement informing of availability[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/767e4db3-7909-4829-99db-fd6750ea5d54/Unit1_Section3.3-Bird_Available.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/767e4db3-7909-4829-99db-fd6750ea5d54/Unit1_Section3.3-Bird_Available.vtt","srclang":"en","kind":"subtitles","label":"english"}]) create this program with a Boolean function `bird_available()`- has a parameter that takes the name of a type of bird- for this exercise the variable `bird_types = 'crow robin parrot eagle sandpiper hawk piegon'`- return `True` or `False` (we are making a Boolean function)- call the function using the name of a bird type from user input- print a sentence that indicates the availability of the type of bird checked
###Code
# [ ] create function bird_available
def bird_available(bird):
# [ ] user input
bird_types = 'crow robin parrot eagle sandpiper hawk piegon'
return (bird.lower() in bird_types)
# [ ] call bird_available
bird_now = bird_available(input("Type bird name here"))
# [ ] print availability status
print(bird_now)
###Output
Type bird name here bird
###Markdown
Task 8 Fix The Error
###Code
# define function how_many
def how_many():
requested = input("enter how many you want: ")
return requested
# get the number_needed
number_needed = how_many()
print(number_needed, "will be ordered")
###Output
enter how many you want: 2
###Markdown
Module 2: Functions Functions Arguments & Parameters- **Creating a simple Function with a parameter**- Exploring Functions with `return` values - Creating Functions with multiple parameters- Sequence in python -----> Student will be able to - **create functions with a parameter** - create functions with a `return` value - create functions with multiple parameters- use knowledge of sequence in coding tasks - Use coding best practices Before you begin here, watch the Functions video in D2L. Concept Calling Functions with Arguments: print() is a function built in to Python!Functions are used for code tasks that are intended to be reused. For example, you have already used the print() function and passed it **arguments** by putting strings and variables into its parentheses. []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.vtt","srclang":"en","kind":"subtitles","label":"english"}])Python allows us to create **User Defined Functions** and provides many **Built-in Functions** such as **`print()`** - **`print()`** can be called using arguments (or without) and sends text to standard output, such as the console. - **`print()`** uses **Parameters** to define the variable Arguments that can be passed to the Function. - **`print()`** defines multiple string/numbers parameters which means we can send a long list of Arguments to **`print()`**, separated by commas. - **`print()`** can also be called directly with just its name and empty parentheses and it will return a blank line to standard output Examples
###Code
print('Hello World!', 'I am sending string arguments to print ')
student_age = 17
student_name = "Hiroto Yamaguchi"
print(student_name,'will be in the class for',student_age, 'year old students.')
print("line 1")
print("line 2")
# line 3 is an empty return - the default when no arguments
print()
print("line 4")
###Output
line 1
line 2
line 4
###Markdown
Task 1 Passing Arguments to `print()` Many Arguments can be passed to print - update the print statement to use **`print()`** with **8** or more arguments
###Code
#[ ] increase the number of arguments used in print() to 8 or more
student_age = 17
student_name = "Hiroto Yamaguchi"
class_progress = "He's only 13,"
class_grades = "A's in every course."
print(student_name,'will be in the class for',student_age, 'year old students.', class_progress, "but he's highly intelligent and has achieved", class_grades, "So he'll be just fine.")
###Output
Hiroto Yamaguchi will be in the class for 17 year old students. He's only 13, but he's highly intelligent and has achieved A's in every course. So he'll be just fine.
###Markdown
Concept Create a simple FunctionCreating user defined functions is at the core of computer programming. Functions enable code reuse and make code easier to develop and maintain.[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.vtt","srclang":"en","kind":"subtitles","label":"english"}]) basics of a user defined function- define a function with **`def`** - use indentation (4 spaces)- define parameters- optional parameters - **`return`** values (or none)- function scope (basics defaults) `def some_function():`use the **`def`** statement when creating a **function** - use a function name that **starts with a letter** or underscore (usually a lower-case letter)- function names can contain **letters, numbers or underscores**- parenthesis **()** follow the function name- a colon **:** follows the parenthesis- the code for the function is indented under the function definition (use 4 spaces for this course)```pythondef some_function(): code the function tasks indented here ```The **end of the function** is denoted by returning to **no indentation** Examples
###Code
# defines a function named say_hi
def say_hi():
print("Hello there!")
print("goodbye")
# define three_three
def three_three():
print(33)
three_three()
say_hi()
###Output
33
Hello there!
goodbye
###Markdown
Concept Call a function by nameCall a simple function using the function name followed by parenthesis. For instance, calling print is **`print()`** Examples
###Code
# Program defines and calls the say_hi & three_three functions
# [ ] review and run the code
def say_hi():
print("Hello there!")
print("goodbye")
# end of indentation ends the function
# define three_three
def three_three():
print(33)
# calling the functions
say_hi()
print()
three_three()
###Output
Hello there!
goodbye
33
###Markdown
Task 2 Define and call a simple function `yell_it()` `yell_it()` prints the phrase with "!" concatenated to the end- takes no arguments- indented function code does the following - define a variable called **`phrase`** and initialize it with a short *phrase* - prints **`phrase`** as all upper-case letters followed by "!"- call `yell_it` at the bottom of the cell after the function **`def`** (**Tip:** no indentation should be used)
###Code
#[ ] define (def) a simple function called yell_it() and call the function
def yell_it():
print(input("What would you like to tell the world? ")+ "!")
yell_it()
###Output
What would you like to tell the world? BE NICE
BE NICE!
###Markdown
Concept Functions that have Parameters[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.vtt","srclang":"en","kind":"subtitles","label":"english"}])**`print()`** and **`type()`** are examples of built-in functions that have **Parameters** defined **`type()`** has a parameter for a **Python Object** and sends back the *type* of the object an **Argument** is a value given for a parameter when calling a function - **`type`** is called providing an **Argument** - in this case the string *"Hello"*```pythontype("Hello")``` Defining Function Parameters- Parameters are defined inside of the parenthesis as part of a function **`def`** statement- Parameters are typically copies of objects that are available for use in function code```pythondef say_this(phrase): print(phrase)``` Function can have default Arguments- Default Arguments are used if no argument is supplied- Default arguments are assigned when creating the parameter list```pythondef say_this(phrase = "Hi"): print(phrase)``` Examples
###Code
# yell_this() yells the string Argument provided
def yell_this(phrase):
print(phrase.upper() + "!")
# call function with a string
yell_this("It is time to save the notebook")
# use a default argument
def say_this(phrase = "Hi"):
print(phrase)
say_this()
say_this("Bye")
###Output
Hi
Bye
###Markdown
Task 3 Define `yell_this()` and call with variable argument - define variable **`words_to_yell`** as a string gathered from user `input()`- Call **`yell_this()`** with **`words_to_yell`** as argument- get user input() for the string words_to_yell
###Code
# [ ] define yell_this()
def yell_this(phrase):
    print(phrase)
# [ ] get user input in variable words_to_yell
words_to_yell = input('What would you like to yell? ')
# [ ] call yell_this function with words_to_yell as argument
yell_this(words_to_yell)
###Output
What would you like to yell? test
test
###Markdown
Module 2 Part 2 Functions Arguments & Parameters- Creating a simple Function with a parameter- **Exploring Functions with `return` values** - **Creating Functions with multiple parameters** - Sequence in python -----> Student will be able to - create functions with a parameter - **create functions with a `return` value**- **create functions with multiple parameters**- use knowledge of sequence in coding tasks - Use coding best practices Concepts Calling a function with a return value []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/db990568-d940-4ede-a063-7e40ed25c978/Unit1_Section3.2-function-return.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/db990568-d940-4ede-a063-7e40ed25c978/Unit1_Section3.2-function-return.vtt","srclang":"en","kind":"subtitles","label":"english"}])- **`type()`** returns an object type- **`type()`** can be called with a float the return value can be stored in a variable```pythonobject_type = type(2.33)``` creating a function with a return value - **`return`** keyword in a function *returns* a value after *exiting* the function ```pythondef msg_double(phrase): double = phrase + " " + phrase return double``` Examples review and run the code
###Code
# Message double returns the string Argument doubled
def msg_double(phrase):
double = phrase + " " + phrase
return double
# save return value in variable
msg_2x = msg_double("let's go")
print(msg_2x)
# example of functions with return values used in functions
def msg_double(phrase):
double = phrase + " " + phrase
return double
# prints the returned object
print(msg_double("Save Now!"))
# echo the type of the returned object
type(msg_double("Save Now!"))
###Output
Save Now! Save Now!
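###Markdown
One more optional sketch, not in the original lesson: a function that only prints and has no `return` statement still returns a value, the special object `None`. This matters once you start saving return values in variables.
###Code
# extra illustration: a print-only function returns None
def show_msg(phrase):
    print(phrase)
result = show_msg("no return statement here")
print(result)
print(type(result))
###Output
_____no_output_____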
###Markdown
Task 4 Doctor: a function that adds the "Doctor" title to a name- Define function `make_doctor()` that takes a parameter `name`- get user **input** for variable **`full_name`**- call the function using `full_name` as argument- print the return value
###Code
# create and call make_doctor() with full_name argument from user input - then print the return value
def make_doctor(name):
    dr_name = "Dr. " + name
return dr_name
full_name = input("What is your full name? ")
print(make_doctor(full_name))
###Output
What is your full name? Derrick Bowles
Dr. Derrick Bowles
###Markdown
Concepts Functions with multiple parametersFunctions can have multiple parameters separated by commas[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d82c3856-61ff-4fa3-9a20-df8f6ea4dd7a/Unit1_Section3.2-MultiParam_Function.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d82c3856-61ff-4fa3-9a20-df8f6ea4dd7a/Unit1_Section3.2-MultiParam_Function.vtt","srclang":"en","kind":"subtitles","label":"english"}]) Example review and run the code
###Code
def make_schedule(period1, period2):
schedule = ("[1st] " + period1.title() + ", [2nd] " + period2.title())
return schedule
student_schedule = make_schedule("mathematics", "history")
print("SCHEDULE:", student_schedule)
###Output
SCHEDULE: [1st] Mathematics, [2nd] History
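###Markdown
Optional extra, not part of the original example: because `make_schedule()` has more than one parameter, arguments are matched by position, but the same call can also be written with keyword arguments in any order.
###Code
# extra illustration: the same schedule built with keyword arguments
student_schedule = make_schedule(period2 = "history", period1 = "mathematics")
print("SCHEDULE:", student_schedule)
###Output
_____no_output_____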
###Markdown
Task 5 Define `make_schedule()` adding a 3rd period - Start with the above example code- add a parameter period_3- update function code to add period_3 to the schedule- call **`make_schedule()`** with an additional argument such as 'science'- print the schedule
###Code
# [ ] add a 3rd period parameter to make_schedule
# [ ] Optional - print a schedule for 6 classes (Tip: perhaps let the function make this easy)
def make_schedule(period1, period2, period3):
schedule = ("[1st] " + period1.title() + ", [2nd] " + period2.title() + ", [3rd] " + period3.title())
return schedule
student_schedule = make_schedule("algebra", "united states history", "physics")
print("SCHEDULE:", student_schedule)
###Output
SCHEDULE: [1st] Algebra, [2nd] United States History, [3rd] Physics
###Markdown
Module 2 Part 3 Functions Arguments & Parameters- Creating a simple Function with parameters- Exploring Functions with `return` values - Creating Functions with multiple parameters- **Sequence in python** -----> Student will be able to - create functions with a parameter - create functions with a `return` value - create functions with multiple parameters- **use knowledge of sequence in coding tasks** - **Use coding best practices** Concept SequenceIn programming, **sequence** refers to the order that code is processed. Objects in Python, such as variables and functions, are not available until they have been processed. Processing sequence flows from the top of a page of code to the bottom. This often means that **Function definitions are placed at the beginning of a page of code.**[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29ebdee3-33e8-487f-9c73-621219e5e6d2/Unit1_Section3.3-Object_Sequence.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29ebdee3-33e8-487f-9c73-621219e5e6d2/Unit1_Section3.3-Object_Sequence.vtt","srclang":"en","kind":"subtitles","label":"english"}])In the sample below, the function **`hat_color`** cannot be accessed since it is initialized after it is called at the bottom of the code. ```pythonhave_hat = hat_available('green') print('hat available is', have_hat)def hat_available(color): hat_colors = 'black, red, blue, green, white, grey, brown, pink' return(color.lower() in hat_colors)``` This results in an error - the code flows from top to bottom is in the incorrect **sequence** ```pythonNameError: name 'hat_available' is not defined```In the statement **`have_hat = hat_available('green')`** the function **`hat_available()`** needs to be called after the function has been defined> **Note:** an argument or variable is said to be **hard coded** when assigned a literal or constant value. It is a good habit to avoid creating hard coded values in functions, such as `hat_colors = 'black, red, blue, green, white, grey, brown, pink'` Examples
###Code
# review and run code - note: fix error in the following "tasks" section
def hat_available(color):
hat_colors = 'black, red, blue, green, white, grey, brown, pink'
# return Boolean
return(color.lower() in hat_colors)
have_hat = hat_available('green')
print('hat available is', have_hat)
###Output
hat available is True
###Markdown
Task 6 Change the Sequence to fix the `NameError`- [ ] fix the code **sequence** so the **`hat_available()`** function is available when called and the code runs without error
###Code
# [ ] fix the sequence of the code to remove the NameError
def hat_available(color):
hat_colors = 'black, red, blue, green, white, grey, brown, pink'
return(color.lower() in hat_colors)
have_hat = hat_available('green')
print('hat available is', have_hat)
###Output
hat available is True
###Markdown
Concepts Programming Style Tip: Avoid Hard-Coding "Hard-coding" is placing data values directly into codeAn example of hard-coding from above is **`have_hat = hat_available('green')`** where the argument `'green'` is hard-codedA programming best practice is to **avoid hard-coding values when possible**- Use variables instead of hard-coded values - Often preferable to use input such as a configuration file (advanced topic) or user input.These practices allow changing the data without disturbing the main code and make code more reusable. Task 7 Program: bird_availableThe program should ask the user to "input a bird name to check for availability" and print a statement informing of availability[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/767e4db3-7909-4829-99db-fd6750ea5d54/Unit1_Section3.3-Bird_Available.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/767e4db3-7909-4829-99db-fd6750ea5d54/Unit1_Section3.3-Bird_Available.vtt","srclang":"en","kind":"subtitles","label":"english"}]) create this program with a Boolean function `bird_available()`- has a parameter that takes the name of a type of bird- for this exercise the variable `bird_types = 'crow robin parrot eagle sandpiper hawk pigeon'`- return `True` or `False` (we are making a Boolean function)- call the function using the name of a bird type from user input- print a sentence that indicates the availability of the type of bird checked
###Code
# [ ] create function bird_available
def bird_available(bird):
bird_types = 'crow robin parrot eagle sandpiper hawk pigeon'
return(bird.lower() in bird_types)
# [ ] user input
bird_choice = input("What type of bird do you want to be? ")
# [ ] call bird_available
have_bird = bird_available(bird_choice)
# [ ] print availability status
if have_bird:  # reuse the value already stored in have_bird
print("Have fun flying around!")
else:
print("So sorry. You're grounded.")
###Output
What type of bird do you want to be? jay
So sorry. You're grounded.
###Markdown
Task 8 Fix The Error
###Code
# define function how_many
def how_many():
requested = input("enter how many you want: ")
return requested
# get the number_needed
number_needed = how_many()
print(number_needed, "will be ordered")
###Output
enter how many you want: 2
2 will be ordered
|
Documentation/Tutorial/Tutorial_Abinit.ipynb | ###Markdown
Running BerkeleyGW with BGWpy In this notebook, we assume that you are somewhat familiar with the BerkeleyGW software: what problem it solves, and what is the general workflow to run it. We also assume that you have a basic knowledge of Python and its terminology.Before you begin, make sure that you have the following packages installed:* Jupyter Notebook* Abinit* BerkeleyGW* BGWpyTo run BGWpy, you'll also need the `bin` directories of BerkeleyGW and Abinit installations located in your `PATH` environment variable. Checking your configuration The following cell is used to generate information that we'll need, should we have to debug this notebook. You don't need to run it, but it may be useful to look at for educational purposes.
###Code
import sys
import os
import BGWpy.config as defaults
print("Python kernel:\n {} ".format(sys.executable))
print("Python version:\n {} ".format(sys.version))
print("Current working directory:\n {} ".format(os.getcwd()))
print("Configuration file:\n {} ".format(defaults.config_file))
print("Use HDF5?:\n {} ".format(defaults.flavors['use_hdf5']))
print("Use complex version of BerkeleyGW?:\n {}".format(defaults.flavors['flavor_complex']))
print("DFT Flavor:\n {} ".format(defaults.flavors['dft_flavor']))
print("Default MPI settings:\n {} ".format(defaults.default_mpi))
print("Default runscript settings:\n {} ".format(defaults.default_runscript))
print("Paths in $PATH:")
for i in os.environ['PATH'].split(":"):
print(" {}".format(i))
###Output
_____no_output_____
###Markdown
Pay attention to the `use_hdf5` flag. It should reflect whether you compiled BerkeleyGW with HDF5 support or not. If the information above is not consistent with what you have, then you should edit your `~/.BGWpyrc` file accordingly. This is important because the file names that BGW expects from a calculation depends on it. If you don't have HDF5, then you should remove all the '.h5' extensions from file names. It is highly recommended, however, that you build BGW with HDF5 support, as it could become mandatory in the future.If you don't have a `~/.BGWpyrc` yet, you can copy it from the `BGWpy/config` directory, or simply run the script `BGWpy_make_config_file.py`. Load Libraries First, we load two external packages which BGWpy uses: `numpy` and `pymatgen`.
###Code
import pymatgen
import numpy as np
###Output
_____no_output_____
###Markdown
Next, we load the `Structure` class from the BGWpy package. But really this is the Structure object from the `pymatgen` package.
###Code
from BGWpy import Structure
###Output
_____no_output_____
###Markdown
Next, we load the classes which create and run Abinit calculations.
###Code
from BGWpy import AbinitScfTask, AbinitBgwFlow
###Output
_____no_output_____
###Markdown
Finally, we load the classes with create and run BerkeleyGW calculations.
###Code
from BGWpy import EpsilonTask, SigmaTask, KernelTask, AbsorptionTask
###Output
_____no_output_____
###Markdown
Make sure that both the BerkeleyGW and Abinit binary folders are in your PATH environment variable Create the Structure For this tutorial, we'll calculate the many-body properties of the GaAs primitive cell. All files that you will need have been provided for you in the `Data` subdirectory.SHOW PICTURE HERE. (Even better if can play using `pymatgen`...) Geometries are specified in BGWpy using pymatgen's `Structure` class, which may be imported directly from BGWpy or through pymatgen.There are a number of ways that we can import geometries into BGWpy using the `Structure` class. For example, we can load them from a pre-existing CIF file:
###Code
structure = Structure.from_file('../Data/Structures/GaAs.cif')
print(structure)
###Output
_____no_output_____
###Markdown
We can also load them from a previous pymatgen Structure which has been exported to a file in the JSON format:
###Code
structure = Structure.from_file('../Data/Structures/GaAs.json')
print(structure)
###Output
_____no_output_____
###Markdown
We can even use pymatgen to directly create the structure in a Python script:
###Code
acell_angstrom = 5.6535
rprim = np.array([[.0,.5,.5],[.5,.0,.5],[.5,.5,.0]]) * acell_angstrom
structure = pymatgen.core.Structure(
lattice = pymatgen.core.lattice.Lattice(rprim),
species= ['Ga', 'As'],
coords = [3*[.0], 3*[.25]],
)
print(structure)
###Output
_____no_output_____
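###Markdown
As an optional sanity check (this cell is an addition, not part of the original tutorial), you can inspect the structure you just built; the attributes used below are standard pymatgen `Structure` and `Lattice` properties.
###Code
# Optional: inspect the GaAs structure defined above
print("Formula:                   ", structure.composition.reduced_formula)
print("Number of sites:           ", len(structure))
print("Lattice lengths (Angstrom):", structure.lattice.abc)
print("Lattice angles (degrees):  ", structure.lattice.angles)
print("Cell volume (Angstrom^3):  ", structure.volume)
###Output
_____no_output_____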
###Markdown
For more information about pymatgen, please consult its official documentation. Generating the Ground State Density To begin, we will run a ground state DFT calculation to self-consistency to generate the ground state charge density for the calculation. This ground state charge density will be fed into all wavefunction calculations in the next step. We use Abinit in this notebook, however BerkeleyGW and BGWpy support a number of other DFT packages.First, we will create an instance of the `AbinitScfTask` class to prepare the needed variables:
###Code
task = AbinitScfTask(
dirname = 'Runs/11-Density',
structure = Structure.from_file('../Data/Structures/GaAs.json'),
prefix = 'GaAs', # File names prefix. You don't really need to specify this with abinit.
pseudo_dir = '../Data/Pseudos',
pseudos = ['31-Ga.pspnc', '33-As.pspnc'],
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
ecut = 5.0, # Wavefunctions cutoff energy
# These are the default parameters for the MPI runner.
# You can specify them here, but it's better to store this info in
# the configuration file ~/.BGWpyrc
nproc=1,
nproc_per_node=1,
mpirun='mpirun',
nproc_flag='-n',
nproc_per_node_flag='--npernode',
)
###Output
_____no_output_____
###Markdown
As you can see, BGWpy has a number of parameters that you will need to set. However, many of these parameters are consistent from calculation to calculation, so we'll store them in dictionaries that we can reuse for future steps.First, a dictionary to store all variables that will be used across all Abinit calculations:
###Code
structure_and_pseudos = dict(
structure = Structure.from_file('../Data/Structures/GaAs.json'),
pseudo_dir = '../Data/Pseudos',
pseudos = ['31-Ga.pspnc', '33-As.pspnc'],
)
###Output
_____no_output_____
###Markdown
Next, a dictionary to store the variables which are used only for this particular SCF task:
###Code
scf_settings = dict(
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
ecut = 5.0, # Wavefunctions cutoff energy
)
###Output
_____no_output_____
###Markdown
And finally, a dictionary to store the settings related to running calculations with MPI.
###Code
mpi_settings = dict( # Then again, you should store those settings in ~/.BGWpyrc
nproc=1,
nproc_per_node=1,
mpirun='mpirun',
nproc_flag='-n',
nproc_per_node_flag='--npernode',
)
###Output
_____no_output_____
###Markdown
Note that all these dictionaries correspond to arguments for the `AbinitScfTask`, stored as key/value pairs. This allows us to use dictionary unpacking to considerably tidy up our code:
###Code
scf_task = AbinitScfTask(
dirname='Runs/11-Density',
**scf_settings,
**structure_and_pseudos,
**mpi_settings,
)
###Output
_____no_output_____
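###Markdown
If the `**` dictionary-unpacking syntax is new to you, here is a tiny, BGWpy-independent sketch of what it does when calling a function; the `describe_run` function is made up just for this illustration.
###Code
# Plain-Python illustration of dictionary unpacking
def describe_run(dirname, ecut, ngkpt):
    print("dirname={}, ecut={}, ngkpt={}".format(dirname, ecut, ngkpt))
common = dict(ecut=5.0, ngkpt=[2, 2, 2])
# The call below is equivalent to describe_run(dirname='Runs/example', ecut=5.0, ngkpt=[2, 2, 2])
describe_run(dirname='Runs/example', **common)
###Output
_____no_output_____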
###Markdown
Now that we've created the `AbinitScfTask` task, we can use the `write` method to write the needed input files to disk:
###Code
scf_task.write()
###Output
_____no_output_____
###Markdown
If you receive an error message stating that an executable could not be found, you likely do not have the needed BerkeleyGW and Abinit binary folders in your `PATH` environment variable.Let's take a look at the folder that was created by this task using Jupyter's built-in `!ls` magic command:
###Code
!ls 'Runs/11-Density'
###Output
_____no_output_____
###Markdown
In our new folder, there are several new files:* `GaAs.files`, the list of files used by Abinit.* `GaAs.in`, the Abinit input variables.* `run.sh`, the execution script.and folders used by Abinit for the input data files, outputs, and temporary files:* `input_data`* `out_data`* `tmp_data`Now that we've created the needed input files, let's run the `run.sh` script using the `run` method. Note that this step will take a few seconds, as it will run Abinit in the background.
###Code
scf_task.run()
###Output
_____no_output_____
###Markdown
Finally, we can check the status of the calculation using the `report` method. You should see a message telling you that it's been completed.
###Code
scf_task.report()
###Output
_____no_output_____
###Markdown
It is possible to access the data files produced by this task with
###Code
charge_density_fname = scf_task.get_odat('DEN')
vxc_fname = scf_task.get_odat('VXC')
print("Charge density file name: {}".format(charge_density_fname))
print("Exchange-correlation potential file name: {}".format(vxc_fname))
###Output
_____no_output_____
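###Markdown
A small optional check, added here and not part of the original tutorial: since the wavefunction steps below only refer to these outputs by file name, it is worth confirming that they exist on disk before continuing.
###Code
import os
# Verify that the SCF outputs we are about to reuse are really on disk
for fname in (charge_density_fname, vxc_fname):
    print("{} exists: {}".format(fname, os.path.exists(str(fname))))
###Output
_____no_output_____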
###Markdown
This won't be necessary, however, when we get to use the `AbinitBgwFlow`. Generating the Wavefunctions Now that we've generated the ground state density, we'll use this to generate the wavefunctions that we'll feed into BerkeleyGW. This may be done with the `AbinitBgwFlow` class. As mentioned in the introduction, we'll need up to 6 different types of wavefunction files. WFN `WFN` is the "standard" k-shifted wavefunction file which is read by the `Epsilon` calculation, and thus is needed for all BerkeleyGW calculations.It, like all other wavefunction files, is generated using the `AbinitBgwFlow` class. The only differences between these wavefunction types are the parameter values used:
###Code
task = AbinitBgwFlow(
dirname = 'Runs/12-Wfn',
structure = Structure.from_file('../Data/Structures/GaAs.json'),
prefix = 'GaAs',
pseudo_dir = '../Data/Pseudos',
pseudos = ['31-Ga.pspnc', '33-As.pspnc'],
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
ecut = 5.0, # Wavefunctions cutoff energy
nband = 9, # Number of bands
input_variables = {'autoparal' : 1}, # Any extra input variables we want to specify
charge_density_fname = '11-Density/out_data/odat_DEN',
vxc_fname = '11-Density/out_data/odat_VXC',
# These are the default parameters for the MPI runner.
# Please adapt them to your needs.
nproc = 1,
nproc_per_node = 1,
mpirun = 'mpirun',
nproc_flag = '-n',
nproc_per_node_flag = '--npernode',
)
###Output
_____no_output_____
###Markdown
As before, we will break up these arguments into sets of dictionaries: the settings common to all wavefunction calculations
###Code
wfn_common_settings = dict(
ecut = 5.0, # Wavefunctions cutoff energy
input_variables = {'autoparal' : 1}, # Any extra input variables we want to specify
charge_density_fname = charge_density_fname,
vxc_fname = vxc_fname,
)
###Output
_____no_output_____
###Markdown
and the arguments specific to the current wavefunction calculation
###Code
wfn_settings = dict(
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
nband = 9, # Number of bands
**wfn_common_settings)
###Output
_____no_output_____
###Markdown
Reusing the dictionaries of settings previously defined, we can now create an instance of the `AbinitBgwFlow` class:
###Code
wfn_flow = AbinitBgwFlow(
dirname='Runs/12-Wfn',
**wfn_settings,
**structure_and_pseudos,
**mpi_settings)
###Output
_____no_output_____
###Markdown
As before, we'll write the input files to disc then run the calculation:
###Code
wfn_flow.write()
wfn_flow.run()
wfn_flow.report()
###Output
_____no_output_____
###Markdown
The output specifies that we've actually run two calculations here: a `WFN` calculation where we calculate wavefunctions using Abinit, and `Abi2BGW` where we convert the resulting Abinit-specific output files into a format readable by BerkeleyGW. Unlike in the density case where we ran a single task, here we're running two tasks (`WFN` and `Abi2BGW`) in a workflow (hence the name `AbinitBgwFlow`). WFNq Next, we'll create `WFNq`, which is the "standard" k-shifted and q-shifted wavefunction file which is read by the `Epsilon` calculation, and thus is needed for all BerkeleyGW calculations.The only dictionary we need to create is the one with the settings specific to the `WFNq` wavefunction:
###Code
wfnq_settings = dict(
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
qshift = [.001,.0,.0],# k-points q-shift
**wfn_common_settings)
###Output
_____no_output_____
###Markdown
And then we can prepare the calculation:
###Code
wfnq_flow = AbinitBgwFlow(
dirname='Runs/13-Wfnq',
**wfnq_settings,
**structure_and_pseudos,
**mpi_settings)
###Output
_____no_output_____
###Markdown
Create it, and run it:
###Code
wfnq_flow.write()
wfnq_flow.run()
wfnq_flow.report()
###Output
_____no_output_____
###Markdown
Wfn_co Next, we'll create `WFN_co`, which is the wavefunction on a coarser (and unshifted) grid than `WFN`. This is used by `Sigma`, `Kernel`, and `Absorption`, and thus will be needed by most BerkeleyGW calculations. We will also use this calculation to generate the ground state density and exchange-correlation potential that will be used by `Sigma`.Once again, we set up the dictionary with our needed variables:
###Code
wfn_co_settings = dict(
ngkpt = [2,2,2], # k-points grid
kshift = [.0,.0,.0], # k-points shift
nband = 9, # Number of bands
rhog_flag = True, # Also convert the charge density for BGW.
vxcg_flag = True, # Also convert vxc for BGW.
**wfn_common_settings)
###Output
_____no_output_____
###Markdown
Note that there's a new flag `rhog_flag` which tells `AbinitBgwFlow` to generate additional density-related files, while the `vxcg_flag` tells the `Abi2BGW` task to read and convert the `VXC` file. Now we can prepare the calculation:
###Code
wfn_co_flow = AbinitBgwFlow(
dirname = 'Runs/14-Wfn_co',
**wfn_co_settings,
**structure_and_pseudos,
**mpi_settings)
###Output
_____no_output_____
###Markdown
And create and run it:
###Code
wfn_co_flow.write()
wfn_co_flow.run()
wfn_co_flow.report()
###Output
_____no_output_____
###Markdown
WFN_fi Next, we'll create `WFN_fi`, the k-shifted `WFN` on a finer grid than `WFN`. This is used during interpolation in the `Absorption` executable and thus is only needed if you need to solve the BSE equations. (Symmetry is also turned off for this calculation.)
###Code
wfn_fi_settings = dict(
nband = 9, # Number of bands
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
symkpt = False, # Do not reduce the k-point grid with symmetries.
**wfn_common_settings)
wfn_fi_flow = AbinitBgwFlow(
dirname = 'Runs/15-Wfn_fi',
**wfn_fi_settings,
**structure_and_pseudos,
**mpi_settings)
wfn_fi_flow.write()
wfn_fi_flow.run()
wfn_fi_flow.report()
###Output
_____no_output_____
###Markdown
WFNq_fi FINALLY, we'll create `WFNq_fi`, the k-shifted and q-shifted `WFN` on a finer grid than `WFN`. Like `WFN_fi`, this is used during interpolation in the `Absorption` executable and thus is only needed if you need to solve the BSE equations. (And symmetry is turned off, as before.)Let's go through the steps again:
###Code
wfnq_fi_settings = dict(
nband = 9, # Number of bands
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
qshift = [.001,.0,.0],# k-points q-shift
symkpt = False, # Do not reduce the k-point grid with symmetries.
**wfn_common_settings)
wfnq_fi_flow = AbinitBgwFlow(
dirname = 'Runs/16-Wfnq_fi',
**wfnq_fi_settings,
**structure_and_pseudos,
**mpi_settings)
wfnq_fi_flow.write()
wfnq_fi_flow.run()
wfnq_fi_flow.report()
###Output
_____no_output_____
###Markdown
Running GW Now the moment you've been waiting for, when we actually run a GW calculation! Epsilon Our first step is to run an `Epsilon` calculation, where we'll generate the dielectric matrix (to be precise, the inverse of the dielectric matrix.)Because BerkeleyGW uses a file-based communication system, we'll need to specify the location of the wavefunction files that we previously calculated:
###Code
epsilon_input_files = dict(
wfn_fname='Runs/12-Wfn/wfn.cplx',
wfnq_fname='Runs/13-Wfnq/wfn.cplx',
)
###Output
_____no_output_____
###Markdown
Actually, we can set the file name above using a property of the flow
###Code
epsilon_input_files = dict(
wfn_fname=wfn_flow.wfn_fname,
wfnq_fname=wfnq_flow.wfn_fname,
)
###Output
_____no_output_____
###Markdown
As well as the settings for an `Epsilon` calculation:
###Code
epsilon_settings = dict(
ngkpt = wfn_settings['ngkpt'], # 'ngkpt': [2, 2, 2],
qshift = wfnq_settings['qshift'], # 'qshift': [.001, .0, .0],
ecuteps = 10.0,
)
###Output
_____no_output_____
###Markdown
And then we can prepare the Epsilon calculation using an `EpsilonTask` object (reusing our `mpi_settings` dictionary from before):
###Code
epsilon_task = EpsilonTask(
dirname='Runs/21-Epsilon',
structure=structure,
**epsilon_input_files,
**epsilon_settings,
**mpi_settings)
###Output
_____no_output_____
###Markdown
Let's run the calculation:
###Code
epsilon_task.write()
epsilon_task.run()
epsilon_task.report()
###Output
_____no_output_____
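###Markdown
As with the density step, you can peek at what was written to the run directory (optional, not part of the original tutorial); the dielectric matrix files produced here are the ones the `Sigma` and `Kernel` steps will read.
###Code
!ls 'Runs/21-Epsilon'
###Output
_____no_output_____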
###Markdown
Sigma Now that we've calculated the (inverse) dielectric matrix and needed wavefunctions, we have everything we need to calculate the GW self-energy. This is done with the `Sigma` executable, which takes as inputs the results from our `WFN_co` and `Epsilon` calculations:
###Code
sigma_input_files = dict(
wfn_co_fname='Runs/14-Wfn_co/wfn.cplx',
rho_fname='Runs/14-Wfn_co/rho.cplx',
vxc_fname='Runs/14-Wfn_co/vxc.cplx',
eps0mat_fname='Runs/21-Epsilon/eps0mat.h5',
epsmat_fname='Runs/21-Epsilon/epsmat.h5',
)
###Output
_____no_output_____
###Markdown
Then again, making use of the object properties, we can get the above file names with
###Code
sigma_input_files = dict(
wfn_co_fname=wfn_co_flow.wfn_fname,
rho_fname=wfn_co_flow.rho_fname,
vxc_fname=wfn_co_flow.vxc_fname,
eps0mat_fname=epsilon_task.eps0mat_fname,
epsmat_fname=epsilon_task.epsmat_fname,
)
###Output
_____no_output_____
###Markdown
Specify the settings:
###Code
sigma_settings = dict(
ngkpt = wfn_co_settings['ngkpt'], # ngkpt': [2,2,2],
ibnd_min = 1, # Minimum band for GW corrections
ibnd_max = 8, # Maximum band for GW corrections
extra_lines = ['dont_use_vxcdat'],
#'extra_lines' : ['dont_use_vxcdat', 'dont_use_hdf5'],
)
###Output
_____no_output_____
###Markdown
Prepare the calculation:
###Code
sigma_task = SigmaTask(
dirname='Runs/22-Sigma',
structure=structure,
**sigma_input_files,
**sigma_settings,
**mpi_settings)
###Output
_____no_output_____
###Markdown
And finally run it.
###Code
# Execution
sigma_task.write()
sigma_task.run()
sigma_task.report()
###Output
_____no_output_____
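###Markdown
The main quantities of interest from `Sigma` are the quasiparticle energies, which are collected in the file exposed by `sigma_task.eqp1_fname`, the same property reused in the absorption step below. An optional peek at its first few lines, assuming the run finished and the file was produced:
###Code
# Optional: glance at the beginning of the quasiparticle energy file
with open(str(sigma_task.eqp1_fname)) as f:
    for _ in range(5):
        line = f.readline()
        if not line:
            break
        print(line.rstrip())
###Output
_____no_output_____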
###Markdown
If you see an `Unfinished` status, something went wrong, and you should inspect the content of the run directory, in particular the main output file `Runs/22-Sigma/sigma.out`.Make sure you are using the latest version of BerkeleyGW.If you see a `Completed` status, then congratulations! You have successfully run a BerkeleyGW calculation from start to finish. Running BSE For those of you that want to go further, BerkeleyGW can calculate excitonic properties at the GW+BSE level of theory. This is done with the `KernelTask` and `AbsorptionTask` classes. Kernel `Kernel` takes in as inputs the results of `WFN_co` and `Epsilon`:
###Code
kernel_input_files = dict(
wfn_co_fname=wfn_co_flow.wfn_fname,
eps0mat_fname=epsilon_task.eps0mat_fname,
epsmat_fname=epsilon_task.epsmat_fname,
)
###Output
_____no_output_____
###Markdown
We can specify its settings:
###Code
kernel_settings = dict(
ngkpt = wfn_co_settings['ngkpt'],
ecuteps = epsilon_settings['ecuteps'],
nbnd_val = 4,
nbnd_cond = 4,
# These extra lines will be added verbatim to the input file.
extra_lines = ['use_symmetries_coarse_grid', 'screening_semiconductor'],
)
###Output
_____no_output_____
###Markdown
Prepare the calculation:
###Code
kernel_task = KernelTask(
dirname='Runs/23-Kernel',
structure=structure,
**kernel_input_files,
**kernel_settings,
**mpi_settings)
###Output
_____no_output_____
###Markdown
And finally run it:
###Code
kernel_task.write()
kernel_task.run()
kernel_task.report()
###Output
_____no_output_____
###Markdown
Absorption Finally, we solve the BSE equation via the `Absorption` executable. It has as inputs the results of `WFN_co`, `WFNq_fi`, and `WFN_fi`, as well as all previous BerkleyGW executables `Epsilon`, `Sigma`, and `Kernel`:
###Code
absorption_input_files = dict(
wfn_co_fname = 'Runs/14-Wfn_co/wfn.cplx',
wfn_fi_fname = 'Runs/15-Wfn_fi/wfn.cplx',
wfnq_fi_fname = 'Runs/16-Wfnq_fi/wfn.cplx',
eps0mat_fname = 'Runs/21-Epsilon/eps0mat.h5',
epsmat_fname = 'Runs/21-Epsilon/epsmat.h5',
eqp_fname = 'Runs/22-Sigma/eqp1.dat',
bsemat_fname = 'Runs/23-Kernel/bsemat.h5'
# If you don't use hdf5, the BSE matrix is written in two separate files.
#bsexmat_fname = 'Runs/23-Kernel/bsexmat',
#bsedmat_fname = 'Runs/23-Kernel/bsedmat',
)
###Output
_____no_output_____
###Markdown
Or, using the appropriate variables,
###Code
absorption_input_files = dict(
wfn_co_fname = wfn_co_flow.wfn_fname,
wfn_fi_fname = wfn_fi_flow.wfn_fname,
wfnq_fi_fname = wfnq_fi_flow.wfn_fname,
eps0mat_fname = epsilon_task.eps0mat_fname,
epsmat_fname = epsilon_task.epsmat_fname,
eqp_fname = sigma_task.eqp1_fname,
bsemat_fname = kernel_task.bsemat_fname,
# If you don't use hdf5, the BSE matrix is written in two separate files.
#bsexmat_fname = kernel_task.bsexmat_fname,
#bsedmat_fname = kernel_task.bsedmat_fname,
)
###Output
_____no_output_____
###Markdown
Next, we set the calculation settings. There are...a lot of those.
###Code
absorption_settings = dict(
ngkpt = [2, 2, 2], # k-points grid
nbnd_val = 4, # Number of valence bands
nbnd_cond = 4, # Number of conduction bands
nbnd_val_co = 4, # Number of valence bands on the coarse grid
nbnd_cond_co = 4, # Number of conduction bands on the coarse grid
nbnd_val_fi = 4, # Number of valence bands on the fine grid
nbnd_cond_fi = 4, # Number of conduction bands on the fine grid
# These extra lines will be added verbatim to the input file.
extra_lines = [
'use_symmetries_coarse_grid',
'no_symmetries_fine_grid',
'no_symmetries_shifted_grid',
'screening_semiconductor',
'use_velocity',
'gaussian_broadening',
'eqp_co_corrections',
],
# These extra variables will be added to the input file as '{variable} {value}'.
extra_variables = {
'energy_resolution': 0.15,
},
)
###Output
_____no_output_____
###Markdown
But preparing the calculation is as simple as always:
###Code
absorption_task = AbsorptionTask(
dirname='Runs/24-Absorption',
structure=structure,
**absorption_input_files,
**absorption_settings,
**mpi_settings)
###Output
_____no_output_____
###Markdown
And, at last, we can run it.
###Code
absorption_task.write()
absorption_task.run()
absorption_task.report()
###Output
_____no_output_____
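###Markdown
Once the run finishes, the computed spectrum can be plotted. The sketch below is an optional addition with an assumption to be aware of: it reads the standard BerkeleyGW output file `absorption_eh.dat` from the run directory and assumes that the first column is the photon energy and the second is the imaginary part of the dielectric function; check the header of the file produced by your own run to confirm the column layout.
###Code
import numpy as np
import matplotlib.pyplot as plt
# Plot the excitonic absorption spectrum (see the file header for the exact column meanings)
data = np.loadtxt('Runs/24-Absorption/absorption_eh.dat', comments='#')
plt.plot(data[:, 0], data[:, 1])
plt.xlabel('Photon energy (eV)')
plt.ylabel('Im epsilon')
plt.title('GaAs absorption spectrum (GW+BSE)')
plt.show()
###Output
_____no_output_____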
###Markdown
Congratulations yet again! You've run a full GW+BSE calculation! Using workflows Can we do all of these steps at once? Yes we can!
###Code
from BGWpy import GWFlow, BSEFlow
flow = GWFlow(
dirname='Runs/32-GW',
dft_flavor='abinit',
structure = Structure.from_file('../Data/Structures/GaAs.json'),
prefix = 'GaAs',
pseudo_dir = '../Data/Pseudos',
pseudos = ['31-Ga.pspnc', '33-As.pspnc'],
ecut = 10.0,
nbnd = 9,
ngkpt = [2,2,2],
kshift = [.5,.5,.5],
qshift = [.001,.0,.0],
ibnd_min = 1,
ibnd_max = 8,
ecuteps = 7.5,
# Extra lines and extra variables
epsilon_extra_lines = [],
epsilon_extra_variables = {},
sigma_extra_lines = ['screening_semiconductor'],
sigma_extra_variables = {},
**mpi_settings)
###Output
_____no_output_____
###Markdown
Let's execute the whole thing.
###Code
flow.write()
flow.run()
flow.report()
###Output
_____no_output_____
###Markdown
Likewise, for the BSE
###Code
flow = BSEFlow(
dirname='Runs/33-BSE',
dft_flavor='abinit',
structure = Structure.from_file('../Data/Structures/GaAs.json'),
prefix = 'GaAs',
pseudo_dir = '../Data/Pseudos',
pseudos = ['31-Ga.pspnc', '33-As.pspnc'],
ecut = 5.0,
nbnd = 12,
nbnd_fine = 9,
ngkpt = [2,2,2],
kshift = [.5,.5,.5],
qshift = [.001,.0,.0],
# Fine grids
ngkpt_fine = [4,4,4],
kshift_fine = [.0,.0,.0],
ibnd_min = 1,
ibnd_max = 8,
ecuteps = 10.0,
sigma_extra_lines = ['screening_semiconductor'],
# Kernel variables
nbnd_val = 4,
nbnd_cond = 4,
kernel_extra_lines = [
'use_symmetries_coarse_grid',
'screening_semiconductor',
],
# Absorption variables
nbnd_val_co=4,
nbnd_cond_co=4,
nbnd_val_fi=4,
nbnd_cond_fi=4,
absorption_extra_lines = [
'use_symmetries_coarse_grid',
'no_symmetries_fine_grid',
'no_symmetries_shifted_grid',
'screening_semiconductor',
'use_velocity',
'gaussian_broadening',
'eqp_co_corrections',
],
absorption_extra_variables = {
'energy_resolution' : 0.15,
},
**mpi_settings)
flow.write()
flow.run()
flow.report()
###Output
_____no_output_____
###Markdown
Custom workflows For a realistic GW or BSE calculation, in general, you don't run every step all at once like we did. You actually perform a **convergence study**, in which you gradually increase the parameters until the calculation is converged. For example, in a GW calculation, we have the following convergence studies to perform:* Convergence of the k-points grids for epsilon* Convergence of the q-points grid for sigma* Convergence on the number of bands for epsilon* Convergence on the number of bands for sigma* Convergence on the size of the dielectric matrixFor these, you will need to construct your own workflow. Here is an example.
###Code
from os.path import join as pjoin
from BGWpy import Workflow
workflow = Workflow(dirname='Runs/50-Workflow')
epsilon_input_files = dict(
wfn_fname=wfn_flow.wfn_fname,
wfnq_fname=wfnq_flow.wfn_fname,
)
sigma_input_files = dict(
wfn_co_fname=wfn_co_flow.wfn_fname,
rho_fname=wfn_co_flow.rho_fname,
vxc_fname=wfn_co_flow.vxc_fname,
)
ecuteps_l = [5.0, 7.5, 10.0]
for i, ecuteps in enumerate(ecuteps_l):
epsilon_settings['ecuteps'] = ecuteps
epsilon_task = EpsilonTask(
dirname=pjoin(workflow.dirname, 'Epsilon{}'.format(i)),
structure=structure,
**epsilon_input_files,
**epsilon_settings,
**mpi_settings)
sigma_task = SigmaTask(
dirname=pjoin(workflow.dirname, 'Sigma{}'.format(i)),
structure=structure,
eps0mat_fname=epsilon_task.eps0mat_fname,
epsmat_fname=epsilon_task.epsmat_fname,
**sigma_input_files,
**sigma_settings,
**mpi_settings)
workflow.add_tasks([epsilon_task, sigma_task])
workflow.write()
workflow.run()
workflow.report()
###Output
_____no_output_____
###Markdown
Note that you could also run and report each task sequentially with
###Code
for task in workflow.tasks:
task.run()
task.report()
###Output
_____no_output_____
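###Markdown
Once the convergence workflow has run, you will want to compare the quasiparticle energies obtained with each value of `ecuteps`. A minimal bookkeeping sketch (optional, using only variables defined above) that pairs each cutoff with the directory of the corresponding `Sigma` run:
###Code
# Map each ecuteps value to the directory holding the corresponding Sigma run
for i, ecuteps in enumerate(ecuteps_l):
    print("ecuteps = {} -> {}".format(ecuteps, pjoin(workflow.dirname, 'Sigma{}'.format(i))))
###Output
_____no_output_____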
###Markdown
Running BerkeleyGW with BGWpy In this notebook, we assume that you are somewhat familiar with the BerkeleyGW software: what problem it solves, and what is the general workflow to run it. We also assume that you have a basic knowledge of Python and its terminology.Before you begin, make sure that you have the following packages installed:* Jupyter Notebook* Abinit* BerkeleyGW* BGWpyTo run BGWpy, you'll also need the `bin` directories of BerkeleyGW and Abinit installations located in your `PATH` environment variable. Checking your configuration The following cell is used to generate information that we'll need, should we have to debug this notebook. You don't need to run it, but it may be useful to look at for educational purposes.
###Code
import sys
import os
import BGWpy.config as defaults
print("Python kernel:\n {} ".format(sys.executable))
print("Python version:\n {} ".format(sys.version))
print("Current working directory:\n {} ".format(os.getcwd()))
print("Configuration file:\n {} ".format(defaults.config_file))
print("Use HDF5?:\n {} ".format(defaults.flavors['use_hdf5']))
print("Use complex version of BerkeleyGW?:\n {}".format(defaults.flavors['flavor_complex']))
print("DFT Flavor:\n {} ".format(defaults.flavors['dft_flavor']))
print("Default MPI settings:\n {} ".format(defaults.default_mpi))
print("Default runscript settings:\n {} ".format(defaults.default_runscript))
print("Paths in $PATH:")
for i in os.environ['PATH'].split(":"):
print(" {}".format(i))
###Output
_____no_output_____
###Markdown
Pay attention to the `use_hdf5` flag. It should reflect whether you compiled BerkeleyGW with HDF5 support or not. If the information above is not consistent with what you have, then you should edit your `~/.BGWpyrc` file accordingly. This is important because the file names that BGW expects from a calculation depends on it. If you don't have HDF5, then you should remove all the '.h5' extensions from file names. It is highly recommended, however, that you build BGW with HDF5 support, as it could become mandatory in the future.If you don't have a `~/.BGWpyrc` yet, you can copy it from the `BGWpy/config` directory, or simply run the script `BGWpy_make_config_file.py`. Load Libraries First, we load two external packages which BGWpy uses: `numpy` and `pymatgen`.
###Code
import pymatgen
import numpy as np
###Output
_____no_output_____
###Markdown
Next, we load the `Structure` class from the BGWpy package. But really this is the Structure object from the `pymatgen` package.
###Code
from BGWpy import Structure
###Output
_____no_output_____
###Markdown
Next, we load the classes which create and run Abinit calculations.
###Code
from BGWpy import AbinitScfTask, AbinitBgwFlow
###Output
_____no_output_____
###Markdown
Finally, we load the classes with create and run BerkeleyGW calculations.
###Code
from BGWpy import EpsilonTask, SigmaTask, KernelTask, AbsorptionTask
###Output
_____no_output_____
###Markdown
Make sure that both the BerkeleyGW and Abinit binary folders are in the PATH folder Create the Structure For this tutorial, we'll calculate the many-body properties of the GaAs primitive cell. All files that you will need have been provided for you in the `Data` subdirectory.SHOW PICTURE HERE. (Even better if can play using `pymatgen`...) Geometries are specified in BGWpy using pymatgen's `Structure` class, which may be imported directly from BGWpy or through pymatgen.There are a number of ways that we can import geometries into BGWpy using the `Structure` class. For example, we can load them from a pre-existing CIF file:
###Code
structure = Structure.from_file('../Data/Structures/GaAs.cif')
print(structure)
###Output
_____no_output_____
###Markdown
We can also load them from a previous pymatgen Structure which has been exported to a file in the JSON format:
###Code
Structure.from_file('../Data/Structures/GaAs.json')
print(structure)
###Output
_____no_output_____
###Markdown
We can even use pymatgen to directly create the structure in a Python script:
###Code
acell_angstrom = 5.6535
rprim = np.array([[.0,.5,.5],[.5,.0,.5],[.5,.5,.0]]) * acell_angstrom
structure = pymatgen.core.Structure(
lattice = pymatgen.core.lattice.Lattice(rprim),
species= ['Ga', 'As'],
coords = [3*[.0], 3*[.25]],
)
print(structure)
###Output
_____no_output_____
###Markdown
For more information about pymatgen, please consult its official documentation. Generating the Ground State Density To begin, we will run a ground state DFT calculation to self-consistency to generate the ground state charge density for the calculation. This ground state charge density will be fed into all wavefunction calculations in the next step. We use Abinit in this notebook, however BerkeleyGW and BGWpy supports a number of other DFT packages.First, we will create a object of the `AbinitScfTask` task to prepare the needed variables:
###Code
task = AbinitScfTask(
dirname = 'Runs/11-Density',
structure = Structure.from_file('../Data/Structures/GaAs.json'),
prefix = 'GaAs', # File names prefix. You don't really need to specify this with abinit.
pseudo_dir = '../Data/Pseudos',
pseudos = ['31-Ga.pspnc', '33-As.pspnc'],
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
ecut = 5.0, # Wavefunctions cutoff energy
# These are the default parameters for the MPI runner.
# You can specify them here, but it's better to store this info in
# the configuration file ~/.BGWpyrc
nproc=1,
nproc_per_node=1,
mpirun='mpirun',
nproc_flag='-n',
nproc_per_node_flag='--npernode',
)
###Output
_____no_output_____
###Markdown
As you can see, BGWpy has a number of parameters that you will need to set. However, many of these parameters are consistent from calculation to calculation, so we'll store them in dictionaries that we can reuse for future steps.First, a dictionary to store all variables that will be used across all Abinit calculations:
###Code
structure_and_pseudos = dict(
structure = Structure.from_file('../Data/Structures/GaAs.json'),
pseudo_dir = '../Data/Pseudos',
pseudos = ['31-Ga.pspnc', '33-As.pspnc'],
)
###Output
_____no_output_____
###Markdown
Next, a dictionary to store the variables which are used only for this particular SCF task:
###Code
scf_settings = dict(
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
ecut = 5.0, # Wavefunctions cutoff energy
)
###Output
_____no_output_____
###Markdown
And finally, a dictionary to store the settings related to running calculations with MPI.
###Code
mpi_settings = dict( # Then again, you should store those settings in ~/.BGWpyrc
nproc=1,
nproc_per_node=1,
mpirun='mpirun',
nproc_flag='-n',
nproc_per_node_flag='--npernode',
)
###Output
_____no_output_____
###Markdown
Note that all these dictionaries correspond to arguments for the `AbinitScfTask`, stored as key/value pairs. This allows us to use dictionary unpacking to considerably tidy up our code:
###Code
scf_task = AbinitScfTask(
dirname='Runs/11-Density',
**scf_settings,
**structure_and_pseudos,
**mpi_settings,
)
###Output
_____no_output_____
###Markdown
Now that we've created the `AbinitScfTask` task, we can use the `write` method to write the needed input files to disk:
###Code
scf_task.write()
###Output
_____no_output_____
###Markdown
If you receive an error message stating that an executable could not be found, you likely do not have the needed BerkeleyGW and Abinit binary folders in your `PATH` environment variable.Let's take a look at the folder that was created by this task using Jupyter's built-in `!ls` magic command:
###Code
!ls 'Runs/11-Density'
###Output
_____no_output_____
###Markdown
In our new folder, there are several new directories:* `GaAs.files`, the list of files used by Abinit.* `GaAs.in`, the Abinit input variables.* `run.sh`, the execution script.and folders used by abinit for the input data files, outputs, and temporary files:* `input_data`* `out_data`* `tmp_data`Now that we've created the needed input files, let's run the `run.sh` script using the `run` method. Note that this step will take a few seconds, as it will run Abinit in the background.
###Code
scf_task.run()
###Output
_____no_output_____
###Markdown
Finally, we can check the status of the calculation using the `report` method. You should see a message telling you that it's been completed.
###Code
scf_task.report()
###Output
_____no_output_____
###Markdown
It is possible to access the data files produced by this task with
###Code
charge_density_fname = scf_task.get_odat('DEN')
vxc_fname = scf_task.get_odat('VXC')
print("Charge density file name: {}".format(charge_density_fname))
print("Exchange-correlation potential file name: {}".format(vxc_fname))
###Output
_____no_output_____
###Markdown
This won't be necessary, however, when we get to use the `AbinitBgwFlow`. Generating the Wavefunctions Now that we've generated the ground state density, we'll used this to generate the wavefunctions that we'll feed into BerkeleyGW. This may be done with the ` AbinitBgwFlow` class. As mentioned in the introduction, we'll need up to 6 different types of wavefunction files. WFN `WFN` is the "standard" k-shifted wavefunction file which is read by the `Epsilon` calculation, and thus is needed for all BerkeleyGW calculations.It (and all other wavefunction files) are generated using the `AbinitBgwFlow` class. The only difference between these wavefunction types are the parameter values used:
###Code
task = AbinitBgwFlow(
dirname = 'Runs/12-Wfn',
structure = Structure.from_file('../Data/Structures/GaAs.json'),
prefix = 'GaAs',
pseudo_dir = '../Data/Pseudos',
pseudos = ['31-Ga.pspnc', '33-As.pspnc'],
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
ecut = 5.0, # Wavefunctions cutoff energy
nband = 9, # Number of bands
input_variables = {'autoparal' : 1}, # Any extra input variables we want to specify
charge_density_fname = '11-Density/out_data/odat_DEN',
vxc_fname = '11-Density/out_data/odat_VXC',
# These are the default parameters for the MPI runner.
# Please adapt them to your needs.
nproc = 1,
nproc_per_node = 1,
mpirun = 'mpirun',
nproc_flag = '-n',
nproc_per_node_flag = '--npernode',
)
###Output
_____no_output_____
###Markdown
As before, we will break up these arguments into sets of dictionaries: the settings common to all wavefunction calculations
###Code
wfn_common_settings = dict(
ecut = 5.0, # Wavefunctions cutoff energy
input_variables = {'autoparal' : 1}, # Any extra input variables we want to specify
charge_density_fname = charge_density_fname,
vxc_fname = vxc_fname,
)
###Output
_____no_output_____
###Markdown
and the arguments specific to the current wavefunction calculation
###Code
wfn_settings = dict(
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
nband = 9, # Number of bands
**wfn_common_settings)
###Output
_____no_output_____
###Markdown
Reusing dictionaries of settings previously defined,We can now create the instance of the `AbinitBgwFlow` class:
###Code
wfn_flow = AbinitBgwFlow(
dirname='Runs/12-Wfn',
**wfn_settings,
**structure_and_pseudos,
**mpi_settings)
###Output
_____no_output_____
###Markdown
As before, we'll write the input files to disc then run the calculation:
###Code
wfn_flow.write()
wfn_flow.run()
wfn_flow.report()
###Output
_____no_output_____
###Markdown
The output specifies that we've actually run two calculations here: a `WFN` calculation where we calculate wavefunctions using Abinit, and `Abi2BGW` where we convert the resulting Abinit-specific output files into a format readable by BerkeleyGW. Unlike in the density case where we ran a single task, here we're running two tasks (`WFN` and `Abi2BGW`) in a workflow (hence the name `AbiBgwFlow`). WFNq Next, we'll create `WFNq`, which is the "standard" k-shifted and q-shifted wavefunction file which is read by the `Epsilon` calculation, and thus is needed for all BerkeleyGW calculations.The only dictionary we need to create is are the settings specific to the `WFNq` wavefunction:
###Code
wfnq_settings = dict(
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
qshift = [.001,.0,.0],# k-points q-shift
**wfn_common_settings)
###Output
_____no_output_____
###Markdown
And then we can prepare the calculation:
###Code
wfnq_flow = AbinitBgwFlow(
dirname='Runs/13-Wfnq',
**wfnq_settings,
**structure_and_pseudos,
**mpi_settings)
###Output
_____no_output_____
###Markdown
Create it, and run it:
###Code
wfnq_flow.write()
wfnq_flow.run()
wfnq_flow.report()
###Output
_____no_output_____
###Markdown
Wfn_co Next, we'll create `WFN_co`, which is the wavefunction on a coarser (and unshifted) grid than `WFN`. This is used by `Sigma`, `Kernel`, and `Absorption`, and thus will be needed by most BerkeleyGW calculations. we will also use this calculation to generate the ground state density and exchange-correlation energy density that will be used by `Sigma`.Once again, we set up the dictionary with our needed variables:
###Code
wfn_co_settings = dict(
ngkpt = [2,2,2], # k-points grid
kshift = [.0,.0,.0], # k-points shift
nband = 9, # Number of bands
rhog_flag = True, # Also convert the charge density for BGW.
vxcg_flag = True, # Also convert vxc for BGW.
**wfn_common_settings)
###Output
_____no_output_____
###Markdown
Note that there's a new flag `rhog_flag` which tells `AbinitBgwFlow` to generate additional density-related files,while the vxcg_flag tells the `Abi2BGW` task to read and convert the `VXC` file. Now we can prepare the calculation:
###Code
wfn_co_flow = AbinitBgwFlow(
dirname = 'Runs/14-Wfn_co',
**wfn_co_settings,
**structure_and_pseudos,
**mpi_settings)
###Output
_____no_output_____
###Markdown
And create and run it:
###Code
wfn_co_flow.write()
wfn_co_flow.run()
wfn_co_flow.report()
###Output
_____no_output_____
###Markdown
WFN_fi Next, we'll create `WFN_fi`, the k-shifted `WFN` on a finer grid than `WFN`. This is used during interpolation in the `Absorption` executable and thus is only needed if you need to solve the BSE equations. (Symmetry is also turned off for this calculation.)
###Code
wfn_fi_settings = dict(
nband = 9, # Number of bands
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
symkpt = False, # Do not reduce the k-point grid with symmetries.
**wfn_common_settings)
wfn_fi_flow = AbinitBgwFlow(
dirname = 'Runs/15-Wfn_fi',
**wfn_fi_settings,
**structure_and_pseudos,
**mpi_settings)
wfn_fi_flow.write()
wfn_fi_flow.run()
wfn_fi_flow.report()
###Output
_____no_output_____
###Markdown
WFNq_fi FINALLY, we'll create `WFNq_fi`, the k-shifted and q-shifted `WFN` on a finer grid than `WFN`. Like `WFN_fi`, this is used during interpolation in the `Absorption` executable and thus is only needed if you need to solve the BSE equations. (And symmetry is turned off, as before.)Let's go through the steps again:
###Code
wfnq_fi_settings = dict(
nband = 9, # Number of bands
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
qshift = [.001,.0,.0],# k-points q-shift
symkpt = False, # Do not reduce the k-point grid with symmetries.
**wfn_common_settings)
wfnq_fi_flow = AbinitBgwFlow(
dirname = 'Runs/16-Wfnq_fi',
**wfnq_fi_settings,
**structure_and_pseudos,
**mpi_settings)
wfnq_fi_flow.write()
wfnq_fi_flow.run()
wfnq_fi_flow.report()
###Output
_____no_output_____
###Markdown
Running GW Now the moment you've been waiting for, when we actually run a GW calculation! Epsilon Our first step is to run an `Epsilon` calculation, where we'll generate the dielectric matrix (to be precise, the inverse of the dielectric matrix.)Because BerkeleyGW uses a file-based communication system, we'll need to specify the location of the wavefunction files that we previously calculated:
###Code
epsilon_input_files = dict(
wfn_fname='Runs/12-Wfn/wfn.cplx',
wfnq_fname='Runs/13-Wfnq/wfn.cplx',
)
###Output
_____no_output_____
###Markdown
Actually, we can set the file name above using a property of the flow
###Code
epsilon_input_files = dict(
wfn_fname=wfn_flow.wfn_fname,
wfnq_fname=wfnq_flow.wfn_fname,
)
###Output
_____no_output_____
###Markdown
As well as the settings for an `Epsilon` calculation:
###Code
epsilon_settings = dict(
ngkpt = wfn_settings['ngkpt'], # 'ngkpt': [2, 2, 2],
qshift = wfnq_settings['qshift'], # 'qshift': [.001, .0, .0],
ecuteps = 10.0,
)
###Output
_____no_output_____
###Markdown
And then we can prepare the Epsilon calculation using an `EpsilonTask` object (reusing our `mpi_settings` dictionary from before):
###Code
epsilon_task = EpsilonTask(
dirname='Runs/21-Epsilon',
structure=structure,
**epsilon_input_files,
**epsilon_settings,
**mpi_settings)
###Output
_____no_output_____
###Markdown
Let's run the calculation:
###Code
epsilon_task.write()
epsilon_task.run()
epsilon_task.report()
###Output
_____no_output_____
###Markdown
Sigma Now that we've calculated the (inverse) dielectric matrix and needed wavefunctions, we have everything we need to calculate the GW self-energy. This is done with the `Sigma` executable, which takes as inputs the results from our `WFN_co` and `Epsilon` calculations:
###Code
sigma_input_files = dict(
wfn_co_fname='Runs/14-Wfn_co/wfn.cplx',
rho_fname='Runs/14-Wfn_co/rho.cplx',
vxc_fname='Runs/14-Wfn_co/vxc.cplx',
eps0mat_fname='Runs/21-Epsilon/eps0mat.h5',
epsmat_fname='Runs/21-Epsilon/epsmat.h5',
)
###Output
_____no_output_____
###Markdown
Then again, making use of the object properties, we can get the above file names with
###Code
sigma_input_files = dict(
wfn_co_fname=wfn_co_flow.wfn_fname,
rho_fname=wfn_co_flow.rho_fname,
vxc_fname=wfn_co_flow.vxc_fname,
eps0mat_fname=epsilon_task.eps0mat_fname,
epsmat_fname=epsilon_task.epsmat_fname,
)
###Output
_____no_output_____
###Markdown
Specify the settings:
###Code
sigma_settings = dict(
ngkpt = wfn_co_settings['ngkpt'], # ngkpt': [2,2,2],
ibnd_min = 1, # Minimum band for GW corrections
ibnd_max = 8, # Maximum band for GW corrections
extra_lines = ['dont_use_vxcdat'],
#'extra_lines' : ['dont_use_vxcdat', 'dont_use_hdf5'],
)
###Output
_____no_output_____
###Markdown
Prepare the calculation:
###Code
sigma_task = SigmaTask(
dirname='Runs/22-Sigma',
structure=structure,
**sigma_input_files,
**sigma_settings,
**mpi_settings)
###Output
_____no_output_____
###Markdown
And finally run it.
###Code
# Execution
sigma_task.write()
sigma_task.run()
sigma_task.report()
###Output
_____no_output_____
###Markdown
If you see an `Unfinised` status, something went wrong, and you should inspect the content of the run directory, in particular the main output file `Runs/22-Sigma/sigma.out` .Make sure you are using the latest version of BerkeleyGW.If you see a `Completed` status, then congratulations! You have successfully ran a BerkeleyGW calculation from start to finish. Running BSE For those of you that want to go further, BerkeleyGW can calculate excitionic properties on the GW+BSE level of theory. This is done with the `KernelTask` and `AbsorptionTask` classes. Kernel `Kernel` takes in as inputs the results of `WFN_co` and `Epsilon`:
###Code
kernel_input_files = dict(
wfn_co_fname=wfn_co_flow.wfn_fname,
eps0mat_fname=epsilon_task.eps0mat_fname,
epsmat_fname=epsilon_task.epsmat_fname,
)
###Output
_____no_output_____
###Markdown
We can specify its settings:
###Code
kernel_settings = dict(
ngkpt = wfn_co_settings['ngkpt'],
ecuteps = epsilon_settings['ecuteps'],
nbnd_val = 4,
nbnd_cond = 4,
# These extra lines will be added verbatim to the input file.
extra_lines = ['use_symmetries_coarse_grid', 'screening_semiconductor'],
)
###Output
_____no_output_____
###Markdown
Prepare the calculation:
###Code
kernel_task = KernelTask(
dirname='Runs/23-Kernel',
structure=structure,
**kernel_input_files,
**kernel_settings,
**mpi_settings)
###Output
_____no_output_____
###Markdown
And finally run it:
###Code
kernel_task.write()
kernel_task.run()
kernel_task.report()
###Output
_____no_output_____
###Markdown
Absorption Finally, we solve the BSE equation via the `Absorption` executable. It has as inputs the results of `WFN_co`, `WFNq_fi`, and `WFN_fi`, as well as all previous BerkleyGW executables `Epsilon`, `Sigma`, and `Kernel`:
###Code
absorption_input_files = dict(
wfn_co_fname = 'Runs/14-Wfn_co/wfn.cplx',
wfn_fi_fname = 'Runs/15-Wfn_fi/wfn.cplx',
wfnq_fi_fname = 'Runs/16-Wfnq_fi/wfn.cplx',
eps0mat_fname = 'Runs/21-Epsilon/eps0mat.h5',
epsmat_fname = 'Runs/21-Epsilon/epsmat.h5',
eqp_fname = 'Runs/22-Sigma/eqp1.dat',
bsemat_fname = 'Runs/23-Kernel/bsemat.h5'
# If you don't use hdf5, the BSE matrix is written in two separate files.
#bsexmat_fname = 'Runs/23-Kernel/bsexmat',
#bsedmat_fname = 'Runs/23-Kernel/bsedmat',
)
###Output
_____no_output_____
###Markdown
Or, using the appropriate variables,
###Code
absorption_input_files = dict(
wfn_co_fname = wfn_co_flow.wfn_fname,
wfn_fi_fname = wfn_fi_flow.wfn_fname,
wfnq_fi_fname = wfnq_fi_flow.wfn_fname,
eps0mat_fname = epsilon_task.eps0mat_fname,
epsmat_fname = epsilon_task.epsmat_fname,
eqp_fname = sigma_task.eqp1_fname,
bsemat_fname = kernel_task.bsemat_fname,
# If you don't use hdf5, the BSE matrix is written in two separate files.
#bsexmat_fname = kernel_task.bsexmat_fname,
#bsedmat_fname = kernel_task.bsedmat_fname,
)
###Output
_____no_output_____
###Markdown
Next, we set the calculation settings. There are...a lot of those.
###Code
absorption_settings = dict(
ngkpt = [2, 2, 2], # k-points grid
nbnd_val = 4, # Number of valence bands
nbnd_cond = 4, # Number of conduction bands
nbnd_val_co = 4, # Number of valence bands on the coarse grid
nbnd_cond_co = 4, # Number of conduction bands on the coarse grid
nbnd_val_fi = 4, # Number of valence bands on the fine grid
nbnd_cond_fi = 4, # Number of conduction bands on the fine grid
# These extra lines will be added verbatim to the input file.
extra_lines = [
'use_symmetries_coarse_grid',
'no_symmetries_fine_grid',
'no_symmetries_shifted_grid',
'screening_semiconductor',
'use_velocity',
'gaussian_broadening',
'eqp_co_corrections',
],
# These extra variables will be added to the input file as '{variable} {value}'.
extra_variables = {
'energy_resolution': 0.15,
},
)
###Output
_____no_output_____
###Markdown
But preparing the calculation is as simple as always:
###Code
absorption_task = AbsorptionTask(
dirname='Runs/24-Absorption',
structure=structure,
**absorption_input_files,
**absorption_settings,
**mpi_settings)
###Output
_____no_output_____
###Markdown
And, at last, we can run it.
###Code
absorption_task.write()
absorption_task.run()
absorption_task.report()
###Output
_____no_output_____
###Markdown
Congratulations yet again! You've run a full GW+BSE calculation! Using workflows Can we do all of these steps at once? Yes we can!
###Code
from BGWpy import GWFlow, BSEFlow
flow = GWFlow(
dirname='Runs/32-GW',
dft_flavor='abinit',
structure = Structure.from_file('../Data/Structures/GaAs.json'),
prefix = 'GaAs',
pseudo_dir = '../Data/Pseudos',
pseudos = ['31-Ga.pspnc', '33-As.pspnc'],
ecut = 10.0,
nbnd = 9,
ngkpt = [2,2,2],
kshift = [.5,.5,.5],
qshift = [.001,.0,.0],
ibnd_min = 1,
ibnd_max = 8,
ecuteps = 7.5,
# Extra lines and extra variables
epsilon_extra_lines = [],
epsilon_extra_variables = {},
sigma_extra_lines = ['screening_semiconductor'],
sigma_extra_variables = {},
**mpi_settings)
###Output
_____no_output_____
###Markdown
Let's execute the whole thing.
###Code
flow.write()
flow.run()
flow.report()
###Output
_____no_output_____
###Markdown
Likewise, for the BSE
###Code
flow = BSEFlow(
dirname='Runs/33-BSE',
dft_flavor='abinit',
structure = Structure.from_file('../Data/Structures/GaAs.json'),
prefix = 'GaAs',
pseudo_dir = '../Data/Pseudos',
pseudos = ['31-Ga.pspnc', '33-As.pspnc'],
ecut = 5.0,
nbnd = 12,
nbnd_fine = 9,
ngkpt = [2,2,2],
kshift = [.5,.5,.5],
qshift = [.001,.0,.0],
# Fine grids
ngkpt_fine = [4,4,4],
kshift_fine = [.0,.0,.0],
ibnd_min = 1,
ibnd_max = 8,
ecuteps = 10.0,
sigma_extra_lines = ['screening_semiconductor'],
# Kernel variables
nbnd_val = 4,
nbnd_cond = 4,
kernel_extra_lines = [
'use_symmetries_coarse_grid',
'screening_semiconductor',
],
# Absorption variables
nbnd_val_co=4,
nbnd_cond_co=4,
nbnd_val_fi=4,
nbnd_cond_fi=4,
absorption_extra_lines = [
'use_symmetries_coarse_grid',
'no_symmetries_fine_grid',
'no_symmetries_shifted_grid',
'screening_semiconductor',
'use_velocity',
'gaussian_broadening',
'eqp_co_corrections',
],
absorption_extra_variables = {
'energy_resolution' : 0.15,
},
**mpi_settings)
flow.write()
flow.run()
flow.report()
###Output
_____no_output_____
###Markdown
Custom workflows For a realistic GW or BSE calculation, in general, you don't run all the steps at once like we did. You actually perform a **convergence study**, in which you gradually increase the parameters until the calculation is converged. For example, in a GW calculation, we have the following convergence studies to perform:* Convergence of the k-points grids for epsilon* Convergence of the q-points grid for sigma* Convergence on the number of bands for epsilon* Convergence on the number of bands for sigma* Convergence on the size of the dielectric matrixFor these, you will need to construct your own workflow. Here is an example, followed by a sketch of how the same pattern extends to a bands convergence.
###Code
from os.path import join as pjoin
from BGWpy import Workflow
workflow = Workflow(dirname='Runs/50-Workflow')
epsilon_input_files = dict(
wfn_fname=wfn_flow.wfn_fname,
wfnq_fname=wfnq_flow.wfn_fname,
)
sigma_input_files = dict(
wfn_co_fname=wfn_co_flow.wfn_fname,
rho_fname=wfn_co_flow.rho_fname,
vxc_fname=wfn_co_flow.vxc_fname,
)
ecuteps_l = [5.0, 7.5, 10.0]
for i, ecuteps in enumerate(ecuteps_l):
epsilon_settings['ecuteps'] = ecuteps
epsilon_task = EpsilonTask(
dirname=pjoin(workflow.dirname, 'Epsilon{}'.format(i)),
structure=structure,
**epsilon_input_files,
**epsilon_settings,
**mpi_settings)
sigma_task = SigmaTask(
dirname=pjoin(workflow.dirname, 'Sigma{}'.format(i)),
structure=structure,
eps0mat_fname=epsilon_task.eps0mat_fname,
epsmat_fname=epsilon_task.epsmat_fname,
**sigma_input_files,
**sigma_settings,
**mpi_settings)
workflow.add_tasks([epsilon_task, sigma_task])
workflow.write()
workflow.run()
workflow.report()
###Output
_____no_output_____
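###Markdown
The same pattern extends to the other convergence studies listed above. Below is a minimal sketch of a bands-convergence loop; the `nbnd` key and the `Runs/51-Workflow-bands` directory name are assumptions (mirroring the `nbnd` argument that `GWFlow` takes), and the WFN files fed to `Epsilon` must already contain at least as many bands as requested.
###Code
bands_workflow = Workflow(dirname='Runs/51-Workflow-bands')

nbnd_l = [9, 12, 15]  # candidate numbers of bands (assumed values, adjust to your WFN files)

for i, nbnd in enumerate(nbnd_l):

    epsilon_settings['nbnd'] = nbnd  # assumption: same key name as the GWFlow 'nbnd' argument

    epsilon_task = EpsilonTask(
        dirname=pjoin(bands_workflow.dirname, 'Epsilon{}'.format(i)),
        structure=structure,
        **epsilon_input_files,
        **epsilon_settings,
        **mpi_settings)

    sigma_task = SigmaTask(
        dirname=pjoin(bands_workflow.dirname, 'Sigma{}'.format(i)),
        structure=structure,
        eps0mat_fname=epsilon_task.eps0mat_fname,
        epsmat_fname=epsilon_task.epsmat_fname,
        **sigma_input_files,
        **sigma_settings,
        **mpi_settings)

    bands_workflow.add_tasks([epsilon_task, sigma_task])

bands_workflow.write()
bands_workflow.run()
bands_workflow.report()
###Output
_____no_output_____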
###Markdown
Note that you could also run and report each task sequentially with
###Code
for task in workflow.tasks:
task.run()
task.report()
###Output
_____no_output_____ |
examples/visualize_saliency_with_backprop_colab.ipynb | ###Markdown
Visualize image-specific class saliency with backpropagation---A quick demo of creating saliency maps for CNNs using [FlashTorch 🔦](https://github.com/MisaOgura/flashtorch).❗This notebook is for those who are using this notebook in **Google Colab**.If you aren't on Google Colab already, please head to the Colab version of this notebook **[here](https://colab.research.google.com/github/MisaOgura/flashtorch/blob/master/examples/visualise_saliency_with_backprop_colab.ipynb)** to execute.---The gradients obtained can be used to visualise an image-specific class saliency map, which can give some intuition on regions within the input image that contribute the most (and least) to the corresponding output.More details on saliency maps: [Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps](https://arxiv.org/pdf/1312.6034.pdf). 0. Set upA GPU runtime is available on Colab for free, from the Runtime tab on the top menu bar.It is highly recommended to use GPU as a runtime for the enhanced speed of computation.
###Code
# Install flashtorch
!pip install flashtorch
# Download the example image
!mkdir -p images
!wget https://github.com/MisaOgura/flashtorch/raw/master/examples/images/great_grey_owl.jpg -P /content/images
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
import torchvision.models as models
from flashtorch.utils import (apply_transforms,
denormalize,
format_for_plotting,
load_image)
from flashtorch.utils import ImageNetIndex
from flashtorch.saliency import Backprop
###Output
_____no_output_____
###Markdown
1. Load an image
###Code
image = load_image('/content/images/great_grey_owl.jpg')
plt.imshow(image)
plt.title('Original image')
plt.axis('off');
###Output
_____no_output_____
###Markdown
2. Load a pre-trained Model
###Code
model = models.alexnet(pretrained=True)
###Output
_____no_output_____
###Markdown
3. Create an instance of Backprop with the model
###Code
backprop = Backprop(model)
###Output
_____no_output_____
###Markdown
4. Calculate the gradients of a target class w.r.t. the input imageBy default, we return the gradients of all the colour channels.You can also specify to return the max gradient across the colour channels via the `take_max=True` flag, as this is what the authors did in the [paper](https://arxiv.org/pdf/1312.6034.pdf) and sometimes it renders better for visualization.
###Code
imagenet = ImageNetIndex()
target_class = imagenet['great grey owl']
input_ = apply_transforms(image)
# Calculate the gradients of each pixel w.r.t. the input image
gradients = backprop.calculate_gradients(input_, target_class)
# Or, take the maximum of the gradients for each pixel across colour channels.
max_gradients = backprop.calculate_gradients(input_, target_class, take_max=True)
print('Shape of the gradients:', gradients.shape)
print('Shape of the max gradients:', max_gradients.shape)
###Output
_____no_output_____
###Markdown
5. Visualize the input image and gradients side-by-side
###Code
backprop.visualize(input_, target_class)
###Output
_____no_output_____
###Markdown
6. Visualize with _guided_ backpropagation
###Code
backprop.visualize(input_, target_class, guided=True)
###Output
_____no_output_____
###Markdown
Visualize image-specific class saliency with backpropagation---A quick demo of creating saliency maps for CNNs using [FlashTorch 🔦](https://github.com/MisaOgura/flashtorch).❗This notebook is for those who are using this notebook in **Google Colab**.If you aren't on Google Colab already, please head to the Colab version of this notebook **[here](https://colab.research.google.com/github/MisaOgura/flashtorch/blob/master/examples/visualise_saliency_with_backprop_colab.ipynb)** to execute.---The gradients obtained can be used to visualise an image-specific class saliency map, which can give some intuition on regions within the input image that contribute the most (and least) to the corresponding output.More details on saliency maps: [Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps](https://arxiv.org/pdf/1312.6034.pdf). 0. Set upA GPU runtime is available on Colab for free, from the `Runtime` tab on the top menu bar.It is **recommended to use GPU** as a runtime for the enhanced speed of computation.
###Code
# Install flashtorch
!pip install flashtorch torch==1.5.0 torchvision==0.6.0 -U
# Download example images
!mkdir -p images
!wget -nv \
https://github.com/MisaOgura/flashtorch/raw/master/examples/images/great_grey_owl.jpg \
https://github.com/MisaOgura/flashtorch/raw/master/examples/images/peacock.jpg \
https://github.com/MisaOgura/flashtorch/raw/master/examples/images/toucan.jpg \
-P /content/images
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torchvision.models as models
from flashtorch.utils import apply_transforms, load_image
from flashtorch.saliency import Backprop
###Output
_____no_output_____
###Markdown
1. Load an image
###Code
image = load_image('/content/images/great_grey_owl.jpg')
plt.imshow(image)
plt.title('Original image')
plt.axis('off');
###Output
_____no_output_____
###Markdown
2. Load a pre-trained Model
###Code
model = models.alexnet(pretrained=True)
###Output
_____no_output_____
###Markdown
3. Create an instance of Backprop with the model
###Code
backprop = Backprop(model)
###Output
_____no_output_____
###Markdown
4. Visualize saliency maps
###Code
# Transform the input image to a tensor
owl = apply_transforms(image)
# Set a target class from ImageNet task: 24 in case of great gray owl
target_class = 24
# Ready to roll!
backprop.visualize(owl, target_class, guided=True, use_gpu=True)
###Output
_____no_output_____
###Markdown
5. What about other birds? What makes a peacock a peacock...?
###Code
peacock = apply_transforms(load_image('/content/images/peacock.jpg'))
backprop.visualize(peacock, 84, guided=True, use_gpu=True)
###Output
_____no_output_____
###Markdown
Or a toucan?
###Code
toucan = apply_transforms(load_image('/content/images/toucan.jpg'))
backprop.visualize(toucan, 96, guided=True, use_gpu=True)
###Output
_____no_output_____
###Markdown
Visualize image-specific class saliency with backpropagation---A quick demo of creating saliency maps for CNNs using [FlashTorch 🔦](https://github.com/MisaOgura/flashtorch).❗This notebook is for those who are using this notebook in **Google Colab**.If you aren't on Google Colab already, please head to the Colab version of this notebook **[here](https://colab.research.google.com/github/MisaOgura/flashtorch/blob/master/examples/visualise_saliency_with_backprop_colab.ipynb)** to execute.---The gradients obtained can be used to visualise an image-specific class saliency map, which can give some intuition on regions within the input image that contribute the most (and least) to the corresponding output.More details on saliency maps: [Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps](https://arxiv.org/pdf/1312.6034.pdf). 0. Set upA GPU runtime is available on Colab for free, from the `Runtime` tab on the top menu bar.It is **recommended to use GPU** as a runtime for the enhanced speed of computation.
###Code
# Install flashtorch
!pip install flashtorch
# Download example images
!mkdir -p images
!wget -nv \
https://github.com/MisaOgura/flashtorch/raw/master/examples/images/great_grey_owl.jpg \
https://github.com/MisaOgura/flashtorch/raw/master/examples/images/peacock.jpg \
https://github.com/MisaOgura/flashtorch/raw/master/examples/images/toucan.jpg \
-P /content/images
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torchvision.models as models
from flashtorch.utils import apply_transforms, load_image
from flashtorch.saliency import Backprop
###Output
_____no_output_____
###Markdown
1. Load an image
###Code
image = load_image('/content/images/great_grey_owl.jpg')
plt.imshow(image)
plt.title('Original image')
plt.axis('off');
###Output
_____no_output_____
###Markdown
2. Load a pre-trained Model
###Code
model = models.alexnet(pretrained=True)
###Output
_____no_output_____
###Markdown
3. Create an instance of Backprop with the model
###Code
backprop = Backprop(model)
###Output
_____no_output_____
###Markdown
4. Visualize saliency maps
###Code
# Transform the input image to a tensor
owl = apply_transforms(image)
# Set a target class from ImageNet task: 24 in case of great gray owl
target_class = 24
# Ready to roll!
backprop.visualize(owl, target_class, guided=True, use_gpu=True)
###Output
_____no_output_____
###Markdown
5. What about other birds? What makes a peacock a peacock...?
###Code
peacock = apply_transforms(load_image('/content/images/peacock.jpg'))
backprop.visualize(peacock, 84, guided=True, use_gpu=True)
###Output
_____no_output_____
###Markdown
Or a toucan?
###Code
toucan = apply_transforms(load_image('/content/images/toucan.jpg'))
backprop.visualize(toucan, 96, guided=True, use_gpu=True)
###Output
_____no_output_____ |
competitions/Facial_Keypoints_Detection/Facial_Keypoints_Detection.ipynb | ###Markdown
[facial-keypoints-detection](https://www.kaggle.com/c/facial-keypoints-detection) is a facial keypoint detection task: the goal is to locate the eyes, nose and mouth in face images. Each row of the training set contains the coordinates of the following 15 keypoints, followed by the image pixel values (96*96 pixels in total). The test set only contains the image pixel values.```left_eye_center, right_eye_center, left_eye_inner_corner, left_eye_outer_corner, right_eye_inner_corner, right_eye_outer_corner, left_eyebrow_inner_end, left_eyebrow_outer_end, right_eyebrow_inner_end, right_eyebrow_outer_end, nose_tip, mouth_left_corner, mouth_right_corner, mouth_center_top_lip, mouth_center_bottom_lip```
###Code
import cPickle as pickle
from datetime import datetime
import os
import sys
import numpy as np
import pandas as pd
from lasagne import layers
from nolearn.lasagne import BatchIterator
from nolearn.lasagne import NeuralNet
from pandas.io.parsers import read_csv
from sklearn.utils import shuffle
import theano
###Output
/Library/Python/2.7/site-packages/theano/tensor/signal/downsample.py:6: UserWarning: downsample module has been moved to the theano.tensor.signal.pool module.
"downsample module has been moved to the theano.tensor.signal.pool module.")
###Markdown
Data loading and preview
###Code
train_file = 'training.csv'
test_file = 'test.csv'
def load(test=False, cols=None):
    """
    Load the data. The `test` flag selects the training or the test set,
    and `cols` optionally restricts the feature columns that are kept.
    """
    fname = test_file if test else train_file
    df = pd.read_csv(os.path.expanduser(fname))
    # Convert the image pixel string into a numpy array
    df['Image'] = df['Image'].apply(lambda x: np.fromstring(x, sep=' '))
    # Keep only the requested columns
    if cols:
        df = df[list(cols) + ['Image']]
    print(df.count())  # simple per-column statistics
    df = df.dropna()  # drop rows with missing values
    # Scale pixel values to the range 0-1
    X = np.vstack(df['Image'].values) / 255.
    X = X.astype(np.float32)
    # Normalize the target coordinates of the training set
    if not test:
        y = df[df.columns[:-1]].values
        y = (y - 48) / 48
        X, y = shuffle(X, y, random_state=42)
        y = y.astype(np.float32)
    else:
        y = None
    return X, y
# Reshape the flat pixel rows into (n, 1, 96, 96) image arrays
def load2d(test=False, cols=None):
    X, y = load(test=test, cols=cols)
    X = X.reshape(-1, 1, 96, 96)
    return X, y
###Output
_____no_output_____
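###Markdown
As a quick sanity check (a hypothetical usage example, assuming `training.csv` is present in the working directory), the loaders defined above can be called directly; `load()` returns flat pixel rows while `load2d()` returns image-shaped arrays:
###Code
# Hypothetical usage of the loaders defined above
X_flat, y_flat = load()    # X_flat has shape (n_samples, 9216), i.e. 96*96 pixels per row
X_img, y_img = load2d()    # X_img has shape (n_samples, 1, 96, 96)
print(X_flat.shape, y_flat.shape)
print(X_img.shape, y_img.shape)
###Output
_____no_output_____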
###Markdown
Data processing One option is to train a single classifier that predicts all the target keypoints at once. The other is to train separate classifiers for the eyes, nose and mouth, each predicting only its own group of targets. Inspecting the data, we find that the training set contains many missing values; with a single classifier we would have to drop the incomplete rows, shrinking the sample set and wasting data. We therefore choose the second approach and train one classifier per target group, which makes better use of the sample data.
###Code
from collections import OrderedDict
from sklearn.base import clone
SPECIALIST_SETTINGS = [
dict(
columns=(
'left_eye_center_x', 'left_eye_center_y',
'right_eye_center_x', 'right_eye_center_y',
),
flip_indices=((0, 2), (1, 3)),
),
dict(
columns=(
'nose_tip_x', 'nose_tip_y',
),
flip_indices=(),
),
dict(
columns=(
'mouth_left_corner_x', 'mouth_left_corner_y',
'mouth_right_corner_x', 'mouth_right_corner_y',
'mouth_center_top_lip_x', 'mouth_center_top_lip_y',
),
flip_indices=((0, 2), (1, 3)),
),
dict(
columns=(
'mouth_center_bottom_lip_x',
'mouth_center_bottom_lip_y',
),
flip_indices=(),
),
dict(
columns=(
'left_eye_inner_corner_x', 'left_eye_inner_corner_y',
'right_eye_inner_corner_x', 'right_eye_inner_corner_y',
'left_eye_outer_corner_x', 'left_eye_outer_corner_y',
'right_eye_outer_corner_x', 'right_eye_outer_corner_y',
),
flip_indices=((0, 2), (1, 3), (4, 6), (5, 7)),
),
dict(
columns=(
'left_eyebrow_inner_end_x', 'left_eyebrow_inner_end_y',
'right_eyebrow_inner_end_x', 'right_eyebrow_inner_end_y',
'left_eyebrow_outer_end_x', 'left_eyebrow_outer_end_y',
'right_eyebrow_outer_end_x', 'right_eyebrow_outer_end_y',
),
flip_indices=((0, 2), (1, 3), (4, 6), (5, 7)),
),
]
class FlipBatchIterator(BatchIterator):
flip_indices = [
(0, 2), (1, 3),
(4, 8), (5, 9), (6, 10), (7, 11),
(12, 16), (13, 17), (14, 18), (15, 19),
(22, 24), (23, 25),
]
def transform(self, Xb, yb):
Xb, yb = super(FlipBatchIterator, self).transform(Xb, yb)
# Flip half of the images in this batch at random:
bs = Xb.shape[0]
indices = np.random.choice(bs, bs / 2, replace=False)
Xb[indices] = Xb[indices, :, :, ::-1]
if yb is not None:
# Horizontal flip of all x coordinates:
yb[indices, ::2] = yb[indices, ::2] * -1
# Swap places, e.g. left_eye_center_x -> right_eye_center_x
for a, b in self.flip_indices:
yb[indices, a], yb[indices, b] = (
yb[indices, b], yb[indices, a])
return Xb, yb
class EarlyStopping(object):
def __init__(self, patience=100):
self.patience = patience
self.best_valid = np.inf
self.best_valid_epoch = 0
self.best_weights = None
def __call__(self, nn, train_history):
current_valid = train_history[-1]['valid_loss']
current_epoch = train_history[-1]['epoch']
if current_valid < self.best_valid:
self.best_valid = current_valid
self.best_valid_epoch = current_epoch
self.best_weights = nn.get_all_params_values()
elif self.best_valid_epoch + self.patience < current_epoch:
print("Early stopping.")
print("Best valid loss was {:.6f} at epoch {}.".format(
self.best_valid, self.best_valid_epoch))
nn.load_params_from(self.best_weights)
raise StopIteration()
class AdjustVariable(object):
def __init__(self, name, start=0.03, stop=0.001):
self.name = name
self.start, self.stop = start, stop
self.ls = None
def __call__(self, nn, train_history):
if self.ls is None:
self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
epoch = train_history[-1]['epoch']
new_value = np.cast['float32'](self.ls[epoch - 1])
getattr(nn, self.name).set_value(new_value)
def float32(k):
return np.cast['float32'](k)
net = NeuralNet(
layers=[
('input', layers.InputLayer),
('conv1', layers.Conv2DLayer),
('pool1', layers.MaxPool2DLayer),
('dropout1', layers.DropoutLayer),
('conv2', layers.Conv2DLayer),
('pool2', layers.MaxPool2DLayer),
('dropout2', layers.DropoutLayer),
('conv3', layers.Conv2DLayer),
('pool3', layers.MaxPool2DLayer),
('dropout3', layers.DropoutLayer),
('hidden4', layers.DenseLayer),
('dropout4', layers.DropoutLayer),
('hidden5', layers.DenseLayer),
('output', layers.DenseLayer),
],
input_shape=(None, 1, 96, 96),
conv1_num_filters=32, conv1_filter_size=(3, 3), pool1_pool_size=(2, 2),
dropout1_p=0.1,
conv2_num_filters=64, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),
dropout2_p=0.2,
conv3_num_filters=128, conv3_filter_size=(2, 2), pool3_pool_size=(2, 2),
dropout3_p=0.3,
hidden4_num_units=300,
dropout4_p=0.5,
hidden5_num_units=300,
output_num_units=30, output_nonlinearity=None,
update_learning_rate=theano.shared(float32(0.03)),
update_momentum=theano.shared(float32(0.9)),
regression=True,
batch_iterator_train = BatchIterator(batch_size = 100),
batch_iterator_test = BatchIterator(batch_size = 100),
# batch_iterator_train=FlipBatchIterator(batch_size=128),
# on_epoch_finished=[
# AdjustVariable('update_learning_rate', start=0.03, stop=0.0001),
# AdjustVariable('update_momentum', start=0.9, stop=0.999),
# EarlyStopping(patience=200),
# ],
max_epochs=10,
verbose=1,
)
def fit_specialists(fname_pretrain=None):
if fname_pretrain:
with open(fname_pretrain, 'rb') as f:
net_pretrain = pickle.load(f)
else:
net_pretrain = None
specialists = OrderedDict()
for setting in SPECIALIST_SETTINGS:
cols = setting['columns']
X, y = load2d(cols=cols)
model = clone(net)
model.output_num_units = y.shape[1]
model.batch_iterator_train.flip_indices = setting['flip_indices']
model.max_epochs = int(4e6 / y.shape[0])
if 'kwargs' in setting:
# an option 'kwargs' in the settings list may be used to
# set any other parameter of the net:
vars(model).update(setting['kwargs'])
if net_pretrain is not None:
# if a pretrain model was given, use it to initialize the
# weights of our new specialist model:
model.load_params_from(net_pretrain)
print("Training model for columns {} for {} epochs".format(
cols, model.max_epochs))
model.fit(X, y)
specialists[cols] = model
with open('net-specialists.pickle', 'wb') as f:
# this time we're persisting a dictionary with all models:
pickle.dump(specialists, f, -1)
def predict(fname_specialists='net-specialists.pickle'):
    # Kaggle's lookup table mapping each submission RowId to (ImageId, FeatureName)
    FLOOKUP = 'IdLookupTable.csv'
    with open(fname_specialists, 'rb') as f:
        specialists = pickle.load(f)
X = load2d(test=True)[0]
y_pred = np.empty((X.shape[0], 0))
for model in specialists.values():
y_pred1 = model.predict(X)
y_pred = np.hstack([y_pred, y_pred1])
columns = ()
for cols in specialists.keys():
columns += cols
y_pred2 = y_pred * 48 + 48
y_pred2 = y_pred2.clip(0, 96)
    df = pd.DataFrame(y_pred2, columns=columns)
lookup_table = read_csv(os.path.expanduser(FLOOKUP))
values = []
for index, row in lookup_table.iterrows():
values.append((
row['RowId'],
df.ix[row.ImageId - 1][row.FeatureName],
))
now_str = datetime.now().isoformat().replace(':', '-')
    submission = pd.DataFrame(values, columns=('RowId', 'Location'))
filename = 'submission-{}.csv'.format(now_str)
submission.to_csv(filename, index=False)
print("Wrote {}".format(filename))
if __name__ == '__main__':
fit_specialists()
predict()
###Output
left_eye_center_x 7039
left_eye_center_y 7039
right_eye_center_x 7036
right_eye_center_y 7036
Image 7049
dtype: int64
Training model for columns ('left_eye_center_x', 'left_eye_center_y', 'right_eye_center_x', 'right_eye_center_y') for 568 epochs
|
Data/SQLAlchemy_climate.ipynb | ###Markdown
Reflect Tables into SQLAlchemy ORM
###Code
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
# Supporting libraries used later in the analysis
import datetime as dt
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
inspector = inspect(engine)
inspector.get_table_names()
columns = inspector.get_columns('measurement')
for column in columns:
print(column['name'], column['type'])
columns = inspector.get_columns('station')
for column in columns:
print(column['name'], column['type'])
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect = True)
# We can view all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
###Output
_____no_output_____
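###Markdown
As a quick check that the reflection worked (a minimal usage sketch, not part of the assignment), we can query a few rows through the mapped `Measurement` class:
###Code
# Peek at the first few precipitation records via the reflected ORM class
for row in session.query(Measurement.date, Measurement.prcp).limit(5).all():
    print(row)
###Output
_____no_output_____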
###Markdown
Exploratory Climate Analysis
###Code
# Design a query to retrieve the last 12 months of precipitation data and plot the results
# Calculate the date 1 year ago from the last data point in the database
# Perform a query to retrieve the data and precipitation scores
# Save the query results as a Pandas DataFrame and set the index to the date column
# Sort the dataframe by date
# Use Pandas Plotting with Matplotlib to plot the data
# find the last date of measurement
session.query(Measurement.date).order_by(Measurement.date.desc()).first()
# find the start date
one_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
print (one_year)
# Design a query to retrieve the last 12 months of precipitation data and plot the results
last_year = [Measurement.date, Measurement.prcp]
precip = session.query(*last_year).filter(Measurement.date >= '2016-08-23').all()
precip_df = pd.DataFrame(precip).rename(columns={"date": "Date", "prcp": "Precipitation"}).dropna()
precip_df
precip_df = precip_df.set_index("Date")
precip_df
fig, ax = plt.subplots(figsize = (14, 6))
precip_df.plot(ax = ax, x_compat = True)
ax.set_xlabel('Date')
ax.set_ylabel('Precipitation (inches)')
ax.set_title("Precipitation 08/2016 - 08-2017")
plt.savefig("precip1.png")
plt.show()
# Use Pandas to calculate the summary statistics for the precipitation data
precip_df.describe()
# Design a query to show how many stations are available in this dataset?
stations = session.query(Station.id).distinct().count()
stations
# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
active_station = session.query(Station.station, func.count(Measurement.id)).select_from(Measurement).\
join(Station, Measurement.station == Station.station).group_by(Station.station).\
order_by(func.count(Measurement.id).desc()).all()
for result in active_station:
print(f"Station: {result[0]}\tCount: {result[1]}")
most_active_station = active_station[0][0]
most_active_station
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station?
temp = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
filter(Measurement.station == most_active_station).all()
print(f"Lowest Temperature: {temp[0][0]} F")
print(f"Average Temperature: {round(temp[0][2],2)} F")
print(f"Highest Temperature: {temp[0][1]} F")
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
top_station = session.query(Measurement.date, Measurement.tobs).filter(Measurement.station == most_active_station).\
filter(func.strftime("%Y-%m-%d", Measurement.date) >= dt.date(2016, 8, 23)).all()
top_station
top_station_df = pd.DataFrame(top_station, columns = ['date', 'temperature'])
top_station_df.set_index('date', inplace = True)
top_station_df
temps = [Measurement.date, Measurement.tobs]
temp_histo = session.query(*temps).filter(Measurement.date >= '2016-08-23').filter(Measurement.station == 'USC00519281' ).all()
temp_histo = pd.DataFrame(temp_histo)
temp_histo.plot.hist(bins=12)
plt.savefig('temp1.png')
plt.xlabel("Temperature")
###Output
_____no_output_____
###Markdown
Bonus Challenge Assignment
###Code
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates
def calc_temps(start_date, end_date):
"""TMIN, TAVG, and TMAX for a list of dates.
Args:
start_date (string): A date string in the format %Y-%m-%d
end_date (string): A date string in the format %Y-%m-%d
Returns:
TMIN, TAVE, and TMAX
"""
return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
# function usage example
print(calc_temps('2012-02-28', '2012-03-05'))
# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax
# for your trip using the previous year's data for those same dates.
temps2 = calc_temps('2017-03-12', '2017-03-19')
print(f"Min Temperature: {temps2[0][0]} F")
print(f"Average Temperature: {round(temps2[0][1],2)} F")
print(f"Max Temperature: {temps2[0][2]} F")
# Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as your Title
# Use the average temperature for the y value
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
temp_plot = temps2[0][2] - temps2[0][0]
avg_temp = temps2[0][1]
fig, ax = plt.subplots(figsize = (2, 6))
ax.bar(1, avg_temp, yerr = temp_plot/2, width = 0.1, color='pink')
ax.set_xticks([1])
ax.set_xticklabels([""])
ax.set_title('Trip Avg Temp')
ax.set_ylabel('Temperature (F)')
plt.savefig("temp2.png")
plt.show()
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
total_rainfall = session.query(Station.station, Station.name, Station.latitude, Station.longitude,
Station.elevation, func.avg(Measurement.prcp)).\
filter(Measurement.station == Station.station).\
filter(func.strftime("%Y-%m-%d", Measurement.date) >= dt.date(2017,3,12)).group_by(Station.station).\
order_by(func.avg(Measurement.prcp).desc()).all()
total_rainfall_df = pd.DataFrame(total_rainfall, columns = ['Station', 'Name', 'Latitude', 'Longitude',
'Elevation', 'Avg. Precipitation (in)'])
total_rainfall_df
# Create a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
"""Daily Normals.
Args:
date (str): A date string in the format '%m-%d'
Returns:
A list of tuples containing the daily normals, tmin, tavg, and tmax
"""
sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
return session.query(*sel).filter(func.strftime("%m-%d", Measurement.date) == date).all()
daily_normals("01-01")
# calculate the daily normals for your trip
# push each tuple of calculations into a list called `normals`
# Set the start and end date of the trip
# Use the start and end date to create a range of dates
# Stip off the year and save a list of %m-%d strings
# Loop through the list of %m-%d strings and calculate the normals for each date
dates = ["03-12", "03-13", "03-14", "03-15", "03-16", "03-17", "03-18", "03-19"]
normals = []
for date in dates:
day = {}
day['Date'] = f"2018-{date}"
normal = daily_normals(date)
day['Min'] = normal[0][0]
day['Avg'] = round(normal[0][1])
day['Max'] = normal[0][2]
normals.append(day)
normals
# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index
trip_dates = pd.DataFrame(normals)
trip_dates.set_index('Date', inplace = True)
trip_dates
# Plot the daily normals as an area plot with `stacked=False`
fig, ax = plt.subplots(figsize = (8, 5))
trip_dates.plot.area(ax = ax, stacked = False)
ax.set_xlabel('Date')
ax.set_ylabel('Temperature (F)')
ax.set_title('Daily Normal Temperatures')
temps3 = [f"2018-{date}" for date in dates]
ax.set_xticks(np.arange(len(temps3)))
ax.set_xticklabels(dates, rotation = 45)
plt.savefig('temp3.png')
plt.grid()
plt.show()
###Output
_____no_output_____ |
Exam2Answers.ipynb | ###Markdown
Problem 1: Check the AGI codes (e.g. AT1G01040) in the files LocusGene.tsv and Germplasm.tsv. I take the first column of each file and compare the values row by row to determine whether the AGI codes are the same in both files or differ.
###Code
#cat LocusGene.tsv to check what the data looks like. As it is a tab-separated file, the first column corresponds to the AGI code.
locusfile = open("LocusGene.tsv", "r")
LocusGene1_first_value = [] # make a list with the values of the first column (AGI code)
for line in locusfile.readlines(): #read the file line by line
    LocusGene1, Gene, Proteinlength = line.split('\t') #split() returns a list of strings after breaking the line at the tab separator
LocusGene1_first_value.append(LocusGene1)
#print(LocusGene1)
Germplasm_first_value = [] #the same for the second file and you take the column called Locusplasm2
Germfile = open("Germplasm.tsv", "r")
for line in Germfile.readlines():
Locusplasm2, Gene, Phenotype, pubmed = line.split('\t')
Germplasm_first_value.append(Locusplasm2)
#print(Locusplasm2)
# Check whether the two files have a different number of rows.
if len(LocusGene1_first_value) != len(Germplasm_first_value):
    print('The number of rows in file LocusGene.tsv is different from the number of rows in the file Germplasm.tsv.')
# If the lengths are the same, we compare the values pairwise to see whether each locus is the same and print Same or Different Locus.
for i in range(len(LocusGene1_first_value)):
if LocusGene1_first_value[i]==Germplasm_first_value[i]:
print("Same Locus: ", LocusGene1_first_value[i], Germplasm_first_value[i])
else:
print("Different Locus:", LocusGene1_first_value[i], Germplasm_first_value[i])
#Close both files
locusfile.close()
Germfile.close()
###Output
Same Locus: Locus Locus
Same Locus: AT1G01040 AT1G01040
Same Locus: AT1G01060 AT1G01060
Same Locus: AT1G01140 AT1G01140
Same Locus: AT1G01220 AT1G01220
Same Locus: AT2G03720 AT2G03720
Same Locus: AT2G03800 AT2G03800
Same Locus: AT2G04240 AT2G04240
Same Locus: AT2G05210 AT2G05210
Same Locus: AT3G02130 AT3G02130
Same Locus: AT3G02140 AT3G02140
Same Locus: AT3G02230 AT3G02230
Same Locus: AT3G02260 AT3G02260
Same Locus: AT3G02310 AT3G02310
Same Locus: AT3G02680 AT3G02680
Same Locus: AT3G02850 AT3G02850
Same Locus: AT3G02870 AT3G02870
Same Locus: AT3G03260 AT3G03260
Same Locus: AT4G14790 AT4G14790
Same Locus: AT4G15210 AT4G15210
Same Locus: AT4G15560 AT4G15560
Same Locus: AT4G15570 AT4G15570
Same Locus: AT4G15802 AT4G15802
Same Locus: AT4G15880 AT4G15880
Same Locus: AT4G16420 AT4G16420
Same Locus: AT4G16480 AT4G16480
Same Locus: AT5G10480 AT5G10480
Same Locus: AT5G10510 AT5G10510
Same Locus: AT5G11110 AT5G11110
Same Locus: AT5G11260 AT5G11260
Same Locus: AT5G11510 AT5G11510
Same Locus: AT5G12200 AT5G12200
Same Locus: AT5G13290 AT5G13290
###Markdown
Problem 2: Design and create the database. 1) Create the database called "Exam2db" 2) Table 1 with columns (Locus, Gene, and ProteinLength) for LocusGene.tsv 3) Table 2 with a 1:1 relationship (Locus, Germplasm, Phenotype and pubmed) for Germplasm.tsv
###Code
# To start mysql with the SqLMagic and connect.
%load_ext sql
%config SqlMagic.autocommit=False
%sql mysql+pymysql://root:[email protected]:3306/mysql
#In case I need to remove or drop the database
#%sql drop database Exam2db;
#Sow databases to see what is inside, before the creation of the database
%sql show databases;
#To create the database called "Exam2db" and show it
%sql create database Exam2db;
%sql show databases;
#Table 1 LocusGene with columns (Locus, Gene, and ProteinLength) and value types (VARCHAR(10) since the AGI code has 9 characters, VARCHAR(10) since the gene name usually has fewer than 10 characters, and INTEGER since the length is a number) for LocusGene.tsv
#As the Locus gene is common for both data files (Germplasm and LocusGene) I will use it as primary Key.
%sql use Exam2db;
%sql CREATE TABLE LocusGene(Locus VARCHAR(10) PRIMARY KEY, Gene VARCHAR(10) NOT NULL, ProteinLength INTEGER NOT NULL);
%sql DESCRIBE LocusGene;
#Table 2 Germplasm with columns (Locus, Germplasm, Phenotype and pubmed) and value types (VARCHAR(10) for the Locus which is the primary key, VARCHAR(50), VARCHAR(600) and INTEGER which is the pubmed number)
%sql use Exam2db; #use the database
%sql CREATE TABLE Germplasm(Locus VARCHAR(10) PRIMARY KEY, Germplasm VARCHAR(50) NOT NULL, Phenotype VARCHAR(600) NOT NULL, pubmed INTEGER NOT NULL);
%sql DESCRIBE Germplasm; # to show the structure of a table
###Output
* mysql+pymysql://root:***@127.0.0.1:3306/mysql
0 rows affected.
* mysql+pymysql://root:***@127.0.0.1:3306/mysql
0 rows affected.
* mysql+pymysql://root:***@127.0.0.1:3306/mysql
4 rows affected.
###Markdown
As we have checked before, each row in one table corresponds to exactly one row in the other table; for this reason the id or PRIMARY KEY for both is the Locus. This is the 1:1 relation used to join the tables with the command SELECT * FROM (an equivalent INNER JOIN form is shown after the query below).
###Code
#This is used to create the relationship: since the Locus is the same in both tables, we use it as the ID or the KEY
%sql SELECT * FROM Germplasm, LocusGene WHERE Germplasm.Locus=LocusGene.Locus
###Output
* mysql+pymysql://root:***@127.0.0.1:3306/mysql
32 rows affected.
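###Markdown
Equivalently, the same 1:1 join can be written with explicit INNER JOIN syntax (the form used again in Problem 4):
###Code
%sql SELECT * FROM Germplasm INNER JOIN LocusGene ON Germplasm.Locus = LocusGene.Locus;
###Output
_____no_output_____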
###Markdown
Problem 3: Fill the database. To fill the database I import the information from the .TSV files.
###Code
#I first connect mysql to the database db=Exam2db
import pymysql.cursors
connection = pymysql.connect(host='localhost',
user='root',
password='root',
db='Exam2db', # the name of the database we use
charset='utf8mb4', # it is important for unusual characters!
cursorclass=pymysql.cursors.DictCursor)
#I open both files to read them
Germfile = open("Germplasm.tsv", "r")
Germfile.seek(0)
Germfile = Germfile.readlines()
Locusfile = open("LocusGene.tsv", "r")
Locusfile.seek(0)
Locusfile = Locusfile.readlines()
# I read each file row by row and split the rows on tabs, since the files are tab-separated. Each field of the row is assigned to a named variable (namelocus, namegerm, namephen, namepub)
try:
with connection.cursor() as cursor:
for data in range (1, len(Germfile)):
row = Germfile[data].split ('\t')
namelocus = row[0]
namegerm = row[1]
namephen= row[2]
namepub= row[3]
# I will insert the data using sql into the table Germplasm and I use string (%s)and digit (%d) format
tablevariables = (namelocus,namegerm, namephen,namepub)
sql = "INSERT INTO Germplasm (Locus, Germplasm, Phenotype, pubmed) VALUES (%s, %s, %s, %s)"
try:
cursor.execute (sql,tablevariables)
except:
print('Warning, the entry ' + namelocus + ' is already in mysql.')
            # I use try/except to avoid inserting duplicates. If the entry has already been loaded, a warning is printed instead.
#I have done the same for the second table
connection.commit()
for dat in range (1, len(Locusfile)):
row = Locusfile [dat].split('\t')
namelocus = row [0]
namegene = row [1]
nameproteinlength = row [2]
tablevariables_LocusGene = (namelocus,namegene, nameproteinlength)
sql = "INSERT INTO LocusGene(Locus, Gene, ProteinLength) VALUES (%s, %s, %s)"
try:
res = cursor.execute(sql,tablevariables_LocusGene)
except:
print('Warning, the entry ' + namelocus + ' is already in mysql.')
connection.commit() #commit the changes
finally:
print("Closing connection")
connection.close() #close the conection with the database
%sql SELECT * FROM Germplasm
%sql SELECT * FROM LocusGene
###Output
* mysql+pymysql://root:***@127.0.0.1:3306/mysql
32 rows affected.
* mysql+pymysql://root:***@127.0.0.1:3306/mysql
32 rows affected.
###Markdown
Problem 4: 1) Create the full join report 2) Create a joined report that only includes the Genes SKOR and MAA3 3) Create a report that counts the number of entries for each Chromosome (AT1Gxxxxxx to AT5Gxxxxxx) 4) Create a report that shows the average protein length for the genes on each Chromosome (AT1Gxxxxxx to AT5Gxxxxxx)
###Code
connection = pymysql.connect(host='localhost',
user='root',
password='root',
db='Exam2db',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
report = open("report1.txt", "w") # open report for write
try:
with connection.cursor() as cursor:
report.write('Locus\tGermplasm\tPhenotype\tPubmed\tLocusgene\tGene\tProteinlength\n')
sql = "SELECT * FROM Germplasm INNER JOIN LocusGene ON Germplasm.Locus = LocusGene.Locus"
cursor.execute(sql)
results = cursor.fetchall()
for result in results:
for data in result.values():
report.write(str(data))
report.write('\t')
report.write('\n')
connection.commit()
checkcontent = open("report1.txt", "r") #open only to read the file
print(checkcontent.read()) # print the content of the file
report.close()
checkcontent.close()
finally:
print("")
connection.close()
###Output
###Markdown
Problem 4.2: solution
###Code
connection = pymysql.connect(host='localhost',
user='root',
password='root',
db='Exam2db',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
connection.autocommit = False
report= open("report1.txt", "a")
try:
with connection.cursor() as cursor:
sql = """SELECT Germplasm.Locus,Germplasm.Germplasm,Germplasm.Phenotype,Germplasm.Pubmed,LocusGene.Locus,
LocusGene.Gene,LocusGene.Proteinlength from Germplasm INNER JOIN LocusGene ON Germplasm.Locus = LocusGene.Locus WHERE Gene LIKE 'SKOR' OR Gene LIKE 'MAA3';"""
cursor.execute(sql) # select data from a database
results = cursor.fetchall()
#print(results)
for result in results:
report.write("Solution problem 4.2 report of the Genes SKOR and MAA3:\n") # The first loop read all the data of the sentence selected with the sql
for data in result.values(): #The second loop read the values
report.write(str(data)) #write the data
report.write(' ')
report.write('\n') #write a new line
checkcontent = open("report1.txt", "r") #open only to read the file
print(checkcontent.read()) # print the content of the file
report.close() #close the report and make the commit
checkcontent.close()
connection.commit()
finally:
print("")
connection.close() #close the conenction
connection = pymysql.connect(host='localhost',
user='root',
password='root',
db='Exam2db',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
connection.autocommit = False
#I use SELECT COUNT(*) to count the entries whose Locus starts with AT1G/AT2G/AT3G/AT4G/AT5G. I do the same for each chromosome.
try:
with connection.cursor() as cursor:
sql1 = """SELECT COUNT(*) FROM Germplasm RIGHT JOIN LocusGene ON Germplasm.Locus = LocusGene.Locus WHERE Germplasm.Locus LIKE 'AT1G%';"""
cursor.execute(sql1)
results1 = cursor.fetchall()
        report= open("report1.txt", "a") #open the file for append so we do not overwrite it
report.write ("Number of entries Chromosome 1" + str(results1) + "\n")
sql2 = """SELECT COUNT(*) FROM Germplasm INNER JOIN LocusGene ON Germplasm.Locus = LocusGene.Locus WHERE Germplasm.Locus LIKE 'AT2G%';"""
cursor.execute(sql2)
results2 = cursor.fetchall()
report.write("Number of entries Chromosome 2" + str(results2)+ "\n")
sql3 = """SELECT COUNT(*) FROM Germplasm INNER JOIN LocusGene ON Germplasm.Locus = LocusGene.Locus WHERE Germplasm.Locus LIKE 'AT3G%';"""
cursor.execute(sql3)
results3 = cursor.fetchall()
report.write("Number of entries Chromosome 3 " + str(results3) + "\n")
sql4 = """SELECT COUNT(*) FROM Germplasm INNER JOIN LocusGene ON Germplasm.Locus = LocusGene.Locus WHERE Germplasm.Locus LIKE 'AT4G%';"""
cursor.execute(sql4)
results4 = cursor.fetchall()
report.write("Number of entries Chromosome 4 " + str(results4) + "\n")
        sql5 = """SELECT COUNT(*) FROM Germplasm INNER JOIN LocusGene ON Germplasm.Locus = LocusGene.Locus WHERE Germplasm.Locus LIKE 'AT5G%';"""
cursor.execute(sql5)
results5 = cursor.fetchall()
report.write("Number of entries Chromosome 5" + str(results5) + "\n")
report.close()
checkcontent = open("report1.txt", "r") # I open the file to read and do not modify anything
print(checkcontent.read())
connection.commit() # commit
finally:
print("")
connection.close() # close conneciton
###Output
Locus Germplasm Phenotype Pubmed Locusgene Gene Proteinlength
AT1G01040 CS3828 Increased abundance of miRNA precursors. 17369351 AT1G01040 DCL1 332
AT1G01060 lhy-101 The mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant. 16891401 AT1G01060 LHY 290
AT1G01140 SALK_058629 hypersensitive to low potassium media 17486125 AT1G01140 CIPK9 223
AT1G01220 SALK_012400C fkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected. 18199744 AT1G01220 FKGP 190
AT2G03720 SALK_042433 Multiple straight hairs 16367956 AT2G03720 MRH6 189
AT2G03800 gek1-1 Ethanol hypersensitivity. 15215505 AT2G03800 GEK1 196
AT2G04240 xerico Resistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype. 17933900 AT2G04240 XERICO 256
AT2G05210 pot1-1 No visible phenotype. 17627276 AT2G05210 POT1A 221
AT3G02130 rpk2-2 The homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants. 17419837 AT3G02130 RPK2 284
AT3G02140 afp4-1 Decreased germination on high concentrations of glucose and sorbitol. 18484180 AT3G02140 TMAC2 300
AT3G02230 rgp1-1 rgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls. 21478444 AT3G02230 RGP1 301
AT3G02260 tir3-1 RGLG1:rglg1 rglg2 The triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence. 17586653 AT3G02260 BIG 279
AT3G02310 sep2-1 Non-described subtle phenotype. 10821278 AT3G02310 SEP2 175
AT3G02680 atnbs1-1 Significantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions. 17672843 AT3G02680 NBS1 190
AT3G02850 CS3816 The skor-1 mutant is sensitive to toxic cations in addition to K+ depletion. 17568770 AT3G02850 SKOR 234
AT3G02870 vtc4-1 ascorbate deficient 16595667 AT3G02870 VTC4 311
AT3G03260 hdg8-1 No visible phenotype. 16778018 AT3G03260 HDG8 194
AT4G14790 pdd17 Defective pollen development. 19237690 AT4G14790 SUV3 312
AT4G15210 bmy1-2 Plants cold-shocked for 6h have an increased starch content compared to wildtype. 16297066 AT4G15210 BAM5 313
AT4G15560 cla1-1 Mutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves. 10982425 AT4G15560 DXS 219
AT4G15570 maa3 Homozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT. 18772186 AT4G15570 MAA3 294
AT4G15802 Athspb-2 Early flowering, reduced fertility, aborted seeds. 20388662 AT4G15802 HSBP 254
AT4G15880 esd4-2 Decreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1. 17513499 AT4G15880 ESD4 265
AT4G16420 prz1-1 Altered response to auxin and cytokinin 12747832 AT4G16420 ADA2B 279
AT4G16480 atint4-2 No visible phenotype. 16603666 AT4G16480 INT4 284
AT5G10480 pas2-3 Segregates 25% embryo lethal. 18799749 AT5G10480 PAS2 301
AT5G10510 plt3-1 Short roots and shortened root meristem. 17960244 AT5G10510 AIL6 310
AT5G11110 kns2 Defects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile. 18779216 AT5G11110 SPS2 232
AT5G11260 hy5-101 Under FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller. 16891401 AT5G11260 HY5 221
AT5G11510 myb3r4-1 No visible phenotype. 17287251 AT5G11510 MYB3R-4 336
AT5G12200 pyd2-2 The pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source. 19413687 AT5G12200 PYD2 310
AT5G13290 crn-1 Increased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3. 12345678 AT5G13290 CRN 189
Report of the Genes SKOR and MAA3:
AT3G02850 CS3816 The skor-1 mutant is sensitive to toxic cations in addition to K+ depletion. 17568770 AT3G02850 SKOR 234
Report of the Genes SKOR and MAA3:
AT4G15570 maa3 Homozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT. 18772186 AT4G15570 MAA3 294
Report of the Genes SKOR and MAA3:
AT3G02850 CS3816 The skor-1 mutant is sensitive to toxic cations in addition to K+ depletion. 17568770 AT3G02850 SKOR 234
Report of the Genes SKOR and MAA3:
AT4G15570 maa3 Homozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT. 18772186 AT4G15570 MAA3 294
Number of entries Chromosome 1[{'COUNT(*)': 4}]
Number of entries Chromosome 2[{'COUNT(*)': 4}]
Number of entries Chromosome 3 [{'COUNT(*)': 9}]
Number of entries Chromosome 4 [{'COUNT(*)': 8}]
Number of entries Chromosome 5[{'COUNT(*)': 4}]
###Markdown
Problem 4.4: In this case I open the connection and use the SELECT AVG function on the INNER JOIN of both tables (Germplasm and LocusGene) to compute the average ProteinLength of the entries whose Locus belongs to chromosome 1, 2, 3, 4 or 5 (AT1G, AT2G, AT3G, AT4G, AT5G). All the information is written in "append" mode so as not to overwrite the report. We will get the length with decimals, since the average is a floating-point type of data.
###Code
connection = pymysql.connect(host='localhost', #Open the conecction with the terminal
user='root',
password='root',
db='Exam2db',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
connection.autocommit = False # The changes are temporals until I commit
# I use the command Selection Average to calculate the Protein lenght of the INNER join of tboth tables for the structure ATG1,2,3,4 and 5.
try:
with connection.cursor() as cursor:
sql1 = """SELECT AVG(ProteinLength) FROM LocusGene INNER JOIN Germplasm ON LocusGene.Locus = Germplasm.Locus WHERE LocusGene.Locus LIKE 'AT1G%';"""
cursor.execute(sql1) #I use the command SELECT AVR (the column)
results1 = cursor.fetchall()
report= open("report1.txt", "a") #Open the Report file for append and put the pointer at the END of the file
report.write("Average ProteinLength in the Chromosome 1 is: " + str(results1)+"\n") #print the results as a string
sql2 = """SELECT AVG(ProteinLength) FROM LocusGene INNER JOIN Germplasm ON LocusGene.Locus = Germplasm.Locus WHERE LocusGene.Locus LIKE 'AT2G%';"""
cursor.execute(sql2)
results2 = cursor.fetchall()
report.write("Average ProteinLength in the Chromosome 2 is: " + str(results2)+"\n")
sql3 = """SELECT AVG(ProteinLength) FROM LocusGene INNER JOIN Germplasm ON LocusGene.Locus = Germplasm.Locus WHERE LocusGene.Locus LIKE 'AT3G%';"""
cursor.execute(sql3)
results3 = cursor.fetchall()
report.write("Average ProteinLength in the Chromosome 3 is: " + str(results3)+"\n")
sql4 = """SELECT AVG(ProteinLength) FROM LocusGene INNER JOIN Germplasm ON LocusGene.Locus = Germplasm.Locus WHERE LocusGene.Locus LIKE 'AT4G%';"""
cursor.execute(sql4)
results4 = cursor.fetchall()
report.write("Average ProteinLength in the Chromosome 4 is: " + str(results4)+"\n")
sql5 = """SELECT AVG(ProteinLength) FROM LocusGene INNER JOIN Germplasm ON LocusGene.Locus = Germplasm.Locus WHERE LocusGene.Locus LIKE 'AT5G%';"""
cursor.execute(sql5)
results5 = cursor.fetchall()
report.write("Average ProteinLength in the Chromosome 5 is: " + str(results5)+"\n")
        report.close() # Close the file
checkcontent = open("report1.txt", "r") # I open the file to read and do not modify anything
print(checkcontent.read())
connection.commit() #Commit the changes
finally:
print("")
connection.close() # Close the conecction
###Output
Locus Germplasm Phenotype Pubmed Locusgene Gene Proteinlength
AT1G01040 CS3828 Increased abundance of miRNA precursors. 17369351 AT1G01040 DCL1 332
AT1G01060 lhy-101 The mutant plants are hypersensitive to both FRc and Rc light treatments in hypocotyl elongation and exhibits a small reciprocal enlargement in cotyledon area, albeit not statistically significant. 16891401 AT1G01060 LHY 290
AT1G01140 SALK_058629 hypersensitive to low potassium media 17486125 AT1G01140 CIPK9 223
AT1G01220 SALK_012400C fkgp-1 mutants have about 40 times more L-fucose than wild type Arabidopsis plants, but the levels of other monosaccharides do not appear to differ significantly in the mutants. No obvious phenotypic abnormalities were observed in the fkgp-1 mutants, nor were any differences in the sugar composition of cell wall polysaccharides detected. 18199744 AT1G01220 FKGP 190
AT2G03720 SALK_042433 Multiple straight hairs 16367956 AT2G03720 MRH6 189
AT2G03800 gek1-1 Ethanol hypersensitivity. 15215505 AT2G03800 GEK1 196
AT2G04240 xerico Resistant to exogenous ABA. Seeds contained lower amounts of endogenous ABA than wildtype. 17933900 AT2G04240 XERICO 256
AT2G05210 pot1-1 No visible phenotype. 17627276 AT2G05210 POT1A 221
AT3G02130 rpk2-2 The homozygous progeny is indistinguishable from wild-type plants during vegetative growth but showed several morphological alterations after bolting. These plants displayed enhanced inflorescence branching and formed three times as many siliques and flowers as did wild-type plants. 17419837 AT3G02130 RPK2 284
AT3G02140 afp4-1 Decreased germination on high concentrations of glucose and sorbitol. 18484180 AT3G02140 TMAC2 300
AT3G02230 rgp1-1 rgp1-1 mutants have significantly lower levels of UDP-L-arabinose mutase activity compared to wild-type plants and significantly lower levels of arabinose in their cell walls. 21478444 AT3G02230 RGP1 301
AT3G02260 tir3-1 RGLG1:rglg1 rglg2 The triple homozygous progeny has low viability, accumulated anthocyanin, and all plants died before shoot emergence. 17586653 AT3G02260 BIG 279
AT3G02310 sep2-1 Non-described subtle phenotype. 10821278 AT3G02310 SEP2 175
AT3G02680 atnbs1-1 Significantly smaller when grown in the presence of methyl methanosulfonate (MMS) with root growth. Normal growth under standard growth conditions. 17672843 AT3G02680 NBS1 190
AT3G02850 CS3816 The skor-1 mutant is sensitive to toxic cations in addition to K+ depletion. 17568770 AT3G02850 SKOR 234
AT3G02870 vtc4-1 ascorbate deficient 16595667 AT3G02870 VTC4 311
AT3G03260 hdg8-1 No visible phenotype. 16778018 AT3G03260 HDG8 194
AT4G14790 pdd17 Defective pollen development. 19237690 AT4G14790 SUV3 312
AT4G15210 bmy1-2 Plants cold-shocked for 6h have an increased starch content compared to wildtype. 16297066 AT4G15210 BAM5 313
AT4G15560 cla1-1 Mutant seeds grown on medium supplemented with non-phosphorylated synthetic 1-deoxy-D-xylulose (DX) develop green leaves. 10982425 AT4G15560 DXS 219
AT4G15570 maa3 Homozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT. 18772186 AT4G15570 MAA3 294
AT4G15802 Athspb-2 Early flowering, reduced fertility, aborted seeds. 20388662 AT4G15802 HSBP 254
AT4G15880 esd4-2 Decreased mRNA levels of the floral repressors FLC and MAF4 and increased mRNA levels of the floral activators FT and SOC1. 17513499 AT4G15880 ESD4 265
AT4G16420 prz1-1 Altered response to auxin and cytokinin 12747832 AT4G16420 ADA2B 279
AT4G16480 atint4-2 No visible phenotype. 16603666 AT4G16480 INT4 284
AT5G10480 pas2-3 Segregates 25% embryo lethal. 18799749 AT5G10480 PAS2 301
AT5G10510 plt3-1 Short roots and shortened root meristem. 17960244 AT5G10510 AIL6 310
AT5G11110 kns2 Defects are specific to pollen exine structure. Smaller mesh size in the exine structure. Increased number of baculae. Fully fertile. 18779216 AT5G11110 SPS2 232
AT5G11260 hy5-101 Under FRc conditions, the length mutant hypocotyls is increased compared to that of wild-type plants. Under Rc conditions, the hypocotyl length is also increased and the cotyledon area is smaller. 16891401 AT5G11260 HY5 221
AT5G11510 myb3r4-1 No visible phenotype. 17287251 AT5G11510 MYB3R-4 336
AT5G12200 pyd2-2 The pyd2-2 mutant has a wild-type appearance under normal growth conditions. Pyrimidine nucleotide and uridine levels are not changed in the mutant, but uracil levels are increased. These mutants cannot grow normally when uracil is provided as a sole nitrogen source. 19413687 AT5G12200 PYD2 310
AT5G13290 crn-1 Increased meristem size. Vegetative meristems are are 30% larger than wild type. After bolting inflorescence meristems are enlarged and occasionally fasciated. Flowers occasionally produce extra organs in the first 3. 12345678 AT5G13290 CRN 189
Report of the Genes SKOR and MAA3:
AT3G02850 CS3816 The skor-1 mutant is sensitive to toxic cations in addition to K+ depletion. 17568770 AT3G02850 SKOR 234
Report of the Genes SKOR and MAA3:
AT4G15570 maa3 Homozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT. 18772186 AT4G15570 MAA3 294
Report of the Genes SKOR and MAA3:
AT3G02850 CS3816 The skor-1 mutant is sensitive to toxic cations in addition to K+ depletion. 17568770 AT3G02850 SKOR 234
Report of the Genes SKOR and MAA3:
AT4G15570 maa3 Homozygotes are not recovered. Female gametophyte development is delayed and asynchronous. During fertilization, fusion of polar nuclei does not occur. Polar nuclei nucloeli are smaller than WT. 18772186 AT4G15570 MAA3 294
Number of entries Chromosome 1[{'COUNT(*)': 4}]
Number of entries Chromosome 2[{'COUNT(*)': 4}]
Number of entries Chromosome 3 [{'COUNT(*)': 9}]
Number of entries Chromosome 4 [{'COUNT(*)': 8}]
Number of entries Chromosome 5[{'COUNT(*)': 4}]
Average ProteinLength in the Chromosome 1 is: [{'AVG(ProteinLength)': Decimal('258.7500')}]
Average ProteinLength in the Chromosome 2 is: [{'AVG(ProteinLength)': Decimal('215.5000')}]
Average ProteinLength in the Chromosome 3 is: [{'AVG(ProteinLength)': Decimal('252.0000')}]
Average ProteinLength in the Chromosome 4 is: [{'AVG(ProteinLength)': Decimal('277.5000')}]
Average ProteinLength in the Chromosome 5 is: [{'AVG(ProteinLength)': Decimal('271.2857')}]
###Markdown
Problem 1
###Code
%sql show databases
###Output
* mysql+pymysql://root:***@127.0.0.1:3306/mysql
4 rows affected.
|
src/Analysis.ipynb | ###Markdown
Malinois experiment
###Code
embeddings.dist('malinois.n.01', 'belgian_sheepdog.n.01', type="poincare")
embeddings.dist('malinois.n.01', 'sea_cow.n.01', type="poincare")
embeddings.search_word("wh")
###Output
_____no_output_____
###Markdown
Experiment on the norms
###Code
print(embeddings.norm('placental.n.01'))
print(embeddings.norm('mammal.n.01'))
print(embeddings.norm('canine.n.02'))
print(embeddings.norm('hunting_dog.n.01'))
print(embeddings.norm('sea_cow.n.01'))
print(embeddings.norm('white-tailed_jackrabbit.n.01'))
###Output
0.057099151795455196
0.04258903632447418
0.6567139014711089
0.7980162880156297
0.8773954569006239
0.9999801572141991
###Markdown
Data loading and preparation Loading required python libraries
###Code
import pandas as pd
# Loading and checking the training dataset
df_train = pd.read_csv('./../data/training_data.txt', header=None)
df_train.head()
# Loading and checking the test dataset
df_test = pd.read_csv('./../data/test_data_v0.txt', header=None)
df_test.head()
# Remove the unnecessary trailing tabs in test dataset
test = df_test[0].map(str.strip)
test.head()
# Convert the train dataset to a pandas series
train = df_train[0]
train.head()
# Spliting the training dataset into response and predictors
y_train = train.map(lambda x: x.split()[0])
x_train = train.map(lambda x: ' '.join(x.split()[1:]))
# Spliting the test dataset into response and predictors
y_test = test.map(lambda x: x.split()[0])
x_test = test.map(lambda x: ' '.join(x.split()[1:]))
###Output
_____no_output_____
###Markdown
Q1 Analysis Loading Required python libraries
###Code
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import numpy as np
# Run this if nltk is not configured before
# =========================================
# nltk.download()
# Define stop words
stop_words = set(stopwords.words('english'))
def removeStopWords(x):
"""Return only words that are not in stop_words"""
return [w for w in x if not w in stop_words]
def getLemma(x):
"""Return the lemma of each word"""
return [WordNetLemmatizer().lemmatize(w) for w in x]
# Tokenize each sentence in the training set, remove stop-words and take the lemma
x = x_train.map(word_tokenize).map(removeStopWords).map(getLemma)
###Output
_____no_output_____
###Markdown
Calculate Word Counts
###Code
# Get Unigram Word Counts
unigram_wcounts = x.groupby(y_train).apply(lambda x: [w for rec in x for w in rec]).map(nltk.FreqDist)
unigram_wcounts = pd.DataFrame(list(unigram_wcounts), index=unigram_wcounts.index)
unigram_wcounts
# Get Bigram Word Counts
bigram_wcounts = x.groupby(y_train).apply(lambda x: [w for rec in x for w in nltk.bigrams(rec)]).map(nltk.FreqDist)
bigram_wcounts = pd.DataFrame(list(bigram_wcounts), index=bigram_wcounts.index)
bigram_wcounts
# Get Trigram Word Counts
trigram_wcounts = x.groupby(y_train).apply(lambda x: [w for rec in x for w in nltk.trigrams(rec)]).map(nltk.FreqDist)
trigram_wcounts = pd.DataFrame(list(trigram_wcounts), index=trigram_wcounts.index)
trigram_wcounts
###Output
_____no_output_____
###Markdown
Calculate Total Word Counts
###Code
# Unigram total counts
unigram_total_wcount = unigram_wcounts.sum(axis=1)
unigram_total_wcount
# Bigram total counts
bigram_total_wcount = bigram_wcounts.sum(axis=1)
bigram_total_wcount
# Trigram total counts
trigram_total_wcount = trigram_wcounts.sum(axis=1)
trigram_total_wcount
###Output
_____no_output_____
###Markdown
Calculate Probabilities
###Code
unigram_probs = unigram_wcounts.div(unigram_total_wcount, axis=0)
unigram_probs
bigram_probs = bigram_wcounts.div(bigram_total_wcount, axis=0)
bigram_probs
trigram_probs = trigram_wcounts.div(trigram_total_wcount, axis=0)
trigram_probs
###Output
_____no_output_____
###Markdown
Predictions
###Code
def getUnigramProb(word):
try:
pProb = unigram_probs.loc['put', word]
except:
pProb = 0
try:
tProb = unigram_probs.loc['take', word]
except:
tProb = 0
return {
'pProb': pProb,
'tProb': tProb,
}
getUnigramProb('blue')
def getBigramProb(word):
try:
pProb = bigram_probs[word]['put']
except:
pProb = 0
try:
tProb = bigram_probs[word]['take']
except:
tProb = 0
return {
'pProb': pProb,
'tProb': tProb,
}
getBigramProb(('block', 'blue'))
def getTrigramProb(word):
try:
pProb = trigram_probs[word]['put']
except:
pProb = 0
try:
tProb = trigram_probs[word]['take']
except:
tProb = 0
return {
'pProb': pProb,
'tProb': tProb,
}
getTrigramProb(('block', 'circle', 'circle'))
# Prepare the test set
x2 = x_test.map(word_tokenize).map(removeStopWords).map(getLemma)
x2
def predict(sent, predType='uni'):
pProb = 0
tProb = 0
for w in sent:
if predType == 'uni':
p = getUnigramProb(w)
elif predType == 'bi':
p = getBigramProb(w)
else:
p = getTrigramProb(w)
pProb += p['pProb']
tProb += p['tProb']
res = 'put' if pProb > tProb else 'take'
return {
'prediction': res,
'pProb': pProb,
'tProb': tProb
}
unigram_prediction = x2.map(predict)
unigram_prediction
x2_bigram = x2.map(lambda x: list(nltk.bigrams(x)))
x2_bigram
bigram_prediction = x2_bigram.map(lambda x: predict(x, 'bi'))
bigram_prediction
x2_trigram = x2.map(lambda x: list(nltk.trigrams(x)))
x2_trigram
trigram_prediction = x2_trigram.map(lambda x: predict(x, 'tri'))
trigram_prediction
###Output
_____no_output_____
###Markdown
Analysis of the results Unigram
###Code
unigram_prediction_comparison = unigram_prediction.map(lambda x: x['prediction']) == y_test
unigram_prediction_comparison
unigram_test_accuracy = unigram_prediction_comparison.sum()/10
unigram_test_accuracy
unigram_train_accuracy = (x.map(predict).map(lambda x: x['prediction']) == y_train).sum()/len(x)
unigram_train_accuracy
###Output
_____no_output_____
###Markdown
Bigram
###Code
bigram_prediction_comparison = bigram_prediction.map(lambda x: x['prediction']) == y_test
bigram_prediction_comparison
bigram_test_accuracy = bigram_prediction_comparison.sum()/10
bigram_test_accuracy
bigram_train_accuracy = (x.map(lambda x: list(nltk.bigrams(x))).map(lambda x: predict(x, 'bi')).map(lambda x: x['prediction']) == y_train).sum()/len(x)
bigram_train_accuracy
trigram_prediction_comparison = trigram_prediction.map(lambda x: x['prediction']) == y_test
trigram_prediction_comparison
trigram_test_accuracy = trigram_prediction_comparison.sum()/10
trigram_test_accuracy
trigram_train_accuracy = (x.map(lambda x: list(nltk.trigrams(x))).map(lambda x: predict(x, 'tri')).map(lambda x: x['prediction']) == y_train).sum()/len(x)
trigram_train_accuracy
###Output
_____no_output_____
###Markdown
Data Science Challenge. Data analysis and exploration will be carried out to answer the questions of the test.
###Code
# import packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import confusion_matrix as cm
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.linear_model import LogisticRegression
%matplotlib inline
# read data
df = pd.read_csv('../data/bank-full.csv', sep= ";")
df.head()
# describe data
des = df.describe()
des.head()
# check if any column has null values
cols = df.columns
for col in cols:
    print(col + ' has null values: ', df[col].isnull().any())
###Output
age has null values:  False
job has null values:  False
marital has null values:  False
education has null values:  False
default has null values:  False
balance has null values:  False
housing has null values:  False
loan has null values:  False
contact has null values:  False
day has null values:  False
month has null values:  False
duration has null values:  False
campaign has null values:  False
pdays has null values:  False
previous has null values:  False
poutcome has null values:  False
y has null values:  False
###Markdown
1. Which profession is most likely to take out a loan? Of which type?
###Code
# Qual % de clientes possui apenas um tipo de emprestimo e qual % de ambos e qual nao tem emprestimo nenhum
both = 0
housing = 0
loan = 0
noloan = 0
for i in range(0, len(df)):
if (df.iloc[i].housing == 'yes'):
if (df.iloc[i].loan == 'yes'):
both = both + 1
else:
housing = housing + 1
elif (df.iloc[i].loan == 'no'):
noloan = noloan + 1
else:
loan = loan + 1
bothp = round(both / len(df) * 100, 1)
housingp = round(housing / len(df) * 100, 1)
loanp = round(loan / len(df) * 100, 1)
noloanp = round(noloan / len(df) * 100, 1)
print('both: ' + str(bothp) + '%')
print('housing: ' + str(housingp) + '%')
print('loan: ' + str(loanp) + '%')
print('noloan: ' + str(noloanp) + '%')
# Add uma coluna adicional com o tipo de empréstimo de cada cliente
df1 = []
for i in range(0, len(df)):
if (df.iloc[i].housing == 'yes'):
if (df.iloc[i].loan == 'yes'):
df1.append(3)
else:
df1.append(1)
elif (df.iloc[i].loan == 'no'):
df1.append(0)
else:
df1.append(2)
df1 = pd.DataFrame(df1)
df = pd.concat([df, df1], axis = 1)
df.head()
# Histrograma de profissoes por tipo de emprestimo
prof = df[0].groupby([df['job'], df[0]]).count()
df_jl = pd.DataFrame(prof.unstack())
df_jl.columns = ['noloan', 'housing', 'loan', 'both']
df_jl.plot(kind='bar', title ="Histograma de emprestimos por job", figsize=(18, 6), legend=True, fontsize=12)
plt.show()
###Output
_____no_output_____
###Markdown
Answer to Question 1: Checking the histogram, we can see that the profession with the greatest tendency to take out a loan is **blue-collar**, and the loan type is **housing**. 2. Relating the number of contacts to the success of the campaign, what are the relevant points to be observed?
###Code
# Dataset e os gráficos de barra para encontrar as relações.
df_camp = []
df_cont = []
for i in range(0, len(df)):
if (df.iloc[i].y == 'yes'):
df_camp.append(df.iloc[i].campaign)
df_cont.append(df.iloc[i].contact)
df_camp = pd.DataFrame(df_camp)
df_cont = pd.DataFrame(df_cont)
df_camp = pd.concat([df_camp, df_cont], axis=1)
df_camp.columns=['nrcontacts', 'typecontacts']
cont = df_camp['nrcontacts'].groupby([df_camp['nrcontacts']]).count()
min(df_camp['nrcontacts'])
max(df_camp['nrcontacts'])
cont = pd.DataFrame(cont)
# Plot bar
sns.set(rc={'figure.figsize':(18,6)})
sns.barplot(x=cont.index, y=cont.nrcontacts)
for i in range(0, len(cont)):
nrcont = i + 1
perc = cont.iloc[i]/len(df_camp) * 100
perc = int(perc)
print('Nr. de contatos: ' + str(nrcont) + ' Porcentagem: ' + str(perc) + ' %')
perc = 0
###Output
Nr. de contatos: 1 Porcentagem: 48 %
Nr. de contatos: 2 Porcentagem: 26 %
Nr. de contatos: 3 Porcentagem: 11 %
Nr. de contatos: 4 Porcentagem: 5 %
Nr. de contatos: 5 Porcentagem: 2 %
Nr. de contatos: 6 Porcentagem: 1 %
Nr. de contatos: 7 Porcentagem: 0 %
Nr. de contatos: 8 Porcentagem: 0 %
Nr. de contatos: 9 Porcentagem: 0 %
Nr. de contatos: 10 Porcentagem: 0 %
Nr. de contatos: 11 Porcentagem: 0 %
Nr. de contatos: 12 Porcentagem: 0 %
Nr. de contatos: 13 Porcentagem: 0 %
Nr. de contatos: 14 Porcentagem: 0 %
Nr. de contatos: 15 Porcentagem: 0 %
Nr. de contatos: 16 Porcentagem: 0 %
Nr. de contatos: 17 Porcentagem: 0 %
Nr. de contatos: 18 Porcentagem: 0 %
Nr. de contatos: 19 Porcentagem: 0 %
Nr. de contatos: 20 Porcentagem: 0 %
Nr. de contatos: 21 Porcentagem: 0 %
Nr. de contatos: 22 Porcentagem: 0 %
###Markdown
Answer to Question 2: Looking at the bar chart, we can see that most of the campaign's successes occur **on the first and second contact**. The third contact is still relevant, at approximately 11%, but from the fourth contact onwards the percentage of successes per number of contacts stops being relevant. In other words, a large number of contacts does not increase the chance of success. 3. Based on the take-up results of this campaign, what average and maximum number of calls would you recommend to optimise take-up? To analyse the calls, we will check which contact modes were used with the client. We will reduce the dataset and use the data for a maximum of six contacts, since together they account for 93% of the campaign's total successes (a quick way to verify these cumulative figures is sketched below).
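A minimal sketch of that check, reusing the `cont` frequency table of successes per number of contacts built in the cell above:

```python
# Sketch only: cumulative share of campaign successes by number of contacts.
cum_share = cont['nrcontacts'].cumsum() / cont['nrcontacts'].sum() * 100
print(cum_share.head(6))  # roughly 85% within 3 contacts and 93% within 6
```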
###Code
df_camp_sub = []
for j in range(0, len(df_camp)):
if (df_camp.iloc[j].nrcontacts <= 6):
df_camp_sub.append(df_camp.iloc[j])
df_camp_sub = pd.DataFrame(df_camp_sub)
camp_sub = df_camp_sub['typecontacts'].groupby([df_camp_sub['nrcontacts'], df_camp_sub['typecontacts']]).count()
camp_sub = pd.DataFrame(camp_sub)
camp_sub.columns = ['nrtypecontacts']
camp_sub.reset_index(inplace=True)
sns.set(rc={'figure.figsize':(18,6)})
ax = sns.barplot(x = 'nrcontacts', y = 'nrtypecontacts', hue = 'typecontacts', data=camp_sub)
###Output
_____no_output_____
###Markdown
Answer to Question 3: We verified that the main contact mode is the telephone call, whether via cellular or landline. With this, I can recommend an average of **3 calls, since we would reach about 85% of successes, and a maximum of 6 calls, where we would reach about 93% of successes.** 4. Does the result of the previous campaign have relevance for the current campaign?
###Code
# Bar plot about outcome
sns.set(rc={'figure.figsize':(18,6)})
sns.countplot(x=df['poutcome'], data=df)
###Output
_____no_output_____
###Markdown
Let us define two hypotheses: - Hypothesis 0: the difference between the rates is zero - Hypothesis 1: the difference between the rates is NOT zero. Since the outcome is binary, a two-sample test on the two rates (a t-test / two-proportion z-test) can be used; a sketch is given below.
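A minimal sketch of such a test (illustrative only, assuming the `statsmodels` package is available):

```python
# Compare the subscription rate ('y' == 'yes') between clients whose previous
# campaign outcome was 'success' and those where it was 'failure'.
from statsmodels.stats.proportion import proportions_ztest
sub = df[df['poutcome'].isin(['success', 'failure'])]
counts = sub.groupby('poutcome')['y'].apply(lambda s: (s == 'yes').sum())
nobs = sub.groupby('poutcome')['y'].count()
stat, pval = proportions_ztest(counts.values, nobs.values)
print(stat, pval)  # a small p-value rejects Hypothesis 0 (the rates differ)
```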
###Code
grouped4 = df.pivot_table(values='age', index='poutcome', columns='y', aggfunc='count')
grouped4.head()
data = df.copy()
filtering = (data['poutcome']=='failure') | (data['poutcome']=='success')
# Pega amostras com 'poutcome' válidos
ex2 = data[['poutcome','y']][filtering]
# Transforma 'y' e 'poutcome' em valores numéricos (label encoding)
bool_y, label_y = pd.factorize(ex2['y'])
bool_p, label_p = pd.factorize(ex2['poutcome'])
# Plot confusion matrix
df_cm = pd.DataFrame(cm(bool_p, bool_y), index=label_p, columns=label_p)
plt.figure(figsize = (18,6))
plot = sns.heatmap(df_cm, annot=True, annot_kws={"size": 18}, fmt="d")
plot.yaxis.set_ticklabels(plot.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
plot.set_ylim([0,2])
plot.xaxis.set_ticklabels(plot.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=14)
sns.set(font_scale=1)
plt.ylabel('Previous Campaign')
plt.xlabel('Current Campaign')
plt.title("Confusion Matrix (Previous vs Current Result)\n", size=20)
plt.show()
###Output
_____no_output_____
###Markdown
Answer to Question 4: Looking at the confusion matrix, we can see a strong tendency for individuals who refused the previous campaign to also refuse the current one. We also note that a good share of the successes in the previous campaign carry over to the current campaign. From the analysis, we can assume that the result of the previous campaign is relevant for the current one (a chi-square test of independence, sketched below, could be used to quantify this association). 5. What is the determining factor for the bank to require credit insurance?
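An illustrative sketch of such a test (assuming `scipy` is available; `ex2` is the filtered frame built in the cell above):

```python
# Chi-square test of independence between previous outcome and current result.
from scipy.stats import chi2_contingency
table = pd.crosstab(ex2['poutcome'], ex2['y'])
chi2, p, dof, expected = chi2_contingency(table)
print(chi2, p)  # a small p-value indicates the previous result is relevant
```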
###Code
# Features importantes
df5 = pd.DataFrame(data.iloc[:,:8])
# Separamos features e target variable
X = df5.drop('default', axis=1)
Y = df5[['default']]
# Trasformamos texto para dado numérico (label encoding)
for col in list(set(X.columns)-set(['age', 'balance'])):
X[col] = pd.factorize(X[col])[0]
# Instanciamos e treinamos o classificador
model = ExtraTreesClassifier()
model.fit(X, Y.values.ravel())
main_features = pd.Series(model.feature_importances_, index=X.columns)
plt.figure(figsize=(18, 6))
main_features.nlargest(5).plot(kind='barh')
plt.xlabel("Importance")
plt.ylabel("Features")
plt.title("Classifying Relevant Features", size=16)
plt.show()
###Output
_____no_output_____
###Markdown
Based on the classification, it is possible to infer that the features **balance and age** are the most relevant for the analysis. Therefore, we will group their values with respect to the **default** variable.
###Code
df = pd.DataFrame(data[['age','balance','default']])
# Plotando grupo 'YES'
plt.figure(figsize=(18, 6))
fig1, ax1 = plt.subplots(1, 2, sharex="col", figsize=(10,5))
fig1.suptitle("Yes Group", size=22)
ax1[0].title.set_text("Balance")
ax1[1].title.set_text("Age")
ax1[0].set_ylabel("Amount of Clients")
ax1[0].set_xlabel("Balance Bins")
ax1[1].set_xlabel("Age Bins")
df[df['default']=='yes']['balance'].hist(ax=ax1[0],bins=50)
df[df['default']=='yes']['age'].hist(ax=ax1[1], bins=50)
# Plotando grupo 'NO'
plt.figure(figsize=(18, 6))
fig2, ax2 = plt.subplots(1, 2, sharex="col", figsize=(10,5))
fig2.suptitle("No Group", size=22)
ax2[0].title.set_text("Balance")
ax2[1].title.set_text("Age")
ax2[0].set_ylabel("Amount of Clients")
ax2[0].set_xlabel("Balance Bins")
ax2[1].set_xlabel("Age Bins")
df[df['default']=='no']['balance'].hist(ax=ax2[0],bins=50)
df[df['default']=='no']['age'].hist(ax=ax2[1], bins=50)
plt.show()
###Output
_____no_output_____
###Markdown
Answer to Question 5: The determining factor is the client's age; the bank offers default credit to individuals who are within the economically active age range **21<x<60**. 6. What are the most prominent characteristics of a client who has a housing loan? In this section we will use Weight of Evidence and Information Value; the standard definitions are given below.
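For reference, the standard per-bin definitions that the `iv` function below implements are:

$$\mathrm{WoE}_i = \ln\frac{\%\,\mathrm{events}_i}{\%\,\mathrm{non\text{-}events}_i}, \qquad \mathrm{IV} = \sum_i \left(\%\,\mathrm{events}_i - \%\,\mathrm{non\text{-}events}_i\right)\cdot \mathrm{WoE}_i$$

where the percentages are taken relative to the total number of events and of non-events, respectively.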
###Code
def iv(df, col, targ):
event_total = len(df[df[targ]=='yes'])
nonevent_total = len(df[df[targ]=='no'])
elements = list(df[col].unique())
woe_vals = []
iv_vals = []
# Dividimos os dados em dois grupos: evento e não-evento
event = df[df[targ]=='yes']
nonevent = df[df[targ]=='no']
# Calculamos WoE e IV para cada uma das variaveis de uma coluna
for e in elements:
event_percent = len(event[event[col]==e])/event_total
        nonevent_percent = len(nonevent[nonevent[col]==e])/nonevent_total
woe_vals.append(np.log(event_percent/nonevent_percent))
iv_vals.append((event_percent-nonevent_percent)*woe_vals[-1])
return pd.DataFrame(zip(woe_vals, iv_vals), index=elements, columns=['woe','iv'])
# Pegamos apenas atributos úteis
df = pd.DataFrame(data.iloc[:,:8])
# Discretizamos 'balance' e 'age' (binning)
df['age_bin'] = pd.qcut(df['age'], 7)
df['balance_bin'] = pd.qcut(df['balance'], 10)
df = df.drop(labels=['balance','age'], axis=1)
df.head()
# Aplicando a funcao iv
iv_totals = []
index = list(df.drop('housing', axis=1).keys())
for att in index:
iv_totals.append(iv(df, att, 'housing')['iv'].sum())
df_iv = pd.DataFrame(iv_totals, index=index, columns=['IV'])
df_iv.sort_values(by='IV', ascending=False)
###Output
_____no_output_____ |
agla/demo/ipynb/c_komplexaufgabe.ipynb | ###Markdown
Insight into computing with agla, by Holger Böttcher - [email protected]. This work is released under the free licence [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/deed.de). Complex task: Cubus hyperbolicus. The example covers task 48 from W. Herget, T. Jahnke, W. Kroll, Produktive Aufgaben für den Mathematikunterricht in der Sekundarstufe II, Cornelsen 2011. Three pairwise skew and mutually orthogonal edges of a cube are extended to infinity in both directions. These three lines are to be joined to one another by straight-line connections. 1. Develop a method for systematically finding the attachment points of such struts, and draw an oblique projection of the unit cube with several struts. 2. If the struts, like the three cube edges, are extended to infinity, a family of lines is obtained. Investigate the family of these "strut lines" for its properties. 3. The strut lines appear to be distributed chaotically in space. In fact, however, there is a fixed direction that forms the same angle with every strut line. Determine this angle. Try to form a picture of the spatial arrangement of the family of lines.
###Code
%run agla/start # Aktivierung des Paketes
###Output
_____no_output_____
###Markdown
The cube. A solid (Körper) object is created; a solid is formally defined by a set of vertices (Ecken) and a set of edges (Kanten), i.e. connections between the vertices.
###Code
ve = Viereck(v(1, 0, 0), v(0, 1, 0)) # Einfacher Weg zur Erzeu-
p = Prisma(ve, 1) # gung eines Würfels
würfel = w = p.in_körper # als Körper
e = w.ecken; e
w.kanten
###Output
_____no_output_____
###Markdown
Part 1: The distinguished lines
###Code
g1 = x_achse # x_achse ist vordefiniert
g2 = Gerade(e[5], v(0, 1, 0))
g3 = Gerade(e[3], v(0, 0, 1))
g1.prg
g2.prg
g3.prg
bb = [o.bild(schrägbild) for o in (w, g1, g2, g3)]
sicht_box(2) # ein 3D-Bild
zeichne(*bb, box=nein, achsen=nein)
zeichne(w, [g1, 3], [g2, 3, blau], [g3, 3, grün])
###Output
_____no_output_____
###Markdown
Attachment points of the strut lines
###Code
P = v(x, 0, 0); Q = v(1, y, 1); R = v(0, 1, z) # Ansatz
P, Q, R
###Output
_____no_output_____
###Markdown
A condition on the attachment points follows from the requirement that they lie on one straight line, i.e. that they are collinear. This means $\; \vec{PR} = s \cdot \vec{PQ}\:$ for some $s$. For the solution, the equation is rearranged so that the right-hand side is the zero vector, and the left-hand side is then used
###Code
gleichung = v(P, R) - s*v(P, Q)
löse(gleichung, [x, y, z])
###Output
_____no_output_____
###Markdown
Obviously there is no solution for $s=0\,$ and $s=1\,$. The computed points are
###Code
P = P.subs(x, s/(s-1)); Q = Q.subs(y, 1/s); R = R.subs(z, s)
P, Q, R
###Output
_____no_output_____
###Markdown
Part 2: The family of strut lines (for all $s$ except $s \in \{0, 1\}$)
###Code
# Erzeugung der Geraden auf der Basis von P und Q
streb_gerade = sg = Gerade(P, v(P, Q))
sg
sg.prg
# Kontrolle durch Einsetzen in die Gleichung: auch R liegt auf
# allen Geraden der Schar
sg.Prg(R)
###Output
_____no_output_____
###Markdown
Considering two elements of the family
###Code
s1, s2, t1, t2 = symbols('s_1, s_2, t_1, t_2')
s1, s2, t1, t2
sg1 = sg.sch_el(s1); sg2 = sg.sch_el(s2)
# sch_el - Scharelement-Methode
sg1.pkt(t1), sg2.pkt(t2)
###Output
_____no_output_____
###Markdown
Using these general points of the two family elements, the considerations on page 228 of the book are easy to follow: for the two lines to intersect, $t_1=t_2$ must hold ($z$-component); from the $y$-component one derives $s_1=s_2$, from which it then follows that the equations for the $x$-component are satisfied as well - the lines would have to be identical. Likewise one can deduce that distinct lines have no point of intersection. Control plot (animation)
###Code
ber = (-2.7, 0.75) # ca 8 sec
zeichne(g1, [g2, blau], [g3, grün], [sg, rot, 2, ber],
[P, 2, ber], [Q, 2, blau, ber], [R, 2, grün, ber],
achsen=nein, skalen=nein)
###Output
agla: lange Rechenzeit
###Markdown
Part 3: Computing the fixed direction
###Code
rv = v(a, b, c) # Ansatz für die gesuchte Richtung
###Output
_____no_output_____
###Markdown
For a constant angle, the scalar product of the unit direction vectors of the strut lines with this vector must be constant (equal to some constant $k$)
###Code
sp = sg.richt.einh_vekt ° rv
sp
aa = einfach(sp)
aa
n = s^4-2*s^3+3*s^2-2*s+1
n
###Output
_____no_output_____
###Markdown
With
###Code
factor(n) # SymPy-Anweisungen (factor, collect, ... )
# wurden nicht eingedeutscht
###Output
_____no_output_____
###Markdown
squaring and taking the square root cancel each other; what remains in the denominator is the expression in the parentheses: $\;s^2-s+1$. So, for every $s$, the quantities
###Code
collect(-a*s + b*s - b + c*s^2 - c*s, s), expand(k * (s^2 - s + 1))
###Output
_____no_output_____
###Markdown
must be equal; comparing coefficients shows that this is only possible with
###Code
c = k; b = -k; a = -k
a, b, c
###Output
_____no_output_____
###Markdown
(the values for $b$ and $c$ can be read off directly; $a$ is then easily computed from $\;-a+b-c = -k\:$). This gives, for the sought direction vector,
###Code
rv = v(-k, -k, k)
rv
###Output
_____no_output_____
###Markdown
$k$ can be eliminated from it
###Code
rv = 1/k * rv
rv
###Output
_____no_output_____
###Markdown
Angle between the strut lines and this vector
###Code
cosdelta = sg.richt.einh_vekt * rv / rv.betrag
cosdelta
einfach(cosdelta) # das wird mit den obigen Überlegungen vereinfacht
cosdelta = sqrt(3)/3
cosdelta, arccosg(cosdelta), arccosg(cosdelta).n(4)
# (die verwendete agla-Funktion arccosg berechnet den Winkel in Grad)
###Output
_____no_output_____
###Markdown
Behaviour of the distinguished lines under rotation about one of the cube diagonals
###Code
diagonale = dg = Gerade(e[2], v(e[2], e[4])) # Würfeldiagonale
dg.prg
abb = drehung(dg, 120) # Winkel in Grad
identisch(g1.bild(abb), g2), identisch(g2.bild(abb), g3), \
identisch(g3.bild(abb), g1),
# die Geraden gehen bei einer Drehung um 120° ineinander über
###Output
_____no_output_____
###Markdown
Control plot (animation with one of the lines). Displaying all 3 lines together is possible, but requires three times the computation time
###Code
abb = drehung(dg, u) # u - allgemeiner Winkel
# g2 geht in g3 über; ca 10 sec
ber = 0, 120
zeichne([g2, 2, blau], [g3, 2, grün], [g2.bild(abb), 2, rot, ber], dg)
###Output
agla: lange Rechenzeit
###Markdown
Investigating the curve in which the strut lines intersect the plane through the origin
###Code
E = Ebene(O, rv) # O ist der Ursprung, rv ist Normalenvektor
E.koord
S = sg.schnitt(E).einfach # die Schnittpunktschar
S
###Output
_____no_output_____
###Markdown
A plot suggests that all intersection points lie on a circle:
###Code
ss = [0, 12, 10, 50, 100, 1/2, 1/30, 1/100, -2, -10, -50, -100,
-1/2, -1/30, -1/100]
SS = [ S.sch_el(s) for s in ss ] # einige Punkte der Schar
sicht_box(3)
zeichne(E, *[p for p in SS] )
###Output
_____no_output_____
###Markdown
Proof that the curve is a circle, and determination of its parameters
###Code
P, Q, R = [S.sch_el(s) for s in (0, 1, -1)] # 3 Punkte der Kurve
P, Q, R
mPQ = Ebene(1/2*(P+Q), v(P, Q)) # 2 Mittelsenkrechte
mQR = Ebene(1/2*(Q+R), v(Q, R)) # (Ebenen)
mPQ.koord, mQR.koord
h = mPQ.schnitt(mQR) # eine Gerade
M = h.schnitt(E)
M # der potentielle Mittelpunkt
einfach(M.abstand(S))
###Output
_____no_output_____
###Markdown
All points of the family have the same distance to $M$; hence the curve is a circle, its centre is $M$, and its radius is this common distance
###Code
r = M.abstand(P); r
###Output
_____no_output_____
###Markdown
Investigating the curve in which the strut lines intersect a general plane
###Code
c = Symbol('c') # c wurde oben mit einem Wert belegt
E = Ebene(-1, -1, 1, -c) # Parallele zur obigen Ursprungsebene
E.koord
S = sg.schnitt(E); S
E.Koord(S) # Kontrolle durch Einsetzen, ob alle Punkte S in E liegen
###Output
_____no_output_____
###Markdown
Selecting three points of the curve:
###Code
S.subs(s, 1) # s=1 ist ungeeignet; NaN - not a number
P, Q, R = [S.subs(s, t) for t in (0, 2, -1)]
P, Q, R
mPQ = Ebene(1/2*(P+Q), v(P, Q)) # 2 Mittelsenkrechte
mQR = Ebene(1/2*(Q+R), v(Q, R)) # (Ebenen)
mPQ.koord, mQR.koord
h = mPQ.schnitt(mQR)
h
h.prg
M = E.schnitt(h).einfach # der (potentielle) Mittelpunkt des Kreises
M
aa = S.abstand(M)^2 # das Quadrat läßt sich besser behandeln
aa
###Output
_____no_output_____
###Markdown
The expression is very complex, but it can be simplified without difficulty; moreover, it does not depend on $s$:
###Code
rr = einfach(aa); rr
rr = factor(rr); rr
r = sqrt(rr); r
einfach(S.abstand(M)^2 - r^2) # Kontrolle
###Output
_____no_output_____
###Markdown
The value obtained therefore holds for all $s$. This proves that in the general case, too, the curve is a circle; it has centre $M$ and radius $r$, and its carrier plane is $E$
###Code
k = Kreis(E, M, r); k
k.M, k.r
###Output
_____no_output_____
###Markdown
Locus of the circle centres
###Code
ok = Gerade(k.M)
ok.prg
identisch(ok, dg) # sie ist die Würfeldiagonale
###Output
_____no_output_____
###Markdown
Control plot with some of the circles
###Code
cc = [-i*0.5 for i in range(8)] + [i*0.5 for i in range(8)]
K = [k.sch_el(c) for c in cc]
zeichne([dg, 2], *K, skalen=nein)
###Output
_____no_output_____
###Markdown
An animation. The surface generated by the family of circles is a hyperboloid; the animation makes visible that the distinguished lines are among its generators
###Code
# Der allgemeine Punkt der Fläche mit den Parametern c (für das
# Element der Kreisschar) und t (für den Kreispunkt; Winkel in Grad)
k.pkt(t)
ff = Fläche(k.pkt(t), (t, 0, 360), (c, -3, 3))
abb # oben definiert
ber = (u, 0, 360)
zeichne([ff, gelb], # ca. 10 sec für eine Gerade,
[g1.bild(abb), 2, rot, ber], # ca. 30 sec für alle drei
[g2.bild(abb), 2, grün, ber],
[g3.bild(abb), 2, blau, ber],
achsen=nein, box=nein)
###Output
agla: lange Rechenzeit
agla: lange Rechenzeit
agla: lange Rechenzeit
|
MIO/alberto bootcamp/(Bootcamp)-02-03-for Loops.ipynb | ###Markdown
______Content Copyright by Pierian Data for LoopsA for loop acts as an iterator in Python; it goes through items that are in a *sequence* or any other iterable item. Objects that we've learned about that we can iterate over include strings, lists, tuples, and even built-in iterables for dictionaries, such as keys or values.We've already seen the for statement a little bit in past lectures but now let's formalize our understanding.Here's the general format for a for loop in Python: for item in object: statements to do stuff The variable name used for the item is completely up to the coder, so use your best judgment for choosing a name that makes sense and you will be able to understand when revisiting your code. This item name can then be referenced inside your loop, for example if you wanted to use if statements to perform checks.Let's go ahead and work through several examples of for loops using a variety of data object types. We'll start simple and build more complexity later on. Example 1Iterating through a list
###Code
# We'll learn how to automate this sort of list in the next lecture
list1 = [1,2,3,4,5,6,7,8,9,10]
for num in list1:
print(num)
###Output
1
2
3
4
5
6
7
8
9
10
###Markdown
Great! Hopefully this makes sense. Now let's add an if statement to check for even numbers. We'll first introduce a new concept here--the modulo. ModuloThe modulo allows us to get the remainder in a division and uses the % symbol. For example:
###Code
17 % 5
###Output
_____no_output_____
###Markdown
This makes sense since 17 divided by 5 is 3 remainder 2. Let's see a few more quick examples:
###Code
# 3 Remainder 1
10 % 3
# 2 Remainder 4
18 % 7
# 2 no remainder
4 % 2
###Output
_____no_output_____
###Markdown
Notice that if a number is fully divisible with no remainder, the result of the modulo call is 0. We can use this to test for even numbers, since if a number modulo 2 is equal to 0, that means it is an even number!Back to the for loops! Example 2Let's print only the even numbers from that list!
###Code
for num in list1:
if num % 2 == 0:
print(num)
###Output
2
4
6
8
10
###Markdown
We could have also put an else statement in there:
###Code
for num in list1:
if num % 2 == 0:
print(num)
else:
print('Odd number')
###Output
Odd number
2
Odd number
4
Odd number
6
Odd number
8
Odd number
10
###Markdown
Example 3Another common idea during a for loop is keeping some sort of running tally during multiple loops. For example, let's create a for loop that sums up the list:
###Code
# Start sum at zero
list_sum = 0
for num in list1:
list_sum = list_sum + num
print(list_sum)
###Output
55
###Markdown
Great! Read over the above cell and make sure you understand fully what is going on. Also we could have implemented a += to perform the addition towards the sum. For example:
###Code
# Start sum at zero
list_sum = 0
for num in list1:
list_sum += num
print(list_sum)
###Output
55
###Markdown
Example 4We've used for loops with lists, how about with strings? Remember strings are a sequence so when we iterate through them we will be accessing each item in that string.
###Code
for letter in 'This is a string.':
print(letter)
###Output
T
h
i
s
i
s
a
s
t
r
i
n
g
.
###Markdown
Example 5Let's now look at how a for loop can be used with a tuple:
###Code
tup = (1,2,3,4,5)
for t in tup:
print(t)
###Output
1
2
3
4
5
###Markdown
Example 6Tuples have a special quality when it comes to for loops. If you are iterating through a sequence that contains tuples, the item can actually be the tuple itself, this is an example of *tuple unpacking*. During the for loop we will be unpacking the tuple inside of a sequence and we can access the individual items inside that tuple!
###Code
list2 = [(2,4),(6,8),(10,12)]
for tup in list2:
print(tup)
# Now with unpacking!
for (t1,t2) in list2:
print(t1)
###Output
2
6
10
###Markdown
Cool! With tuples in a sequence we can access the items inside of them through unpacking! The reason this is important is because many objects will deliver their iterables through tuples. Let's start exploring iterating through Dictionaries to explore this further! Example 7
###Code
d = {'k1':1,'k2':2,'k3':3}
for item in d:
print(item)
###Output
k1
k2
k3
###Markdown
Notice how this produces only the keys. So how can we get the values? Or both the keys and the values? We're going to introduce three new Dictionary methods: **.keys()**, **.values()** and **.items()**In Python each of these methods return a *dictionary view object*. It supports operations like membership test and iteration, but its contents are not independent of the original dictionary – it is only a view. Let's see it in action:
###Code
# Create a dictionary view object
d.items()
###Output
_____no_output_____
###Markdown
Since the .items() method supports iteration, we can perform *dictionary unpacking* to separate keys and values just as we did in the previous examples.
###Code
# Dictionary unpacking
for k,v in d.items():
print(k)
print(v)
###Output
k1
1
k2
2
k3
3
###Markdown
If you want to obtain a true list of keys, values, or key/value tuples, you can *cast* the view as a list:
###Code
list(d.keys())
###Output
_____no_output_____
###Markdown
Remember that dictionaries are unordered, and that keys and values come back in arbitrary order. You can obtain a sorted list using sorted():
###Code
sorted(d.values())
###Output
_____no_output_____ |
02-Python-for-Data-Analysis-NumPy/04-Numpy Exercises.ipynb | ###Markdown
Get the standard deviation of the values in mat
###Code
mat.std()
###Output
_____no_output_____
###Markdown
Get the sum of all the columns in mat
###Code
mat.sum(axis=1)
mat.sum(axis=0)
###Output
_____no_output_____
###Markdown
___ ___ NumPy Exercises Now that we've learned about NumPy let's test your knowledge. We'll start off with a few simple tasks, and then you'll be asked some more complicated questions. Import NumPy as np
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Create an array of 10 zeros
###Code
np.zeros(10)
###Output
_____no_output_____
###Markdown
Create an array of 10 ones
###Code
np.ones(10)
###Output
_____no_output_____
###Markdown
Create an array of 10 fives
###Code
np.zeros(10) + 5
###Output
_____no_output_____
###Markdown
Create an array of the integers from 10 to 50
###Code
#np.arange(10,51,1)
np.linspace(10,50,41)
###Output
_____no_output_____
###Markdown
Create an array of all the even integers from 10 to 50
###Code
np.arange(10,51,2)
###Output
_____no_output_____
###Markdown
Create a 3x3 matrix with values ranging from 0 to 8
###Code
np.arange(9).reshape(3,3)
###Output
_____no_output_____
###Markdown
Create a 3x3 identity matrix
###Code
np.eye(3,3)
###Output
_____no_output_____
###Markdown
Use NumPy to generate a random number between 0 and 1
###Code
np.random.rand(1)
###Output
_____no_output_____
###Markdown
Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution
###Code
np.random.randn(25)
###Output
_____no_output_____
###Markdown
Create the following matrix:
###Code
np.linspace(0.01,1,100).reshape(10,10)
###Output
_____no_output_____
###Markdown
Create an array of 20 linearly spaced points between 0 and 1:
###Code
np.linspace(0,1,20)
###Output
_____no_output_____
###Markdown
Numpy Indexing and SelectionNow you will be given a few matrices, and be asked to replicate the resulting matrix outputs:
###Code
mat = np.arange(1,26).reshape(5,5)
mat
np.arange(1,10)
mat = np.arange(1,26).reshape(5,5)
mat
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
arr = np.arange(12,27).reshape(3,5)[:,0:4]
arr
mat[2:,1:]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
arr[1,3]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
np.arange(2,13,5).reshape(3,1)
mat[:3,1:2]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[-1]
mat[4]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[3:5]
###Output
_____no_output_____
###Markdown
Now do the following Get the sum of all the values in mat
###Code
mat
mat.sum()
###Output
_____no_output_____
###Markdown
NumPy Exercises Now that we've learned about NumPy let's test your knowledge. We'll start off with a few simple tasks, and then you'll be asked some more complicated questions. Import NumPy as np
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Create an array of 10 zeros
###Code
np.zeros(10)
###Output
_____no_output_____
###Markdown
Create an array of 10 ones
###Code
np.ones(10)
###Output
_____no_output_____
###Markdown
Create an array of 10 fives
###Code
np.ones(10)*5
###Output
_____no_output_____
###Markdown
Create an array of the integers from 10 to 50
###Code
np.arange(10,51)
###Output
_____no_output_____
###Markdown
Create an array of all the even integers from 10 to 50
###Code
np.arange(10,51,2)
###Output
_____no_output_____
###Markdown
Create a 3x3 matrix with values ranging from 0 to 8
###Code
np.arange(0,9).reshape(3,3)
###Output
_____no_output_____
###Markdown
Create a 3x3 identity matrix
###Code
np.eye(3)
###Output
_____no_output_____
###Markdown
Use NumPy to generate a random number between 0 and 1
###Code
np.random.rand(1)
###Output
_____no_output_____
###Markdown
Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution
###Code
np.random.randn(25)
###Output
_____no_output_____
###Markdown
Create the following matrix:
###Code
np.arange(1,101).reshape(10,10)/100
###Output
_____no_output_____
###Markdown
Create an array of 20 linearly spaced points between 0 and 1:
###Code
np.linspace(0,1,20)
###Output
_____no_output_____
###Markdown
Numpy Indexing and SelectionNow you will be given a few matrices, and be asked to replicate the resulting matrix outputs:
###Code
mat = np.arange(1,26).reshape(5,5)
mat
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[2:,1:]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[3,4]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[:3,1:2]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[4] # mat[4,] mat[4,:]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[3:, :]
###Output
_____no_output_____
###Markdown
Now do the following Get the sum of all the values in mat
###Code
mat.sum()
###Output
_____no_output_____
###Markdown
Get the standard deviation of the values in mat
###Code
mat.std()
###Output
_____no_output_____
###Markdown
Get the sum of all the columns in mat
###Code
mat.sum(axis=0)
###Output
_____no_output_____
###Markdown
___ ___ NumPy Exercises Now that we've learned about NumPy let's test your knowledge. We'll start off with a few simple tasks, and then you'll be asked some more complicated questions. Import NumPy as np
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Create an array of 10 zeros
###Code
np.zeros(10)
###Output
_____no_output_____
###Markdown
Create an array of 10 ones
###Code
np.ones(10)
###Output
_____no_output_____
###Markdown
Create an array of 10 fives
###Code
np.ones(10)*5
###Output
_____no_output_____
###Markdown
Create an array of the integers from 10 to 50
###Code
np.arange(10,51)
###Output
_____no_output_____
###Markdown
Create an array of all the even integers from 10 to 50
###Code
np.arange(10,51,2)
###Output
_____no_output_____
###Markdown
Create a 3x3 matrix with values ranging from 0 to 8
###Code
np.arange(9).reshape(3,3)
###Output
_____no_output_____
###Markdown
Create a 3x3 identity matrix
###Code
np.eye(3)
###Output
_____no_output_____
###Markdown
Use NumPy to generate a random number between 0 and 1
###Code
np.random.rand(1)
###Output
_____no_output_____
###Markdown
Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution
###Code
np.random.randn(25)
###Output
_____no_output_____
###Markdown
Create the following matrix:
###Code
a=np.arange(1,101)/100
a
a.reshape(10,10)
###Output
_____no_output_____
###Markdown
Create an array of 20 linearly spaced points between 0 and 1:
###Code
np.linspace(0,1,20)
###Output
_____no_output_____
###Markdown
Numpy Indexing and SelectionNow you will be given a few matrices, and be asked to replicate the resulting matrix outputs:
###Code
mat = np.arange(1,26).reshape(5,5)
mat
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[2:,1:]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[3,4]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[:3,1:2]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[4,:]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[3:,:]
###Output
_____no_output_____
###Markdown
Now do the following Get the sum of all the values in mat
###Code
mat.sum()
###Output
_____no_output_____
###Markdown
Get the standard deviation of the values in mat
###Code
mat.std()
###Output
_____no_output_____
###Markdown
Get the sum of all the columns in mat
###Code
mat.sum(axis=0)
###Output
_____no_output_____
###Markdown
___ ___ NumPy Exercises Now that we've learned about NumPy let's test your knowledge. We'll start off with a few simple tasks, and then you'll be asked some more complicated questions. Import NumPy as np Create an array of 10 zeros Create an array of 10 ones Create an array of 10 fives Create an array of the integers from 10 to 50 Create an array of all the even integers from 10 to 50 Create a 3x3 matrix with values ranging from 0 to 8 Create a 3x3 identity matrix Use NumPy to generate a random number between 0 and 1 Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution Create the following matrix: Create an array of 20 linearly spaced points between 0 and 1: Numpy Indexing and SelectionNow you will be given a few matrices, and be asked to replicate the resulting matrix outputs:
###Code
mat = np.arange(1,26).reshape(5,5)
mat
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
###Output
_____no_output_____
###Markdown
___ ___ NumPy Exercises Now that we've learned about NumPy let's test your knowledge. We'll start off with a few simple tasks, and then you'll be asked some more complicated questions. Import NumPy as np
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Create an array of 10 zeros
###Code
np.zeros(10)
###Output
_____no_output_____
###Markdown
Create an array of 10 ones
###Code
np.ones(10)
###Output
_____no_output_____
###Markdown
Create an array of 10 fives
###Code
np.ones(10)*5
###Output
_____no_output_____
###Markdown
Create an array of the integers from 10 to 50
###Code
np.arange(10,51)
###Output
_____no_output_____
###Markdown
Create an array of all the even integers from 10 to 50
###Code
np.arange(10,51,2)
###Output
_____no_output_____
###Markdown
Create a 3x3 matrix with values ranging from 0 to 8
###Code
np.arange(0,9).reshape(3,3)
###Output
_____no_output_____
###Markdown
Create a 3x3 identity matrix
###Code
np.eye(3,3)
###Output
_____no_output_____
###Markdown
Use NumPy to generate a random number between 0 and 1
###Code
np.random.rand(1)
###Output
_____no_output_____
###Markdown
Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution
###Code
np.random.randn(25)
###Output
_____no_output_____
###Markdown
Create the following matrix:
###Code
np.arange(1,101).reshape(10,10)/100
np.linspace(0.01,1,100).reshape(10,10)
###Output
_____no_output_____
###Markdown
Create an array of 20 linearly spaced points between 0 and 1:
###Code
np.linspace(0,1,20)
###Output
_____no_output_____
###Markdown
Numpy Indexing and SelectionNow you will be given a few matrices, and be asked to replicate the resulting matrix outputs:
###Code
mat = np.arange(1,26).reshape(5,5)
mat
mat[2:,1:]
mat[3,4]
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[0:3,1]
mat[4]
mat[3:]
###Output
_____no_output_____
###Markdown
Now do the following Get the sum of all the values in mat
###Code
np.sum(mat)
###Output
_____no_output_____
###Markdown
Get the standard deviation of the values in mat
###Code
np.std(mat)
###Output
_____no_output_____
###Markdown
Get the sum of all the columns in mat
###Code
mat.sum(axis=0)
###Output
_____no_output_____ |
1.Data_Analysis.ipynb | ###Markdown
Initial Data Analysis
###Code
raw_df = pd.read_csv(r'data\raw_ckd.csv')
raw_df.head()
corr = raw_df.corr()
corr
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(18, 15))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
plt.title('Correlations between different predictors')
plt.show()
###Output
_____no_output_____
###Markdown
There seem to be quite a few correlated features. The most informative features should be chosen with an appropriate feature-selection step (feature scaling alone does not address the collinearity); one simple option is sketched below.
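A hedged sketch of one such option (the 0.8 threshold is an arbitrary choice made here, not something prescribed by the data):

```python
# Sketch only: drop one member of each pair of numeric features whose
# absolute pairwise correlation exceeds 0.8.
upper = corr.abs().where(np.triu(np.ones(corr.shape, dtype=bool), k=1))
to_drop = [col for col in upper.columns if (upper[col] > 0.8).any()]
reduced_df = raw_df.drop(columns=to_drop)
reduced_df.head()
```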
###Code
raw_df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 400 entries, 0 to 399
Data columns (total 25 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 age 391 non-null float64
1 bp 388 non-null float64
2 sg 353 non-null float64
3 al 354 non-null float64
4 su 351 non-null float64
5 rbc 248 non-null object
6 pc 335 non-null object
7 pcc 396 non-null object
8 ba 396 non-null object
9 bgr 356 non-null float64
10 bu 381 non-null float64
11 sc 383 non-null float64
12 sod 313 non-null float64
13 pot 312 non-null float64
14 hemo 348 non-null float64
15 pcv 329 non-null float64
16 wbcc 294 non-null float64
17 rbcc 269 non-null float64
18 htn 398 non-null object
19 dm 398 non-null object
20 cad 398 non-null object
21 appet 399 non-null object
22 pe 399 non-null object
23 ane 399 non-null object
24 class 400 non-null object
dtypes: float64(14), object(11)
memory usage: 78.2+ KB
###Markdown
Target variable(class) visualization for Categorical columns
###Code
# Red Blood cell
data = raw_df.groupby(['rbc','class']).size().unstack(level=1)
data.plot(kind = 'bar')
###Output
_____no_output_____
###Markdown
Observations: In the data set, all cases with 'abnormal' rbc belong to class ckd (the positive class). Cases with 'normal' rbc also appear in the ckd class.
###Code
# Pus cell
data = raw_df.groupby(['pc','class']).size().unstack(level=1)
data.plot(kind = 'bar')
###Output
_____no_output_____
###Markdown
Observations: In the data set, all cases with 'abnormal' pc belong to class ckd (the positive class).
###Code
# Pus Cell Clumps
data = raw_df.groupby(['pcc','class']).size().unstack(level=1)
data.plot(kind = 'bar')
###Output
_____no_output_____
###Markdown
Observations: In the data set, all cases with 'present' pcc belong to class ckd (the positive class).
###Code
# Bacteria
data = raw_df.groupby(['ba','class']).size().unstack(level=1)
data.plot(kind = 'bar')
###Output
_____no_output_____
###Markdown
Observations: In the data set, all cases with 'present' ba belong to class ckd (the positive class).
###Code
# Hypertension
data = raw_df.groupby(['htn','class']).size().unstack(level=1)
data.plot(kind = 'bar')
###Output
_____no_output_____
###Markdown
Observations: In the data set, all cases with 'yes' htn belong to class ckd (the positive class).
###Code
# Diabetes Mellitus
data = raw_df.groupby(['dm','class']).size().unstack(level=1)
data.plot(kind = 'bar')
###Output
_____no_output_____
###Markdown
Observations: In the data set, all cases with 'yes' dm belong to class ckd (the positive class).
###Code
# Coronary Artery Disease
data = raw_df.groupby(['cad','class']).size().unstack(level=1)
data.plot(kind = 'bar')
###Output
_____no_output_____
###Markdown
Observations: In the data set, all cases with 'yes' cad belong to class ckd (the positive class).
###Code
# Appetite
data = raw_df.groupby(['appet','class']).size().unstack(level=1)
data.plot(kind = 'bar')
###Output
_____no_output_____
###Markdown
Observations: In the data set, all cases with 'poor' appet belong to class ckd (the positive class).
###Code
# Pedal Edema
data = raw_df.groupby(['pe','class']).size().unstack(level=1)
data.plot(kind = 'bar')
###Output
_____no_output_____
###Markdown
Observations: In the data set, all cases with 'yes' pe belong to class ckd (the positive class).
###Code
# Anemia
data = raw_df.groupby(['ane','class']).size().unstack(level=1)
data.plot(kind = 'bar')
###Output
_____no_output_____
###Markdown
Observations: In the data set, all cases with 'yes' ane belong to class ckd (the positive class).
###Code
# Disease detection
data = raw_df.groupby(['class']).size()#.unstack(level=1)
data.plot(kind = 'bar')
raw_df['class'].value_counts()
###Output
_____no_output_____ |
fuzzy_classifier.ipynb | ###Markdown
Iris dataset
###Code
df = pd.read_csv('iris.data', header=None, names=['sepal length', 'sepal width', 'petal length', 'petal width', 'class'])
df = df[~(df['class']=='Iris-virginica')]
df.head()
df.replace(to_replace='Iris-setosa', value=0, inplace=True)
df.replace(to_replace='Iris-versicolor', value=1, inplace=True)
df = df.sample(frac=1)
X_train = df[['sepal length', 'petal length']].values
Y_train = df['class'].values
_max = np.max(X_train, axis=0)
_min = np.min(X_train, axis=0)
X_train = (X_train - _min) / (_max - _min)
X_test, Y_test = X_train[-20:], Y_train[-20:]
X_train, Y_train = X_train[:-20], Y_train[:-20]
X_train.shape, Y_train.shape
clf2 = FuzzyMMC(sensitivity=1, exp_bound=0.1, animate=True)
clf2.fit(X_train, Y_train)
clf2.score(X_test, Y_test)
_ = clf2.animate()
import io
import base64
from IPython.display import HTML
video = io.open('blog_data/fuzzy_animation_iris.mp4', 'r+b').read()
encoded = base64.b64encode(video)
HTML(data='''<video alt="test" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii')))
###Output
_____no_output_____
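###Markdown
One subtlety in the cell above is that `_min` and `_max` are computed over all rows before the last 20 are held out as a test set. A minimal sketch of the alternative, where the min-max statistics are fitted on the training split only and then reused for the held-out rows (the column names and the 80/20 split follow the cell above; new variable names are used so the notebook's own variables are left untouched).
###Code
import numpy as np

# Split first, then fit the scaling statistics on the training rows only
X_all = df[['sepal length', 'petal length']].values
y_all = df['class'].values
X_tr, y_tr = X_all[:-20], y_all[:-20]
X_te, y_te = X_all[-20:], y_all[-20:]
tr_min, tr_max = X_tr.min(axis=0), X_tr.max(axis=0)
X_tr_scaled = (X_tr - tr_min) / (tr_max - tr_min)
# Reuse the training statistics for the test rows to avoid information leakage
X_te_scaled = (X_te - tr_min) / (tr_max - tr_min)
###Output
_____no_output_____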
###Markdown
Generated dataset
###Code
def rand_uniform(x, y):
return random.random() * (y- x) + x
def gen_circle(num_samples):
num_samples = num_samples
radius = 5
points = []
noise = 0.5
def get_label(x, y):
dist = math.sqrt(x ** 2 + y ** 2)
return 0 if dist < radius * 0.5 else 1
for i in range(num_samples//2):
r = rand_uniform(0, radius * 0.5)
angle = rand_uniform(0, 2 * math.pi)
x = r * math.sin(angle)
y = r * math.cos(angle)
noiseX = rand_uniform((-1)*radius, radius) * noise
noiseY = rand_uniform((-1)*radius, radius) * noise
#label = get_label(noiseX, noiseY)
label = 0
points.append((x, y, label))
for i in range(num_samples // 2):
r = rand_uniform(radius * 0.7, radius)
angle = rand_uniform(0, 2 * math.pi)
x = r * math.sin(angle)
y = r * math.cos(angle)
noiseX = rand_uniform((-1)*radius, radius) * noise
noiseY = rand_uniform((-1)*radius, radius) * noise
#label = get_label(noiseX, noiseY)
label = 1
points.append((x, y, label))
return points
o = gen_circle(100)
o = np.array(o)
train = pd.DataFrame(o)
train_x = []
train_y = []
np.random.shuffle(o)
for i in o:
train_x.append([i[0], i[1]])
train_y.append(i[2])
train_x = np.array(train_x)
train_y = np.array(train_y)
_max = np.max(train_x, axis=0)
_min = np.min(train_x, axis=0)
train_x = (train_x - _min) / (_max - _min)
train_x.shape, train_y.shape
for x, y in zip(train_x, train_y):
if y == 0:
plt.scatter(x[0], x[1], c='r', alpha=0.8)
else:
plt.scatter(x[0], x[1], c='b', alpha=0.8)
plt.ylim([0, 1])
plt.show()
a = FuzzyMMC(sensitivity=1, exp_bound=0.7, animate=True)
a.fit(train_x, train_y)
a.score(train_x, train_y)
_ = a.animate(frame_rate=5)
import io
import base64
from IPython.display import HTML
video = io.open('blog_data/fuzzy_animation_circle.mp4', 'r+b').read()
encoded = base64.b64encode(video)
HTML(data='''<video alt="test" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii')))
###Output
_____no_output_____ |
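###Markdown
The `sensitivity` and `exp_bound` arguments used above correspond to the classic Simpson-style fuzzy min-max scheme, in which each class is covered by hyperboxes and membership decays as a point moves away from a hyperbox's min/max corners. A minimal sketch of that textbook membership function is below; it illustrates the idea only and is not necessarily the exact code inside the `FuzzyMMC` class used in this notebook.
###Code
import numpy as np

def hyperbox_membership(x, v_min, w_max, gamma=1.0):
    """Simpson-style fuzzy min-max membership of point x in the hyperbox [v_min, w_max].

    Returns 1.0 when x lies inside the box and decays towards lower values as x
    moves outside it, at a rate controlled by the sensitivity parameter gamma.
    """
    x = np.asarray(x, dtype=float)
    v = np.asarray(v_min, dtype=float)
    w = np.asarray(w_max, dtype=float)
    ramp = lambda d: np.maximum(0.0, 1.0 - np.maximum(0.0, gamma * np.minimum(1.0, d)))
    n = x.shape[0]
    return float(np.sum(ramp(x - w) + ramp(v - x)) / (2.0 * n))

# A point inside the unit box has full membership; a point outside has less
print(hyperbox_membership([0.5, 0.5], v_min=[0.0, 0.0], w_max=[1.0, 1.0]))
print(hyperbox_membership([0.5, 1.4], v_min=[0.0, 0.0], w_max=[1.0, 1.0]))
###Output
_____no_output_____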
python-sdk/tutorials/automl-with-azureml/forecasting-many-models/auto-ml-forecasting-many-models.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License.  Many Models - Automated ML**_Generate many models time series forecasts with Automated Machine Learning_**--- For this notebook we are using a synthetic dataset portraying sales data to predict the quantity of a variety of product SKUs across several states, stores, and product categories.**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend setting the parallelism to a maximum of 320 runs per experiment per workspace. If users want more parallelism and increase this limit, they might encounter Too Many Requests errors (HTTP 429).** PrerequisitesYou'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md). 1.0 Set up workspace, datastore, experiment
###Code
import azureml.core
from azureml.core import Workspace, Datastore
import pandas as pd
# Set up your workspace
ws = Workspace.from_config()
ws.get_details()
# Set up your datastores
dstore = ws.get_default_datastore()
output = {}
output["SDK version"] = azureml.core.VERSION
output["Subscription ID"] = ws.subscription_id
output["Workspace"] = ws.name
output["Resource Group"] = ws.resource_group
output["Location"] = ws.location
output["Default datastore name"] = dstore.name
pd.set_option("display.max_colwidth", None)
outputDf = pd.DataFrame(data=output, index=[""])
outputDf.T
###Output
_____no_output_____
###Markdown
Choose an experiment
###Code
from azureml.core import Experiment
experiment = Experiment(ws, "automl-many-models")
print("Experiment name: " + experiment.name)
###Output
_____no_output_____
###Markdown
2.0 DataThis notebook uses simulated orange juice sales data to walk you through the process of training many models on Azure Machine Learning using Automated ML. The time series data used in this example was simulated based on the University of Chicago's Dominick's Finer Foods dataset which featured two years of sales of 3 different orange juice brands for individual stores. The full simulated dataset includes 3,991 stores with 3 orange juice brands each thus allowing 11,973 models to be trained to showcase the power of the many models pattern. In this notebook, two datasets will be created: one with all 11,973 files and one with only 10 files that can be used to quickly test and debug. For each dataset, you'll be walked through the process of:1. Registering the blob container as a Datastore to the Workspace2. Registering a tabular dataset to the Workspace 2.1 Data PreparationThe OJ data is available in the public blob container. The data is split to be used for training and for inferencing. For the current dataset, the data was split on time column ('WeekStarting') before and after '1992-5-28' .The container has 'oj-data-tabular' and 'oj-inference-tabular' folders that contains training and inference data respectively for the 11,973 models. It also has 'oj-data-small-tabular' and 'oj-inference-small-tabular' folders that has training and inference data for 10 models.To create the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) needed for the ParallelRunStep, you first need to register the blob container to the workspace. To use your own data, put your own data in a blobstore folder. As shown it can be one file or multiple files. We can then register datastore using that blob as shown below. 
How sample data in blob store looks like['oj-data-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-data-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container) 2.2 Register the blob container as DataStoreA Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.In this next step, we will be registering blob storage as datastore to the Workspace.
###Code
from azureml.core import Datastore
# Please change the following to point to your own blob container and pass in account_key
blob_datastore_name = "automl_many_models"
container_name = "automl-sample-notebook-data"
account_name = "automlsamplenotebookdata"
oj_datastore = Datastore.register_azure_blob_container(
workspace=ws,
datastore_name=blob_datastore_name,
container_name=container_name,
account_name=account_name,
create_if_not_exists=True,
)
###Output
_____no_output_____
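###Markdown
The comment in the cell above mentions passing in an account key. For a private container the same `register_azure_blob_container` call accepts one; a minimal, commented-out sketch is below (the datastore/container/account names are placeholders, and reading the key from an environment variable is just one way to keep secrets out of the notebook).
###Code
# import os
#
# private_datastore = Datastore.register_azure_blob_container(
#     workspace=ws,
#     datastore_name="my_private_blob",          # placeholder datastore name
#     container_name="my-private-container",     # placeholder container
#     account_name="mystorageaccount",           # placeholder storage account
#     account_key=os.environ["STORAGE_ACCOUNT_KEY"],  # keep the key out of source control
#     create_if_not_exists=False,
# )
###Output
_____no_output_____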
###Markdown
2.3 Using tabular datasets Now that the datastore is available from the Workspace, [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) can be created. Datasets in Azure Machine Learning are references to specific data in a Datastore. We are using TabularDataset so that users whose data is in one or many files (*.parquet or *.csv) and has not yet been split up by the group columns needed for training can still do so, using the out-of-the-box 'partition_by' feature of TabularDataset (a short sketch follows the next cell).
###Code
from azureml.core import Dataset
ds_name_small = "oj-data-small-tabular"
input_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(ds_name_small + "/"), validate=False
)
inference_name_small = "oj-inference-small-tabular"
inference_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(inference_name_small + "/"), validate=False
)
###Output
_____no_output_____
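###Markdown
The 'partition_by' support mentioned in section 2.3 can be used when the raw data is not already laid out by the grouping columns; a minimal sketch that repartitions the small tabular dataset by Store and Brand into the default datastore (the target path and dataset name below are placeholders, not names used elsewhere in this tutorial).
###Code
from azureml.data.datapath import DataPath

# Repartition the small training dataset by the grouping columns used for many models
partitioned_small = input_ds_small.partition_by(
    partition_keys=["Store", "Brand"],
    target=DataPath(dstore, "oj_partitioned_small"),  # placeholder output path
    name="oj_data_small_partitioned",                 # placeholder dataset name
)
###Output
_____no_output_____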
###Markdown
3.0 Build the training pipelineNow that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist. Choose a compute targetYou will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targetsamlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\*\*Creation of AmlCompute takes approximately 5 minutes.**If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota.
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
# Name your cluster
compute_name = "mm-compute"
if compute_name in ws.compute_targets:
compute_target = ws.compute_targets[compute_name]
if compute_target and type(compute_target) is AmlCompute:
print("Found compute target: " + compute_name)
else:
print("Creating a new compute target...")
provisioning_config = AmlCompute.provisioning_configuration(
vm_size="STANDARD_D16S_V3", max_nodes=20
)
# Create the compute target
compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)
# Can poll for a minimum number of nodes and for a specific timeout.
# If no min node count is provided it will use the scale settings for the cluster
compute_target.wait_for_completion(
show_output=True, min_node_count=None, timeout_in_minutes=20
)
# For a more detailed view of current cluster status, use the 'status' property
print(compute_target.status.serialize())
###Output
_____no_output_____
###Markdown
Set up training parametersThis dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings inncluding the name of the time column, the maximum forecast horizon, and the partition column name definition.| Property | Description|| :--------------- | :------------------- || **task** | forecasting || **primary_metric** | This is the metric that you want to optimize. Forecasting supports the following primary metrics spearman_correlationnormalized_root_mean_squared_errorr2_scorenormalized_mean_absolute_error || **blocked_models** | Blocked models won't be used by AutoML. || **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. || **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. || **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. || **label_column_name** | The name of the label column. || **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. || **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. || **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. || **time_column_name** | The name of your time column. || **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. || **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. || **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). || **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. || **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |
###Code
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsTrainParameters,
)
partition_column_names = ["Store", "Brand"]
automl_settings = {
"task": "forecasting",
"primary_metric": "normalized_root_mean_squared_error",
"iteration_timeout_minutes": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value
"iterations": 15,
"experiment_timeout_hours": 0.25,
"label_column_name": "Quantity",
"n_cross_validations": 3,
"time_column_name": "WeekStarting",
"drop_column_names": "Revenue",
"max_horizon": 6,
"grain_column_names": partition_column_names,
"track_child_runs": False,
}
mm_paramters = ManyModelsTrainParameters(
automl_settings=automl_settings, partition_column_names=partition_column_names
)
###Output
_____no_output_____
###Markdown
Set up many models pipeline Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for training. || **train_data** | The file dataset to be used as input to the training run. || **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. || **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. || **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(
experiment=experiment,
train_data=input_ds_small,
compute_target=compute_target,
node_count=2,
process_count_per_node=8,
run_invocation_timeout=920,
train_pipeline_parameters=mm_paramters,
)
from azureml.pipeline.core import Pipeline
training_pipeline = Pipeline(ws, steps=training_pipeline_steps)
###Output
_____no_output_____
###Markdown
Submit the pipeline to runNext we submit our pipeline to run. The whole training pipeline takes about 40m using a STANDARD_D16S_V3 VM with our current ParallelRunConfig setting.
###Code
training_run = experiment.submit(training_pipeline)
training_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
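###Markdown
While the pipeline is running, the submitted run can also be monitored from the notebook; a minimal sketch using the run object returned above (the RunDetails widget assumes the azureml-widgets package is installed, so it is left commented).
###Code
# Link to the run in Azure Machine Learning studio
print(training_run.get_portal_url())

# Optional interactive widget
# from azureml.widgets import RunDetails
# RunDetails(training_run).show()
###Output
_____no_output_____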
###Markdown
Check the run status, if training_run is in completed state, continue to forecasting. If training_run is in another state, check the portal for failures. 5.0 Publish and schedule the train pipeline (Optional) 5.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline = training_pipeline.publish(name = 'automl_train_many_models',
# description = 'train many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
###Markdown
5.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# training_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_training_recurring_schedule",
# description="Schedule Training Pipeline to run on the first day of every month",
# pipeline_id=training_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____
###Markdown
6.0 Forecasting Set up output dataset for inference dataOutput of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset.
###Code
from azureml.data import OutputFileDatasetConfig
output_inference_data_ds = OutputFileDatasetConfig(
name="many_models_inference_output", destination=(dstore, "oj/inference_data/")
).register_on_complete(name="oj_inference_data_ds")
###Output
_____no_output_____
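###Markdown
Because `register_on_complete` registers the inference output under the name 'oj_inference_data_ds', it can be retrieved by name once the inference pipeline further below has finished; a minimal sketch (left commented since the dataset only exists after that run completes).
###Code
# registered_output = Dataset.get_by_name(ws, name="oj_inference_data_ds")
# registered_output.to_path()  # list the files written by the inference run
###Output
_____no_output_____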
###Markdown
For many models we need to provide the ManyModelsInferenceParameters object. ManyModelsInferenceParameters arguments| Property | Description|| :--------------- | :------------------- || **partition_column_names** | List of column names that identifies groups. || **target_column_name** | \[Optional] Column name only if the inference dataset has the target. || **time_column_name** | \[Optional] Column name only if it is timeseries. || **many_models_run_id** | \[Optional] Many models run id where models were trained. | get_many_models_batch_inference_steps arguments| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for inference run. || **inference_data** | The data to use for inferencing. It should be the same schema as used for training.| **compute_target** The compute target that runs the inference pipeline.|| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). || **process_count_per_node** The number of processes per node.| **train_run_id** | \[Optional] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. || **train_experiment_name** | \[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. || **process_count_per_node** | \[Optional] The number of processes per node, by default it's 4. |
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsInferenceParameters,
)
mm_parameters = ManyModelsInferenceParameters(
partition_column_names=["Store", "Brand"],
time_column_name="WeekStarting",
target_column_name="Quantity",
)
inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(
experiment=experiment,
inference_data=inference_ds_small,
node_count=2,
process_count_per_node=8,
compute_target=compute_target,
run_invocation_timeout=300,
output_datastore=output_inference_data_ds,
train_run_id=training_run.id,
train_experiment_name=training_run.experiment.name,
inference_pipeline_parameters=mm_parameters,
)
from azureml.pipeline.core import Pipeline
inference_pipeline = Pipeline(ws, steps=inference_steps)
inference_run = experiment.submit(inference_pipeline)
inference_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
###Markdown
Retrieve resultsThe forecasting pipeline forecasts the orange juice quantity for a Store by Brand. The pipeline returns one file with the predictions for each store and outputs the result to the forecasting_output Blob container. The details of the blob container is listed in 'forecasting_output.txt' under Outputs+logs. The following code snippet:1. Downloads the contents of the output folder that is passed in the parallel run step 2. Reads the parallel_run_step.txt file that has the predictions as pandas dataframe and 3. Displays the top 10 rows of the predictions
###Code
from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline
forecasting_results_name = "forecasting_results"
forecasting_output_name = "many_models_inference_output"
forecast_file = get_output_from_mm_pipeline(
inference_run, forecasting_results_name, forecasting_output_name
)
df = pd.read_csv(forecast_file, delimiter=" ", header=None)
df.columns = [
"Week Starting",
"Store",
"Brand",
"Quantity",
"Advert",
"Price",
"Revenue",
"Predicted",
]
print(
"Prediction has ", df.shape[0], " rows. Here the first 10 rows are being displayed."
)
df.head(10)
###Output
_____no_output_____
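###Markdown
Because the inference data used here still contains the actual Quantity column, a quick sanity check of forecast quality can be computed directly from the dataframe above; a minimal sketch of the mean absolute percentage error (MAPE) overall and per Store/Brand pair.
###Code
import numpy as np

# Guard against division by zero before computing percentage errors
eval_df = df[df["Quantity"] != 0].copy()
eval_df["ape"] = (eval_df["Quantity"] - eval_df["Predicted"]).abs() / eval_df["Quantity"].abs()

print("Overall MAPE: %.2f%%" % (eval_df["ape"].mean() * 100))
mape_by_series = eval_df.groupby(["Store", "Brand"])["ape"].mean() * 100
mape_by_series.head(10)
###Output
_____no_output_____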
###Markdown
7.0 Publish and schedule the inference pipeline (Optional) 7.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline_inf = inference_pipeline.publish(name = 'automl_forecast_many_models',
# description = 'forecast many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
###Markdown
7.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain or forecast models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# forecasting_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_forecasting_recurring_schedule",
# description="Schedule Forecasting Pipeline to run on the first day of every week",
# pipeline_id=forecasting_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License.  Many Models - Automated ML**_Generate many models time series forecasts with Automated Machine Learning_**--- For this notebook we are using a synthetic dataset portraying sales data to predict the quantity of a variety of product SKUs across several states, stores, and product categories.**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend setting the parallelism to a maximum of 320 runs per experiment per workspace. If users want more parallelism and increase this limit, they might encounter Too Many Requests errors (HTTP 429).** PrerequisitesYou'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md). 1.0 Set up workspace, datastore, experiment
###Code
import azureml.core
from azureml.core import Workspace, Datastore
import pandas as pd
# Set up your workspace
ws = Workspace.from_config()
ws.get_details()
# Set up your datastores
dstore = ws.get_default_datastore()
output = {}
output["SDK version"] = azureml.core.VERSION
output["Subscription ID"] = ws.subscription_id
output["Workspace"] = ws.name
output["Resource Group"] = ws.resource_group
output["Location"] = ws.location
output["Default datastore name"] = dstore.name
output["SDK Version"] = azureml.core.VERSION
pd.set_option("display.max_colwidth", None)
outputDf = pd.DataFrame(data=output, index=[""])
outputDf.T
###Output
_____no_output_____
###Markdown
Choose an experiment
###Code
from azureml.core import Experiment
experiment = Experiment(ws, "automl-many-models")
print("Experiment name: " + experiment.name)
###Output
_____no_output_____
###Markdown
2.0 DataThis notebook uses simulated orange juice sales data to walk you through the process of training many models on Azure Machine Learning using Automated ML. The time series data used in this example was simulated based on the University of Chicago's Dominick's Finer Foods dataset which featured two years of sales of 3 different orange juice brands for individual stores. The full simulated dataset includes 3,991 stores with 3 orange juice brands each thus allowing 11,973 models to be trained to showcase the power of the many models pattern. In this notebook, two datasets will be created: one with all 11,973 files and one with only 10 files that can be used to quickly test and debug. For each dataset, you'll be walked through the process of:1. Registering the blob container as a Datastore to the Workspace2. Registering a tabular dataset to the Workspace 2.1 Data PreparationThe OJ data is available in the public blob container. The data is split to be used for training and for inferencing. For the current dataset, the data was split on time column ('WeekStarting') before and after '1992-5-28' .The container has 'oj-data-tabular' and 'oj-inference-tabular' folders that contains training and inference data respectively for the 11,973 models. It also has 'oj-data-small-tabular' and 'oj-inference-small-tabular' folders that has training and inference data for 10 models.To create the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) needed for the ParallelRunStep, you first need to register the blob container to the workspace. To use your own data, put your own data in a blobstore folder. As shown it can be one file or multiple files. We can then register datastore using that blob as shown below. 
How sample data in blob store looks like['oj-data-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-data-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container) 2.2 Register the blob container as DataStoreA Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.In this next step, we will be registering blob storage as datastore to the Workspace.
###Code
from azureml.core import Datastore
# Please change the following to point to your own blob container and pass in account_key
blob_datastore_name = "automl_many_models"
container_name = "automl-sample-notebook-data"
account_name = "automlsamplenotebookdata"
oj_datastore = Datastore.register_azure_blob_container(
workspace=ws,
datastore_name=blob_datastore_name,
container_name=container_name,
account_name=account_name,
create_if_not_exists=True,
)
###Output
_____no_output_____
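###Markdown
After registration the datastore can be looked up again from the workspace, which is a quick way to confirm it is attached; a minimal sketch.
###Code
# The registered datastore is now part of the workspace's datastore collection
print(Datastore.get(ws, blob_datastore_name).name)
print(sorted(ws.datastores.keys()))
###Output
_____no_output_____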
###Markdown
2.3 Using tabular datasets Now that the datastore is available from the Workspace, [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) can be created. Datasets in Azure Machine Learning are references to specific data in a Datastore. We are using TabularDataset so that users whose data is in one or many files (*.parquet or *.csv) and has not yet been split up by the group columns needed for training can still do so, using the out-of-the-box 'partition_by' feature of TabularDataset.
###Code
from azureml.core import Dataset
ds_name_small = "oj-data-small-tabular"
input_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(ds_name_small + "/"), validate=False
)
inference_name_small = "oj-inference-small-tabular"
inference_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(inference_name_small + "/"), validate=False
)
###Output
_____no_output_____
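###Markdown
Before wiring the datasets into a pipeline it is often useful to peek at a few rows; a minimal sketch that pulls a small sample of the training dataset into pandas.
###Code
# Pull a small sample into memory for a quick look at the schema and values
sample_df = input_ds_small.take(10).to_pandas_dataframe()
sample_df.head()
###Output
_____no_output_____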
###Markdown
3.0 Build the training pipelineNow that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist. Choose a compute targetYou will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targetsamlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\*\*Creation of AmlCompute takes approximately 5 minutes.**If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota.
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
# Name your cluster
compute_name = "mm-compute"
if compute_name in ws.compute_targets:
compute_target = ws.compute_targets[compute_name]
if compute_target and type(compute_target) is AmlCompute:
print("Found compute target: " + compute_name)
else:
print("Creating a new compute target...")
provisioning_config = AmlCompute.provisioning_configuration(
vm_size="STANDARD_D16S_V3", max_nodes=20
)
# Create the compute target
compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)
# Can poll for a minimum number of nodes and for a specific timeout.
# If no min node count is provided it will use the scale settings for the cluster
compute_target.wait_for_completion(
show_output=True, min_node_count=None, timeout_in_minutes=20
)
# For a more detailed view of current cluster status, use the 'status' property
print(compute_target.status.serialize())
###Output
_____no_output_____
###Markdown
Set up training parametersThis dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings inncluding the name of the time column, the maximum forecast horizon, and the partition column name definition.| Property | Description|| :--------------- | :------------------- || **task** | forecasting || **primary_metric** | This is the metric that you want to optimize. Forecasting supports the following primary metrics spearman_correlationnormalized_root_mean_squared_errorr2_scorenormalized_mean_absolute_error || **blocked_models** | Blocked models won't be used by AutoML. || **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. || **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. || **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. || **label_column_name** | The name of the label column. || **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. || **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. || **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. || **time_column_name** | The name of your time column. || **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. || **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. || **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). || **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. || **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |
###Code
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsTrainParameters,
)
partition_column_names = ["Store", "Brand"]
automl_settings = {
"task": "forecasting",
"primary_metric": "normalized_root_mean_squared_error",
"iteration_timeout_minutes": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value
"iterations": 15,
"experiment_timeout_hours": 0.25,
"label_column_name": "Quantity",
"n_cross_validations": 3,
"time_column_name": "WeekStarting",
"drop_column_names": "Revenue",
"forecast_horizon": 6,
"time_series_id_column_names": partition_column_names,
"track_child_runs": False,
}
mm_paramters = ManyModelsTrainParameters(
automl_settings=automl_settings, partition_column_names=partition_column_names
)
###Output
_____no_output_____
###Markdown
Set up many models pipeline Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process.| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for training. || **train_data** | The file dataset to be used as input to the training run. || **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. || **process_count_per_node** | Process count per node, we recommend 2:1 ratio for number of cores: number of processes per node. eg. If node has 16 cores then configure 8 or less process count per node or optimal performance. || **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(
experiment=experiment,
train_data=input_ds_small,
compute_target=compute_target,
node_count=2,
process_count_per_node=8,
run_invocation_timeout=920,
train_pipeline_parameters=mm_paramters,
)
from azureml.pipeline.core import Pipeline
training_pipeline = Pipeline(ws, steps=training_pipeline_steps)
###Output
_____no_output_____
###Markdown
Submit the pipeline to runNext we submit our pipeline to run. The whole training pipeline takes about 40m using a STANDARD_D16S_V3 VM with our current ParallelRunConfig setting.
###Code
training_run = experiment.submit(training_pipeline)
training_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
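###Markdown
Once the submitted pipeline run has finished, its individual steps and their statuses can be inspected from the run object; a minimal sketch.
###Code
# List the steps of the completed training pipeline run and their statuses
for step_run in training_run.get_steps():
    print(step_run.id, step_run.get_status())
###Output
_____no_output_____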
###Markdown
Check the run status, if training_run is in completed state, continue to forecasting. If training_run is in another state, check the portal for failures. 5.0 Publish and schedule the train pipeline (Optional) 5.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline = training_pipeline.publish(name = 'automl_train_many_models',
# description = 'train many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
###Markdown
5.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# training_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_training_recurring_schedule",
# description="Schedule Training Pipeline to run on the first day of every month",
# pipeline_id=training_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____
###Markdown
6.0 Forecasting Set up output dataset for inference dataOutput of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset.
###Code
from azureml.data import OutputFileDatasetConfig
output_inference_data_ds = OutputFileDatasetConfig(
name="many_models_inference_output", destination=(dstore, "oj/inference_data/")
).register_on_complete(name="oj_inference_data_ds")
###Output
_____no_output_____
###Markdown
For many models we need to provide the ManyModelsInferenceParameters object. ManyModelsInferenceParameters arguments| Property | Description|| :--------------- | :------------------- || **partition_column_names** | List of column names that identifies groups. || **target_column_name** | \[Optional] Column name only if the inference dataset has the target. || **time_column_name** | \[Optional] Column name only if it is timeseries. || **many_models_run_id** | \[Optional] Many models run id where models were trained. | get_many_models_batch_inference_steps arguments| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for inference run. || **inference_data** | The data to use for inferencing. It should be the same schema as used for training.| **compute_target** The compute target that runs the inference pipeline.|| **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). || **process_count_per_node** The number of processes per node.| **train_run_id** | \[Optional] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. || **train_experiment_name** | \[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiement as the inference pipeline. || **process_count_per_node** | \[Optional] The number of processes per node, by default it's 4. |
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsInferenceParameters,
)
mm_parameters = ManyModelsInferenceParameters(
partition_column_names=["Store", "Brand"],
time_column_name="WeekStarting",
target_column_name="Quantity",
)
inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(
experiment=experiment,
inference_data=inference_ds_small,
node_count=2,
process_count_per_node=8,
compute_target=compute_target,
run_invocation_timeout=300,
output_datastore=output_inference_data_ds,
train_run_id=training_run.id,
train_experiment_name=training_run.experiment.name,
inference_pipeline_parameters=mm_parameters,
)
from azureml.pipeline.core import Pipeline
inference_pipeline = Pipeline(ws, steps=inference_steps)
inference_run = experiment.submit(inference_pipeline)
inference_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
###Markdown
Retrieve resultsThe forecasting pipeline forecasts the orange juice quantity for a Store by Brand. The pipeline returns one file with the predictions for each store and outputs the result to the forecasting_output Blob container. The details of the blob container is listed in 'forecasting_output.txt' under Outputs+logs. The following code snippet:1. Downloads the contents of the output folder that is passed in the parallel run step 2. Reads the parallel_run_step.txt file that has the predictions as pandas dataframe and 3. Displays the top 10 rows of the predictions
###Code
from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline
forecasting_results_name = "forecasting_results"
forecasting_output_name = "many_models_inference_output"
forecast_file = get_output_from_mm_pipeline(
inference_run, forecasting_results_name, forecasting_output_name
)
df = pd.read_csv(forecast_file, delimiter=" ", header=None)
df.columns = [
"Week Starting",
"Store",
"Brand",
"Quantity",
"Advert",
"Price",
"Revenue",
"Predicted",
]
print(
"Prediction has ", df.shape[0], " rows. Here the first 10 rows are being displayed."
)
df.head(10)
###Output
_____no_output_____
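###Markdown
A quick visual check of the forecasts against the recorded quantities for a single Store/Brand series can be done straight from the dataframe above; a minimal sketch with matplotlib.
###Code
import matplotlib.pyplot as plt

# Pick the first Store/Brand combination present in the predictions
store, brand = df.loc[0, "Store"], df.loc[0, "Brand"]
one_series = df[(df["Store"] == store) & (df["Brand"] == brand)].sort_values("Week Starting")

plt.figure(figsize=(10, 4))
plt.plot(one_series["Week Starting"], one_series["Quantity"], label="Actual")
plt.plot(one_series["Week Starting"], one_series["Predicted"], label="Predicted")
plt.title("Store %s / Brand %s" % (store, brand))
plt.xticks(rotation=45)
plt.legend()
plt.show()
###Output
_____no_output_____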
###Markdown
7.0 Publish and schedule the inference pipeline (Optional) 7.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline_inf = inference_pipeline.publish(name = 'automl_forecast_many_models',
# description = 'forecast many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
###Markdown
7.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain or forecast models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# forecasting_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_forecasting_recurring_schedule",
# description="Schedule Forecasting Pipeline to run on the first day of every week",
# pipeline_id=forecasting_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License.  Many Models - Automated ML**_Generate many models time series forecasts with Automated Machine Learning_**--- For this notebook we are using a synthetic dataset portraying sales data to predict the quantity of a variety of product SKUs across several states, stores, and product categories.**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend setting the parallelism to a maximum of 320 runs per experiment per workspace. If users want more parallelism and increase this limit, they might encounter Too Many Requests errors (HTTP 429).** PrerequisitesYou'll need to create a compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md). 1.0 Set up workspace, datastore, experiment
###Code
import azureml.core
from azureml.core import Workspace, Datastore
import pandas as pd
# Set up your workspace
ws = Workspace.from_config()
ws.get_details()
# Set up your datastores
dstore = ws.get_default_datastore()
output = {}
output["SDK version"] = azureml.core.VERSION
output["Subscription ID"] = ws.subscription_id
output["Workspace"] = ws.name
output["Resource Group"] = ws.resource_group
output["Location"] = ws.location
output["Default datastore name"] = dstore.name
pd.set_option("display.max_colwidth", None)
outputDf = pd.DataFrame(data=output, index=[""])
outputDf.T
###Output
_____no_output_____
###Markdown
Choose an experiment
###Code
from azureml.core import Experiment
experiment = Experiment(ws, "automl-many-models")
print("Experiment name: " + experiment.name)
###Output
_____no_output_____
###Markdown
2.0 DataThis notebook uses simulated orange juice sales data to walk you through the process of training many models on Azure Machine Learning using Automated ML. The time series data used in this example was simulated based on the University of Chicago's Dominick's Finer Foods dataset which featured two years of sales of 3 different orange juice brands for individual stores. The full simulated dataset includes 3,991 stores with 3 orange juice brands each thus allowing 11,973 models to be trained to showcase the power of the many models pattern. In this notebook, two datasets will be created: one with all 11,973 files and one with only 10 files that can be used to quickly test and debug. For each dataset, you'll be walked through the process of:1. Registering the blob container as a Datastore to the Workspace2. Registering a tabular dataset to the Workspace 2.1 Data PreparationThe OJ data is available in the public blob container. The data is split to be used for training and for inferencing. For the current dataset, the data was split on time column ('WeekStarting') before and after '1992-5-28' .The container has 'oj-data-tabular' and 'oj-inference-tabular' folders that contains training and inference data respectively for the 11,973 models. It also has 'oj-data-small-tabular' and 'oj-inference-small-tabular' folders that has training and inference data for 10 models.To create the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) needed for the ParallelRunStep, you first need to register the blob container to the workspace. To use your own data, put your own data in a blobstore folder. As shown it can be one file or multiple files. We can then register datastore using that blob as shown below. 
How sample data in blob store looks like['oj-data-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-data-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container) 2.2 Register the blob container as DataStoreA Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.In this next step, we will be registering blob storage as datastore to the Workspace.
###Code
from azureml.core import Datastore
# Please change the following to point to your own blob container and pass in account_key
blob_datastore_name = "automl_many_models"
container_name = "automl-sample-notebook-data"
account_name = "automlsamplenotebookdata"
oj_datastore = Datastore.register_azure_blob_container(
workspace=ws,
datastore_name=blob_datastore_name,
container_name=container_name,
account_name=account_name,
create_if_not_exists=True,
)
###Output
_____no_output_____
###Markdown
2.3 Using tabular datasets Now that the datastore is available from the Workspace, [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) can be created. Datasets in Azure Machine Learning are references to specific data in a Datastore. We are using TabularDataset so that users whose data is in one or many files (*.parquet or *.csv) and has not yet been split up by the group columns needed for training can still do so, using the out-of-the-box 'partition_by' feature of TabularDataset.
###Code
from azureml.core import Dataset
ds_name_small = "oj-data-small-tabular"
input_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(ds_name_small + "/"), validate=False
)
inference_name_small = "oj-inference-small-tabular"
inference_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(inference_name_small + "/"), validate=False
)
###Output
_____no_output_____
###Markdown
3.0 Build the training pipelineNow that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist. Choose a compute targetYou will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targetsamlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\*\*Creation of AmlCompute takes approximately 5 minutes.**If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota.
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
# Name your cluster
compute_name = "mm-compute"
if compute_name in ws.compute_targets:
compute_target = ws.compute_targets[compute_name]
if compute_target and type(compute_target) is AmlCompute:
print("Found compute target: " + compute_name)
else:
print("Creating a new compute target...")
provisioning_config = AmlCompute.provisioning_configuration(
vm_size="STANDARD_D16S_V3", max_nodes=20
)
# Create the compute target
compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)
# Can poll for a minimum number of nodes and for a specific timeout.
# If no min node count is provided it will use the scale settings for the cluster
compute_target.wait_for_completion(
show_output=True, min_node_count=None, timeout_in_minutes=20
)
# For a more detailed view of current cluster status, use the 'status' property
print(compute_target.status.serialize())
###Output
_____no_output_____
###Markdown
Set up training parametersThis dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.| Property | Description|| :--------------- | :------------------- || **task** | forecasting || **primary_metric** | This is the metric that you want to optimize. Forecasting supports the following primary metrics: spearman_correlation, normalized_root_mean_squared_error, r2_score, normalized_mean_absolute_error || **blocked_models** | Blocked models won't be used by AutoML. || **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. || **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. || **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. || **label_column_name** | The name of the label column. || **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. || **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. || **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. || **time_column_name** | The name of your time column. || **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. || **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. || **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). || **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. || **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |
###Code
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsTrainParameters,
)
partition_column_names = ["Store", "Brand"]
automl_settings = {
"task": "forecasting",
"primary_metric": "normalized_root_mean_squared_error",
"iteration_timeout_minutes": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value
"iterations": 15,
"experiment_timeout_hours": 0.25,
"label_column_name": "Quantity",
"n_cross_validations": 3,
"time_column_name": "WeekStarting",
"drop_column_names": "Revenue",
"max_horizon": 6,
"grain_column_names": partition_column_names,
"track_child_runs": False,
}
mm_paramters = ManyModelsTrainParameters(
automl_settings=automl_settings, partition_column_names=partition_column_names
)
###Output
_____no_output_____
###Markdown
Set up many models pipeline Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based on the number of cores of the compute VM. The node_count will determine the number of master nodes to use; increasing the node count will speed up the training process.| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for training. || **train_data** | The file dataset to be used as input to the training run. || **node_count** | The number of compute nodes to be used for running the user script. We recommend starting with 3 and increasing the node_count if the training time is taking too long. || **process_count_per_node** | Process count per node; we recommend a 2:1 ratio of cores to processes per node, e.g. if a node has 16 cores, configure a process count of 8 or less for optimal performance. || **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(
experiment=experiment,
train_data=input_ds_small,
compute_target=compute_target,
node_count=2,
process_count_per_node=8,
run_invocation_timeout=920,
train_pipeline_parameters=mm_paramters,
)
from azureml.pipeline.core import Pipeline
training_pipeline = Pipeline(ws, steps=training_pipeline_steps)
###Output
_____no_output_____
###Markdown
Submit the pipeline to runNext we submit our pipeline to run. The whole training pipeline takes about 40m using a STANDARD_D16S_V3 VM with our current ParallelRunConfig setting.
###Code
training_run = experiment.submit(training_pipeline)
training_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
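###Markdown
Before moving on you can also check the run programmatically; the cell below is an optional sketch using the standard Run API (and the azureml-widgets package, if it is installed, for an interactive view).
###Code
# Overall status of the training pipeline run, e.g. 'Running', 'Finished' or 'Failed'
print(training_run.get_status())

# Optional: interactive run monitor (requires the azureml-widgets package)
# from azureml.widgets import RunDetails
# RunDetails(training_run).show()
###Output
_____no_output_____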
###Markdown
Check the run status; if training_run is in a completed state, continue to forecasting. If training_run is in another state, check the portal for failures. 5.0 Publish and schedule the train pipeline (Optional) 5.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish it so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline = training_pipeline.publish(name = 'automl_train_many_models',
# description = 'train many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
###Markdown
5.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# training_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_training_recurring_schedule",
# description="Schedule Training Pipeline to run on the first day of every month",
# pipeline_id=training_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____
###Markdown
6.0 Forecasting Set up output dataset for inference dataOutput of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset.
###Code
from azureml.data import OutputFileDatasetConfig
output_inference_data_ds = OutputFileDatasetConfig(
name="many_models_inference_output", destination=(dstore, "oj/inference_data/")
).register_on_complete(name="oj_inference_data_ds")
###Output
_____no_output_____
###Markdown
For many models we need to provide the ManyModelsInferenceParameters object. ManyModelsInferenceParameters arguments| Property | Description|| :--------------- | :------------------- || **partition_column_names** | List of column names that identify groups. || **target_column_name** | \[Optional] Column name only if the inference dataset has the target. || **time_column_name** | \[Optional] Column name only if it is timeseries. || **many_models_run_id** | \[Optional] Many models run id where models were trained. | get_many_models_batch_inference_steps arguments| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for inference run. || **inference_data** | The data to use for inferencing. It should be the same schema as used for training. || **compute_target** | The compute target that runs the inference pipeline. || **node_count** | The number of compute nodes to be used for running the user script. We recommend starting with the number of cores per node (varies by compute sku). || **process_count_per_node** | \[Optional] The number of processes per node, by default it's 4. || **train_run_id** | \[Optional] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. || **train_experiment_name** | \[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiment as the inference pipeline. |
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsInferenceParameters,
)
mm_parameters = ManyModelsInferenceParameters(
partition_column_names=["Store", "Brand"],
time_column_name="WeekStarting",
target_column_name="Quantity",
)
inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(
experiment=experiment,
inference_data=inference_ds_small,
node_count=2,
process_count_per_node=8,
compute_target=compute_target,
run_invocation_timeout=300,
output_datastore=output_inference_data_ds,
train_run_id=training_run.id,
train_experiment_name=training_run.experiment.name,
inference_pipeline_parameters=mm_parameters,
)
from azureml.pipeline.core import Pipeline
inference_pipeline = Pipeline(ws, steps=inference_steps)
inference_run = experiment.submit(inference_pipeline)
inference_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
###Markdown
Retrieve resultsThe forecasting pipeline forecasts the orange juice quantity for each Store and Brand. The pipeline returns one file with the predictions for each store and outputs the result to the forecasting_output Blob container. The details of the blob container are listed in 'forecasting_output.txt' under Outputs+logs. The following code snippet:1. Downloads the contents of the output folder that is passed in the parallel run step, 2. Reads the parallel_run_step.txt file that has the predictions as a pandas dataframe, and 3. Displays the top 10 rows of the predictions.
###Code
from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline
forecasting_results_name = "forecasting_results"
forecasting_output_name = "many_models_inference_output"
forecast_file = get_output_from_mm_pipeline(
inference_run, forecasting_results_name, forecasting_output_name
)
df = pd.read_csv(forecast_file, delimiter=" ", header=None)
df.columns = [
"Week Starting",
"Store",
"Brand",
"Quantity",
"Advert",
"Price",
"Revenue",
"Predicted",
]
print(
"Prediction has ", df.shape[0], " rows. Here the first 10 rows are being displayed."
)
df.head(10)
###Output
_____no_output_____
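###Markdown
Because this inference dataset also contains the actual Quantity values, a quick sanity check of forecast quality can be computed directly on the dataframe. The cell below is an optional sketch using pandas; the MAPE metric and the zero-quantity filter are illustrative choices, not the official AutoML metrics.
###Code
# Quick sanity check: mean absolute percentage error (MAPE) of the predictions
results = df[df["Quantity"] != 0].copy()  # avoid division by zero
results["APE"] = (results["Quantity"] - results["Predicted"]).abs() / results["Quantity"].abs()
print("Overall MAPE: {:.2f}%".format(results["APE"].mean() * 100))
# Per-series view, grouped by the partition columns
print((results.groupby(["Store", "Brand"])["APE"].mean() * 100).head())
###Output
_____no_output_____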
###Markdown
7.0 Publish and schedule the inference pipeline (Optional) 7.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline_inf = inference_pipeline.publish(name = 'automl_forecast_many_models',
# description = 'forecast many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
###Markdown
7.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain or forecast models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# forecasting_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_forecasting_recurring_schedule",
# description="Schedule Forecasting Pipeline to run on the first day of every week",
# pipeline_id=forecasting_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Many Models - Automated ML**_Generate many models time series forecasts with Automated Machine Learning_**--- For this notebook we are using a synthetic dataset portraying sales data to predict the quantity of a variety of product SKUs across several states, stores, and product categories.**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend setting the parallelism to a maximum of 320 runs per experiment per workspace. If users increase parallelism beyond this limit, they might encounter Too Many Requests errors (HTTP 429).** PrerequisitesYou'll need to create a Compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md). 1.0 Set up workspace, datastore, experiment
###Code
import azureml.core
from azureml.core import Workspace, Datastore
import pandas as pd
# Set up your workspace
ws = Workspace.from_config()
ws.get_details()
# Set up your datastores
dstore = ws.get_default_datastore()
output = {}
output["SDK version"] = azureml.core.VERSION
output["Subscription ID"] = ws.subscription_id
output["Workspace"] = ws.name
output["Resource Group"] = ws.resource_group
output["Location"] = ws.location
output["Default datastore name"] = dstore.name
pd.set_option("display.max_colwidth", None)
outputDf = pd.DataFrame(data=output, index=[""])
outputDf.T
###Output
_____no_output_____
###Markdown
Choose an experiment
###Code
from azureml.core import Experiment
experiment = Experiment(ws, "automl-many-models")
print("Experiment name: " + experiment.name)
###Output
_____no_output_____
###Markdown
2.0 DataThis notebook uses simulated orange juice sales data to walk you through the process of training many models on Azure Machine Learning using Automated ML. The time series data used in this example was simulated based on the University of Chicago's Dominick's Finer Foods dataset, which featured two years of sales of 3 different orange juice brands for individual stores. The full simulated dataset includes 3,991 stores with 3 orange juice brands each, thus allowing 11,973 models to be trained to showcase the power of the many models pattern. In this notebook, two datasets will be created: one with all 11,973 files and one with only 10 files that can be used to quickly test and debug. For each dataset, you'll be walked through the process of:1. Registering the blob container as a Datastore to the Workspace2. Registering a tabular dataset to the Workspace 2.1 Data PreparationThe OJ data is available in the public blob container. The data is split to be used for training and for inferencing. For the current dataset, the data was split on the time column ('WeekStarting') before and after '1992-5-28'. The container has 'oj-data-tabular' and 'oj-inference-tabular' folders that contain training and inference data, respectively, for the 11,973 models. It also has 'oj-data-small-tabular' and 'oj-inference-small-tabular' folders that have training and inference data for 10 models. To create the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) needed for the ParallelRunStep, you first need to register the blob container to the workspace. To use your own data, put your own data in a blobstore folder. As shown, it can be one file or multiple files. We can then register a datastore using that blob as shown below.
How sample data in blob store looks like['oj-data-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-data-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container) 2.2 Register the blob container as DataStoreA Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.In this next step, we will be registering blob storage as datastore to the Workspace.
###Code
from azureml.core import Datastore
# Please change the following to point to your own blob container and pass in account_key
blob_datastore_name = "automl_many_models"
container_name = "automl-sample-notebook-data"
account_name = "automlsamplenotebookdata"
oj_datastore = Datastore.register_azure_blob_container(
workspace=ws,
datastore_name=blob_datastore_name,
container_name=container_name,
account_name=account_name,
create_if_not_exists=True,
)
###Output
_____no_output_____
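###Markdown
Once registered, the datastore can also be looked up by name from any later session that has access to the Workspace. The short cell below is optional and only illustrates this lookup, reusing the datastore name registered above.
###Code
from azureml.core import Datastore

# Retrieve the datastore registered above by its name
oj_datastore = Datastore.get(ws, blob_datastore_name)
print(oj_datastore.name, oj_datastore.container_name)
###Output
_____no_output_____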
###Markdown
2.3 Using tabular datasets Now that the datastore is available from the Workspace, [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) can be created. Datasets in Azure Machine Learning are references to specific data in a Datastore. We are using TabularDataset so that users whose data lives in one or many files (*.parquet or *.csv) and has not yet been split up by the group columns needed for training can do so with the out-of-box 'partition_by' feature of TabularDataset (a brief sketch follows the next code cell).
###Code
from azureml.core import Dataset
ds_name_small = "oj-data-small-tabular"
input_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(ds_name_small + "/"), validate=False
)
inference_name_small = "oj-inference-small-tabular"
inference_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(inference_name_small + "/"), validate=False
)
###Output
_____no_output_____
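###Markdown
If your own data is not yet organized by the grouping columns, the 'partition_by' feature referenced above can write a partitioned copy of a TabularDataset back to a datastore. The cell below is a minimal, optional sketch: the destination folder 'oj_partitioned' and the dataset name are illustrative, and the call assumes the TabularDataset.partition_by API available in recent azureml-core versions.
###Code
from azureml.data.datapath import DataPath

# Illustrative only: write a copy of the training data partitioned by Store and Brand
# to the default datastore under a hypothetical 'oj_partitioned' folder.
partitioned_ds_small = input_ds_small.partition_by(
    partition_keys=["Store", "Brand"],
    target=DataPath(dstore, "oj_partitioned"),
    name="oj_data_small_partitioned",
)
###Output
_____no_output_____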
###Markdown
3.0 Build the training pipelineNow that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist. Choose a compute targetYou will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targetsamlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\*\*Creation of AmlCompute takes approximately 5 minutes.**If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota.
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
# Name your cluster
compute_name = "mm-compute"
if compute_name in ws.compute_targets:
compute_target = ws.compute_targets[compute_name]
if compute_target and type(compute_target) is AmlCompute:
print("Found compute target: " + compute_name)
else:
print("Creating a new compute target...")
provisioning_config = AmlCompute.provisioning_configuration(
vm_size="STANDARD_D16S_V3", max_nodes=20
)
# Create the compute target
compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)
# Can poll for a minimum number of nodes and for a specific timeout.
# If no min node count is provided it will use the scale settings for the cluster
compute_target.wait_for_completion(
show_output=True, min_node_count=None, timeout_in_minutes=20
)
# For a more detailed view of current cluster status, use the 'status' property
print(compute_target.status.serialize())
###Output
_____no_output_____
###Markdown
Set up training parametersThis dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.| Property | Description|| :--------------- | :------------------- || **task** | forecasting || **primary_metric** | This is the metric that you want to optimize. Forecasting supports the following primary metrics: spearman_correlation, normalized_root_mean_squared_error, r2_score, normalized_mean_absolute_error || **blocked_models** | Blocked models won't be used by AutoML. || **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. || **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. || **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. || **label_column_name** | The name of the label column. || **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. || **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. || **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. || **time_column_name** | The name of your time column. || **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. || **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. || **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). || **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. || **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |
###Code
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsTrainParameters,
)
partition_column_names = ["Store", "Brand"]
automl_settings = {
"task": "forecasting",
"primary_metric": "normalized_root_mean_squared_error",
"iteration_timeout_minutes": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value
"iterations": 15,
"experiment_timeout_hours": 0.25,
"label_column_name": "Quantity",
"n_cross_validations": 3,
"time_column_name": "WeekStarting",
"drop_column_names": "Revenue",
"max_horizon": 6,
"grain_column_names": partition_column_names,
"track_child_runs": False,
}
mm_paramters = ManyModelsTrainParameters(
automl_settings=automl_settings, partition_column_names=partition_column_names
)
###Output
_____no_output_____
###Markdown
Set up many models pipeline Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based on the number of cores of the compute VM. The node_count will determine the number of master nodes to use; increasing the node count will speed up the training process.| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for training. || **train_data** | The file dataset to be used as input to the training run. || **node_count** | The number of compute nodes to be used for running the user script. We recommend starting with 3 and increasing the node_count if the training time is taking too long. || **process_count_per_node** | Process count per node; we recommend a 2:1 ratio of cores to processes per node, e.g. if a node has 16 cores, configure a process count of 8 or less for optimal performance. || **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(
experiment=experiment,
train_data=input_ds_small,
compute_target=compute_target,
node_count=2,
process_count_per_node=8,
run_invocation_timeout=920,
train_pipeline_parameters=mm_paramters,
)
from azureml.pipeline.core import Pipeline
training_pipeline = Pipeline(ws, steps=training_pipeline_steps)
###Output
_____no_output_____
###Markdown
Submit the pipeline to runNext we submit our pipeline to run. The whole training pipeline takes about 40m using a STANDARD_D16S_V3 VM with our current ParallelRunConfig setting.
###Code
training_run = experiment.submit(training_pipeline)
training_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
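###Markdown
Before moving on you can also check the run programmatically; the cell below is an optional sketch using the standard Run API (and the azureml-widgets package, if it is installed, for an interactive view).
###Code
# Overall status of the training pipeline run, e.g. 'Running', 'Finished' or 'Failed'
print(training_run.get_status())

# Optional: interactive run monitor (requires the azureml-widgets package)
# from azureml.widgets import RunDetails
# RunDetails(training_run).show()
###Output
_____no_output_____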
###Markdown
Check the run status; if training_run is in a completed state, continue to forecasting. If training_run is in another state, check the portal for failures. 5.0 Publish and schedule the train pipeline (Optional) 5.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish it so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline = training_pipeline.publish(name = 'automl_train_many_models',
# description = 'train many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
###Markdown
5.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# training_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_training_recurring_schedule",
# description="Schedule Training Pipeline to run on the first day of every month",
# pipeline_id=training_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____
###Markdown
6.0 Forecasting Set up output dataset for inference dataOutput of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset.
###Code
from azureml.data import OutputFileDatasetConfig
output_inference_data_ds = OutputFileDatasetConfig(
name="many_models_inference_output", destination=(dstore, "oj/inference_data/")
).register_on_complete(name="oj_inference_data_ds")
###Output
_____no_output_____
###Markdown
For many models we need to provide the ManyModelsInferenceParameters object. ManyModelsInferenceParameters arguments| Property | Description|| :--------------- | :------------------- || **partition_column_names** | List of column names that identify groups. || **target_column_name** | \[Optional] Column name only if the inference dataset has the target. || **time_column_name** | \[Optional] Column name only if it is timeseries. || **many_models_run_id** | \[Optional] Many models run id where models were trained. | get_many_models_batch_inference_steps arguments| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for inference run. || **inference_data** | The data to use for inferencing. It should be the same schema as used for training. || **compute_target** | The compute target that runs the inference pipeline. || **node_count** | The number of compute nodes to be used for running the user script. We recommend starting with the number of cores per node (varies by compute sku). || **process_count_per_node** | \[Optional] The number of processes per node, by default it's 4. || **train_run_id** | \[Optional] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. || **train_experiment_name** | \[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiment as the inference pipeline. |
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsInferenceParameters,
)
mm_parameters = ManyModelsInferenceParameters(
partition_column_names=["Store", "Brand"],
time_column_name="WeekStarting",
target_column_name="Quantity",
)
inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(
experiment=experiment,
inference_data=inference_ds_small,
node_count=2,
process_count_per_node=8,
compute_target=compute_target,
run_invocation_timeout=300,
output_datastore=output_inference_data_ds,
train_run_id=training_run.id,
train_experiment_name=training_run.experiment.name,
inference_pipeline_parameters=mm_parameters,
)
from azureml.pipeline.core import Pipeline
inference_pipeline = Pipeline(ws, steps=inference_steps)
inference_run = experiment.submit(inference_pipeline)
inference_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
###Markdown
Retrieve resultsThe forecasting pipeline forecasts the orange juice quantity for each Store and Brand. The pipeline returns one file with the predictions for each store and outputs the result to the forecasting_output Blob container. The details of the blob container are listed in 'forecasting_output.txt' under Outputs+logs. The following code snippet:1. Downloads the contents of the output folder that is passed in the parallel run step, 2. Reads the parallel_run_step.txt file that has the predictions as a pandas dataframe, and 3. Displays the top 10 rows of the predictions.
###Code
from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline
forecasting_results_name = "forecasting_results"
forecasting_output_name = "many_models_inference_output"
forecast_file = get_output_from_mm_pipeline(
inference_run, forecasting_results_name, forecasting_output_name
)
df = pd.read_csv(forecast_file, delimiter=" ", header=None)
df.columns = [
"Week Starting",
"Store",
"Brand",
"Quantity",
"Advert",
"Price",
"Revenue",
"Predicted",
]
print(
"Prediction has ", df.shape[0], " rows. Here the first 10 rows are being displayed."
)
df.head(10)
###Output
_____no_output_____
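###Markdown
Because this inference dataset also contains the actual Quantity values, a quick sanity check of forecast quality can be computed directly on the dataframe. The cell below is an optional sketch using pandas; the MAPE metric and the zero-quantity filter are illustrative choices, not the official AutoML metrics.
###Code
# Quick sanity check: mean absolute percentage error (MAPE) of the predictions
results = df[df["Quantity"] != 0].copy()  # avoid division by zero
results["APE"] = (results["Quantity"] - results["Predicted"]).abs() / results["Quantity"].abs()
print("Overall MAPE: {:.2f}%".format(results["APE"].mean() * 100))
# Per-series view, grouped by the partition columns
print((results.groupby(["Store", "Brand"])["APE"].mean() * 100).head())
###Output
_____no_output_____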
###Markdown
7.0 Publish and schedule the inference pipeline (Optional) 7.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline_inf = inference_pipeline.publish(name = 'automl_forecast_many_models',
# description = 'forecast many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
###Markdown
7.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain or forecast models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# forecasting_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_forecasting_recurring_schedule",
# description="Schedule Forecasting Pipeline to run on the first day of every week",
# pipeline_id=forecasting_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Many Models - Automated ML**_Generate many models time series forecasts with Automated Machine Learning_**--- For this notebook we are using a synthetic dataset portraying sales data to predict the quantity of a variety of product SKUs across several states, stores, and product categories.**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend setting the parallelism to a maximum of 320 runs per experiment per workspace. If users increase parallelism beyond this limit, they might encounter Too Many Requests errors (HTTP 429).** PrerequisitesYou'll need to create a Compute Instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md). 1.0 Set up workspace, datastore, experiment
###Code
import azureml.core
from azureml.core import Workspace, Datastore
import pandas as pd
# Set up your workspace
ws = Workspace.from_config()
ws.get_details()
# Set up your datastores
dstore = ws.get_default_datastore()
output = {}
output["SDK version"] = azureml.core.VERSION
output["Subscription ID"] = ws.subscription_id
output["Workspace"] = ws.name
output["Resource Group"] = ws.resource_group
output["Location"] = ws.location
output["Default datastore name"] = dstore.name
pd.set_option("display.max_colwidth", None)
outputDf = pd.DataFrame(data=output, index=[""])
outputDf.T
###Output
_____no_output_____
###Markdown
Choose an experiment
###Code
from azureml.core import Experiment
experiment = Experiment(ws, "automl-many-models")
print("Experiment name: " + experiment.name)
###Output
_____no_output_____
###Markdown
2.0 DataThis notebook uses simulated orange juice sales data to walk you through the process of training many models on Azure Machine Learning using Automated ML. The time series data used in this example was simulated based on the University of Chicago's Dominick's Finer Foods dataset, which featured two years of sales of 3 different orange juice brands for individual stores. The full simulated dataset includes 3,991 stores with 3 orange juice brands each, thus allowing 11,973 models to be trained to showcase the power of the many models pattern. In this notebook, two datasets will be created: one with all 11,973 files and one with only 10 files that can be used to quickly test and debug. For each dataset, you'll be walked through the process of:1. Registering the blob container as a Datastore to the Workspace2. Registering a tabular dataset to the Workspace 2.1 Data PreparationThe OJ data is available in the public blob container. The data is split to be used for training and for inferencing. For the current dataset, the data was split on the time column ('WeekStarting') before and after '1992-5-28'. The container has 'oj-data-tabular' and 'oj-inference-tabular' folders that contain training and inference data, respectively, for the 11,973 models. It also has 'oj-data-small-tabular' and 'oj-inference-small-tabular' folders that have training and inference data for 10 models. To create the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) needed for the ParallelRunStep, you first need to register the blob container to the workspace. To use your own data, put your own data in a blobstore folder. As shown, it can be one file or multiple files. We can then register a datastore using that blob as shown below.
How sample data in blob store looks like['oj-data-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-data-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container) 2.2 Register the blob container as DataStoreA Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.In this next step, we will be registering blob storage as datastore to the Workspace.
###Code
from azureml.core import Datastore
# Please change the following to point to your own blob container and pass in account_key
blob_datastore_name = "automl_many_models"
container_name = "automl-sample-notebook-data"
account_name = "automlsamplenotebookdata"
oj_datastore = Datastore.register_azure_blob_container(
workspace=ws,
datastore_name=blob_datastore_name,
container_name=container_name,
account_name=account_name,
create_if_not_exists=True,
)
###Output
_____no_output_____
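###Markdown
Once registered, the datastore can also be looked up by name from any later session that has access to the Workspace. The short cell below is optional and only illustrates this lookup, reusing the datastore name registered above.
###Code
from azureml.core import Datastore

# Retrieve the datastore registered above by its name
oj_datastore = Datastore.get(ws, blob_datastore_name)
print(oj_datastore.name, oj_datastore.container_name)
###Output
_____no_output_____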
###Markdown
2.3 Using tabular datasets Now that the datastore is available from the Workspace, [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) can be created. Datasets in Azure Machine Learning are references to specific data in a Datastore. We are using TabularDataset so that users whose data lives in one or many files (*.parquet or *.csv) and has not yet been split up by the group columns needed for training can do so with the out-of-box 'partition_by' feature of TabularDataset (a brief sketch follows the next code cell).
###Code
from azureml.core import Dataset
ds_name_small = "oj-data-small-tabular"
input_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(ds_name_small + "/"), validate=False
)
inference_name_small = "oj-inference-small-tabular"
inference_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(inference_name_small + "/"), validate=False
)
###Output
_____no_output_____
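###Markdown
If your own data is not yet organized by the grouping columns, the 'partition_by' feature referenced above can write a partitioned copy of a TabularDataset back to a datastore. The cell below is a minimal, optional sketch: the destination folder 'oj_partitioned' and the dataset name are illustrative, and the call assumes the TabularDataset.partition_by API available in recent azureml-core versions.
###Code
from azureml.data.datapath import DataPath

# Illustrative only: write a copy of the training data partitioned by Store and Brand
# to the default datastore under a hypothetical 'oj_partitioned' folder.
partitioned_ds_small = input_ds_small.partition_by(
    partition_keys=["Store", "Brand"],
    target=DataPath(dstore, "oj_partitioned"),
    name="oj_data_small_partitioned",
)
###Output
_____no_output_____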
###Markdown
3.0 Build the training pipelineNow that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist. Choose a compute targetYou will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targetsamlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\*\*Creation of AmlCompute takes approximately 5 minutes.**If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota.
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
# Name your cluster
compute_name = "mm-compute"
if compute_name in ws.compute_targets:
compute_target = ws.compute_targets[compute_name]
if compute_target and type(compute_target) is AmlCompute:
print("Found compute target: " + compute_name)
else:
print("Creating a new compute target...")
provisioning_config = AmlCompute.provisioning_configuration(
vm_size="STANDARD_D16S_V3", max_nodes=20
)
# Create the compute target
compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)
# Can poll for a minimum number of nodes and for a specific timeout.
# If no min node count is provided it will use the scale settings for the cluster
compute_target.wait_for_completion(
show_output=True, min_node_count=None, timeout_in_minutes=20
)
# For a more detailed view of current cluster status, use the 'status' property
print(compute_target.status.serialize())
###Output
_____no_output_____
###Markdown
Set up training parametersThis dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.| Property | Description|| :--------------- | :------------------- || **task** | forecasting || **primary_metric** | This is the metric that you want to optimize. Forecasting supports the following primary metrics: spearman_correlation, normalized_root_mean_squared_error, r2_score, normalized_mean_absolute_error || **blocked_models** | Blocked models won't be used by AutoML. || **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. || **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. || **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. || **label_column_name** | The name of the label column. || **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. || **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. || **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. || **time_column_name** | The name of your time column. || **enable_engineered_explanations** | Engineered feature explanations will be downloaded if enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. || **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. || **track_child_runs** | Flag to disable tracking of child runs. Only best run is tracked if the flag is set to False (this includes the model and metrics of the run). || **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale. || **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |
###Code
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsTrainParameters,
)
partition_column_names = ["Store", "Brand"]
automl_settings = {
"task": "forecasting",
"primary_metric": "normalized_root_mean_squared_error",
"iteration_timeout_minutes": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value
"iterations": 15,
"experiment_timeout_hours": 0.25,
"label_column_name": "Quantity",
"n_cross_validations": 3,
"time_column_name": "WeekStarting",
"drop_column_names": "Revenue",
"forecast_horizon": 6,
"time_series_id_column_names": partition_column_names,
"track_child_runs": False,
}
mm_paramters = ManyModelsTrainParameters(
automl_settings=automl_settings, partition_column_names=partition_column_names
)
###Output
_____no_output_____
###Markdown
Set up many models pipeline Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based on the number of cores of the compute VM. The node_count will determine the number of master nodes to use; increasing the node count will speed up the training process.| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for training. || **train_data** | The file dataset to be used as input to the training run. || **node_count** | The number of compute nodes to be used for running the user script. We recommend starting with 3 and increasing the node_count if the training time is taking too long. || **process_count_per_node** | Process count per node; we recommend a 2:1 ratio of cores to processes per node, e.g. if a node has 16 cores, configure a process count of 8 or less for optimal performance. || **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(
experiment=experiment,
train_data=input_ds_small,
compute_target=compute_target,
node_count=2,
process_count_per_node=8,
run_invocation_timeout=920,
train_pipeline_parameters=mm_paramters,
)
from azureml.pipeline.core import Pipeline
training_pipeline = Pipeline(ws, steps=training_pipeline_steps)
###Output
_____no_output_____
###Markdown
Submit the pipeline to runNext we submit our pipeline to run. The whole training pipeline takes about 40m using a STANDARD_D16S_V3 VM with our current ParallelRunConfig setting.
###Code
training_run = experiment.submit(training_pipeline)
training_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
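###Markdown
Before moving on you can also check the run programmatically; the cell below is an optional sketch using the standard Run API (and the azureml-widgets package, if it is installed, for an interactive view).
###Code
# Overall status of the training pipeline run, e.g. 'Running', 'Finished' or 'Failed'
print(training_run.get_status())

# Optional: interactive run monitor (requires the azureml-widgets package)
# from azureml.widgets import RunDetails
# RunDetails(training_run).show()
###Output
_____no_output_____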
###Markdown
Check the run status; if training_run is in a completed state, continue to forecasting. If training_run is in another state, check the portal for failures. 5.0 Publish and schedule the train pipeline (Optional) 5.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish it so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline = training_pipeline.publish(name = 'automl_train_many_models',
# description = 'train many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
###Markdown
5.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# training_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_training_recurring_schedule",
# description="Schedule Training Pipeline to run on the first day of every month",
# pipeline_id=training_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____
###Markdown
6.0 Forecasting Set up output dataset for inference dataOutput of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset.
###Code
from azureml.data import OutputFileDatasetConfig
output_inference_data_ds = OutputFileDatasetConfig(
name="many_models_inference_output", destination=(dstore, "oj/inference_data/")
).register_on_complete(name="oj_inference_data_ds")
###Output
_____no_output_____
###Markdown
For many models we need to provide the ManyModelsInferenceParameters object. ManyModelsInferenceParameters arguments| Property | Description|| :--------------- | :------------------- || **partition_column_names** | List of column names that identify groups. || **target_column_name** | \[Optional] Column name only if the inference dataset has the target. || **time_column_name** | \[Optional] Column name only if it is timeseries. || **many_models_run_id** | \[Optional] Many models run id where models were trained. | get_many_models_batch_inference_steps arguments| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for inference run. || **inference_data** | The data to use for inferencing. It should be the same schema as used for training. || **compute_target** | The compute target that runs the inference pipeline. || **node_count** | The number of compute nodes to be used for running the user script. We recommend starting with the number of cores per node (varies by compute sku). || **process_count_per_node** | \[Optional] The number of processes per node, by default it's 4. || **train_run_id** | \[Optional] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. || **train_experiment_name** | \[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiment as the inference pipeline. |
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsInferenceParameters,
)
mm_parameters = ManyModelsInferenceParameters(
partition_column_names=["Store", "Brand"],
time_column_name="WeekStarting",
target_column_name="Quantity",
)
inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(
experiment=experiment,
inference_data=inference_ds_small,
node_count=2,
process_count_per_node=8,
compute_target=compute_target,
run_invocation_timeout=300,
output_datastore=output_inference_data_ds,
train_run_id=training_run.id,
train_experiment_name=training_run.experiment.name,
inference_pipeline_parameters=mm_parameters,
)
from azureml.pipeline.core import Pipeline
inference_pipeline = Pipeline(ws, steps=inference_steps)
inference_run = experiment.submit(inference_pipeline)
inference_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
###Markdown
Retrieve resultsThe forecasting pipeline forecasts the orange juice quantity for each Store and Brand. The pipeline returns one file with the predictions for each store and outputs the result to the forecasting_output Blob container. The details of the blob container are listed in 'forecasting_output.txt' under Outputs+logs. The following code snippet:1. Downloads the contents of the output folder that is passed in the parallel run step, 2. Reads the parallel_run_step.txt file that has the predictions as a pandas dataframe, and 3. Displays the top 10 rows of the predictions.
###Code
from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline
forecasting_results_name = "forecasting_results"
forecasting_output_name = "many_models_inference_output"
forecast_file = get_output_from_mm_pipeline(
inference_run, forecasting_results_name, forecasting_output_name
)
df = pd.read_csv(forecast_file, delimiter=" ", header=None)
df.columns = [
"Week Starting",
"Store",
"Brand",
"Quantity",
"Advert",
"Price",
"Revenue",
"Predicted",
]
print(
"Prediction has ", df.shape[0], " rows. Here the first 10 rows are being displayed."
)
df.head(10)
###Output
_____no_output_____
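###Markdown
As an optional sanity check on forecast quality (not part of the original pipeline), the sketch below computes a simple mean absolute error per Store/Brand series, assuming the `df` built in the previous cell contains both the actual `Quantity` and the `Predicted` columns.
###Code
# Illustrative only: per-series mean absolute error from the predictions dataframe above.
abs_error = (df["Quantity"] - df["Predicted"]).abs()
per_series_mae = (
    df.assign(abs_error=abs_error)
    .groupby(["Store", "Brand"])["abs_error"]
    .mean()
    .rename("MAE")
    .sort_values(ascending=False)
)
per_series_mae.head(10)
###Output
_____no_output_____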
###Markdown
7.0 Publish and schedule the inference pipeline (Optional) 7.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline_inf = inference_pipeline.publish(name = 'automl_forecast_many_models',
# description = 'forecast many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
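###Markdown
If you do publish the inference pipeline, it can be resubmitted later without rebuilding it. The cell below is a minimal sketch, assuming the publish step above has been uncommented and run so that `published_pipeline_inf` exists (otherwise replace it with a known pipeline id).
###Code
# Sketch: re-submit a published pipeline by id at a later point in time.
# from azureml.pipeline.core import PublishedPipeline
# fetched_pipeline = PublishedPipeline.get(ws, id=published_pipeline_inf.id)
# pipeline_run = fetched_pipeline.submit(ws, experiment_name=experiment.name)
# pipeline_run.wait_for_completion(show_output=False)
###Output
_____no_output_____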
###Markdown
7.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain or forecast models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# forecasting_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_forecasting_recurring_schedule",
# description="Schedule Forecasting Pipeline to run on the first day of every week",
# pipeline_id=forecasting_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____
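###Markdown
Besides the time-based recurrence shown above, schedules can also react to changes in a datastore. The commented sketch below assumes `forecasting_pipeline_id` from the cell above; the monitored path and polling interval (in minutes) are placeholders.
###Code
# Sketch: a change-based (datastore-reactive) schedule that triggers the forecasting
# pipeline when new files land under the monitored path.
# from azureml.pipeline.core import Schedule
# reactive_schedule = Schedule.create(ws, name="automl_forecasting_on_new_data",
#                                     description="Run forecasting when new inference data arrives",
#                                     pipeline_id=forecasting_pipeline_id,
#                                     experiment_name=experiment.name,
#                                     datastore=dstore,
#                                     path_on_datastore="oj/inference_data/",
#                                     polling_interval=60)
###Output
_____no_output_____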
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License.  Many Models - Automated ML**_Generate many models time series forecasts with Automated Machine Learning_**--- For this notebook we are using a synthetic dataset portraying sales data to predict the quantity of a variety of product SKUs across several states, stores, and product categories.**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend setting the parallelism to a maximum of 320 runs per experiment per workspace. If users increase this limit to get more parallelism, they might encounter Too Many Requests errors (HTTP 429).** PrerequisitesYou'll need to create a compute instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md). 1.0 Set up workspace, datastore, experiment
###Code
import azureml.core
from azureml.core import Workspace, Datastore
import pandas as pd
# Set up your workspace
ws = Workspace.from_config()
ws.get_details()
# Set up your datastores
dstore = ws.get_default_datastore()
output = {}
output["SDK version"] = azureml.core.VERSION
output["Subscription ID"] = ws.subscription_id
output["Workspace"] = ws.name
output["Resource Group"] = ws.resource_group
output["Location"] = ws.location
output["Default datastore name"] = dstore.name
pd.set_option("display.max_colwidth", None)  # None shows full cell contents; -1 is deprecated in newer pandas
outputDf = pd.DataFrame(data=output, index=[""])
outputDf.T
###Output
_____no_output_____
###Markdown
Choose an experiment
###Code
from azureml.core import Experiment
experiment = Experiment(ws, "automl-many-models")
print("Experiment name: " + experiment.name)
###Output
_____no_output_____
###Markdown
2.0 DataThis notebook uses simulated orange juice sales data to walk you through the process of training many models on Azure Machine Learning using Automated ML. The time series data used in this example was simulated based on the University of Chicago's Dominick's Finer Foods dataset, which featured two years of sales of 3 different orange juice brands for individual stores. The full simulated dataset includes 3,991 stores with 3 orange juice brands each, thus allowing 11,973 models to be trained to showcase the power of the many models pattern. In this notebook, two datasets will be created: one with all 11,973 files and one with only 10 files that can be used to quickly test and debug. For each dataset, you'll be walked through the process of:1. Registering the blob container as a Datastore to the Workspace2. Registering a tabular dataset to the Workspace 2.1 Data PreparationThe OJ data is available in the public blob container. The data is split to be used for training and for inferencing. For the current dataset, the data was split on the time column ('WeekStarting') before and after '1992-5-28'.The container has 'oj-data-tabular' and 'oj-inference-tabular' folders that contain training and inference data, respectively, for the 11,973 models. It also has 'oj-data-small-tabular' and 'oj-inference-small-tabular' folders that have training and inference data for 10 models.To create the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) needed for the ParallelRunStep, you first need to register the blob container to the workspace. To use your own data, put your own data in a blobstore folder. As shown, it can be one file or multiple files. We can then register a datastore using that blob as shown below.
How sample data in blob store looks like['oj-data-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-data-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container) 2.2 Register the blob container as DataStoreA Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.In this next step, we will be registering blob storage as datastore to the Workspace.
###Code
from azureml.core import Datastore
# Please change the following to point to your own blob container and pass in account_key
blob_datastore_name = "automl_many_models"
container_name = "automl-sample-notebook-data"
account_name = "automlsamplenotebookdata"
oj_datastore = Datastore.register_azure_blob_container(
workspace=ws,
datastore_name=blob_datastore_name,
container_name=container_name,
account_name=account_name,
create_if_not_exists=True,
)
###Output
_____no_output_____
###Markdown
2.3 Using tabular datasets Now that the datastore is available from the Workspace, [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) can be created. Datasets in Azure Machine Learning are references to specific data in a Datastore. We are using TabularDataset so that users whose data is in one or many files (*.parquet or *.csv) and has not yet been split up by the group columns needed for training can do that split using the out-of-the-box support for the 'partition_by' feature of TabularDataset shown in section 5.0 below.
###Code
from azureml.core import Dataset
ds_name_small = "oj-data-small-tabular"
input_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(ds_name_small + "/"), validate=False
)
inference_name_small = "oj-inference-small-tabular"
inference_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(inference_name_small + "/"), validate=False
)
###Output
_____no_output_____
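###Markdown
The section above mentions the 'partition_by' feature of TabularDataset for data that has not yet been split up by the group columns. The commented sketch below shows how such a split could look for this dataset; the target folder and dataset name are placeholders, and running it would write a partitioned copy of the data to the datastore.
###Code
# Sketch: partition an unpartitioned TabularDataset by the grouping columns
# used later for many models training.
# partitioned_ds = input_ds_small.partition_by(
#     partition_keys=["Store", "Brand"],
#     target=(dstore, "oj/partitioned_train_data"),
#     name="oj_data_small_partitioned",
# )
###Output
_____no_output_____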
###Markdown
3.0 Build the training pipelineNow that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist. Choose a compute targetYou will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targetsamlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\*\*Creation of AmlCompute takes approximately 5 minutes.**If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota.
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
# Name your cluster
compute_name = "mm-compute"
if compute_name in ws.compute_targets:
compute_target = ws.compute_targets[compute_name]
if compute_target and type(compute_target) is AmlCompute:
print("Found compute target: " + compute_name)
else:
print("Creating a new compute target...")
provisioning_config = AmlCompute.provisioning_configuration(
vm_size="STANDARD_D16S_V3", max_nodes=20
)
# Create the compute target
compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)
# Can poll for a minimum number of nodes and for a specific timeout.
# If no min node count is provided it will use the scale settings for the cluster
compute_target.wait_for_completion(
show_output=True, min_node_count=None, timeout_in_minutes=20
)
# For a more detailed view of current cluster status, use the 'status' property
print(compute_target.status.serialize())
###Output
_____no_output_____
###Markdown
Set up training parametersThis dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.| Property | Description|| :--------------- | :------------------- || **task** | forecasting || **primary_metric** | This is the metric that you want to optimize. Forecasting supports the following primary metrics: spearman_correlation, normalized_root_mean_squared_error, r2_score, normalized_mean_absolute_error || **blocked_models** | Blocked models won't be used by AutoML. || **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. || **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. || **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. || **label_column_name** | The name of the label column. || **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. || **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. || **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. || **time_column_name** | The name of your time column. || **enable_engineered_explanations** | Engineered feature explanations will be downloaded if the enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. || **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. || **track_child_runs** | Flag to disable tracking of child runs. Only the best run is tracked if the flag is set to False (this includes the model and metrics of the run). || **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training; this helps reduce throttling when training at large scale. || **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |
###Code
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsTrainParameters,
)
partition_column_names = ["Store", "Brand"]
automl_settings = {
"task": "forecasting",
"primary_metric": "normalized_root_mean_squared_error",
"iteration_timeout_minutes": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value
"iterations": 15,
"experiment_timeout_hours": 0.25,
"label_column_name": "Quantity",
"n_cross_validations": 3,
"time_column_name": "WeekStarting",
"drop_column_names": "Revenue",
"max_horizon": 6,
"grain_column_names": partition_column_names,
"track_child_runs": False,
}
mm_paramters = ManyModelsTrainParameters(
automl_settings=automl_settings, partition_column_names=partition_column_names
)
###Output
_____no_output_____
###Markdown
Set up many models pipeline Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based on the number of cores of the compute VM. The node_count will determine the number of master nodes to use; increasing the node count will speed up the training process.| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for training. || **train_data** | The file dataset to be used as input to the training run. || **node_count** | The number of compute nodes to be used for running the user script. We recommend starting with 3 and increasing the node_count if the training time is taking too long. || **process_count_per_node** | Process count per node; we recommend a 2:1 ratio of number of cores to number of processes per node, e.g. if a node has 16 cores then configure a process count of 8 or fewer per node for optimal performance. || **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(
experiment=experiment,
train_data=input_ds_small,
compute_target=compute_target,
node_count=2,
process_count_per_node=8,
run_invocation_timeout=920,
train_pipeline_parameters=mm_paramters,
)
from azureml.pipeline.core import Pipeline
training_pipeline = Pipeline(ws, steps=training_pipeline_steps)
###Output
_____no_output_____
###Markdown
Submit the pipeline to runNext we submit our pipeline to run. The whole training pipeline takes about 40m using a STANDARD_D16S_V3 VM with our current ParallelRunConfig setting.
###Code
training_run = experiment.submit(training_pipeline)
training_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
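###Markdown
Before moving on, you can optionally inspect the pipeline run and its individual steps directly from the SDK (the same information is available in the portal).
###Code
# Optional: print the overall pipeline run status and the status of each step.
print("Pipeline run status:", training_run.get_status())
for step_run in training_run.get_steps():
    print(step_run.id, step_run.get_status())
###Output
_____no_output_____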
###Markdown
Check the run status; if training_run is in a completed state, continue to forecasting. If training_run is in another state, check the portal for failures. 5.0 Publish and schedule the train pipeline (Optional) 5.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline = training_pipeline.publish(name = 'automl_train_many_models',
# description = 'train many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
###Markdown
5.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# training_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_training_recurring_schedule",
# description="Schedule Training Pipeline to run on the first day of every month",
# pipeline_id=training_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____
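###Markdown
Once schedules exist, a little housekeeping helps avoid unwanted retraining runs. The commented sketch below lists the schedules in the workspace and shows how one could be disabled; the schedule id is a placeholder.
###Code
# Sketch: list existing schedules and disable one that is no longer needed.
# from azureml.pipeline.core import Schedule
# for sched in Schedule.list(ws):
#     print(sched.id, sched.name, sched.status)
# Schedule.get(ws, "<schedule-id>").disable(wait_for_provisioning=True)
###Output
_____no_output_____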
###Markdown
6.0 Forecasting Set up output dataset for inference dataOutput of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset.
###Code
from azureml.data import OutputFileDatasetConfig
output_inference_data_ds = OutputFileDatasetConfig(
name="many_models_inference_output", destination=(dstore, "oj/inference_data/")
).register_on_complete(name="oj_inference_data_ds")
###Output
_____no_output_____
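###Markdown
Because the output is registered on completion, it can be retrieved later by name like any other dataset. The commented sketch below assumes the inference pipeline further down has already completed; the local download folder is a placeholder.
###Code
# Sketch: fetch the registered inference output once the run below has completed.
# from azureml.core import Dataset
# registered_output = Dataset.get_by_name(ws, name="oj_inference_data_ds")
# registered_output.download(target_path="./inference_output", overwrite=True)
###Output
_____no_output_____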
###Markdown
For many models we need to provide the ManyModelsInferenceParameters object. ManyModelsInferenceParameters arguments| Property | Description|| :--------------- | :------------------- || **partition_column_names** | List of column names that identify groups. || **target_column_name** | \[Optional] Column name only if the inference dataset has the target. || **time_column_name** | \[Optional] Column name only if it is timeseries. || **many_models_run_id** | \[Optional] Many models run id where models were trained. | get_many_models_batch_inference_steps arguments| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for the inference run. || **inference_data** | The data to use for inferencing. It should be the same schema as used for training. || **compute_target** | The compute target that runs the inference pipeline. || **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). || **process_count_per_node** | \[Optional] The number of processes per node, by default it's 4. || **train_run_id** | \[Optional] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. || **train_experiment_name** | \[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiment as the inference pipeline. |
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsInferenceParameters,
)
mm_parameters = ManyModelsInferenceParameters(
partition_column_names=["Store", "Brand"],
time_column_name="WeekStarting",
target_column_name="Quantity",
)
inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(
experiment=experiment,
inference_data=inference_ds_small,
node_count=2,
process_count_per_node=8,
compute_target=compute_target,
run_invocation_timeout=300,
output_datastore=output_inference_data_ds,
train_run_id=training_run.id,
train_experiment_name=training_run.experiment.name,
inference_pipeline_parameters=mm_parameters,
)
from azureml.pipeline.core import Pipeline
inference_pipeline = Pipeline(ws, steps=inference_steps)
inference_run = experiment.submit(inference_pipeline)
inference_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
###Markdown
Retrieve resultsThe forecasting pipeline forecasts the orange juice quantity for a Store by Brand. The pipeline returns one file with the predictions for each store and outputs the result to the forecasting_output Blob container. The details of the blob container are listed in 'forecasting_output.txt' under Outputs+logs. The following code snippet:1. Downloads the contents of the output folder that was passed to the parallel run step 2. Reads the parallel_run_step.txt file that has the predictions as a pandas dataframe and 3. Displays the top 10 rows of the predictions
###Code
from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline
forecasting_results_name = "forecasting_results"
forecasting_output_name = "many_models_inference_output"
forecast_file = get_output_from_mm_pipeline(
inference_run, forecasting_results_name, forecasting_output_name
)
df = pd.read_csv(forecast_file, delimiter=" ", header=None)
df.columns = [
"Week Starting",
"Store",
"Brand",
"Quantity",
"Advert",
"Price",
"Revenue",
"Predicted",
]
print(
"Prediction has ", df.shape[0], " rows. Here the first 10 rows are being displayed."
)
df.head(10)
###Output
_____no_output_____
###Markdown
7.0 Publish and schedule the inference pipeline (Optional) 7.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline_inf = inference_pipeline.publish(name = 'automl_forecast_many_models',
# description = 'forecast many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
###Markdown
7.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain or forecast models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# forecasting_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_forecasting_recurring_schedule",
# description="Schedule Forecasting Pipeline to run on the first day of every week",
# pipeline_id=forecasting_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License.  Many Models - Automated ML**_Generate many models time series forecasts with Automated Machine Learning_**--- For this notebook we are using a synthetic dataset portraying sales data to predict the quantity of a variety of product SKUs across several states, stores, and product categories.**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend setting the parallelism to a maximum of 320 runs per experiment per workspace. If users increase this limit to get more parallelism, they might encounter Too Many Requests errors (HTTP 429).** PrerequisitesYou'll need to create a compute instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md). 1.0 Set up workspace, datastore, experiment
###Code
import azureml.core
from azureml.core import Workspace, Datastore
import pandas as pd
# Set up your workspace
ws = Workspace.from_config()
ws.get_details()
# Set up your datastores
dstore = ws.get_default_datastore()
output = {}
output["SDK version"] = azureml.core.VERSION
output["Subscription ID"] = ws.subscription_id
output["Workspace"] = ws.name
output["Resource Group"] = ws.resource_group
output["Location"] = ws.location
output["Default datastore name"] = dstore.name
output["SDK Version"] = azureml.core.VERSION
pd.set_option("display.max_colwidth", None)
outputDf = pd.DataFrame(data=output, index=[""])
outputDf.T
###Output
_____no_output_____
###Markdown
Choose an experiment
###Code
from azureml.core import Experiment
experiment = Experiment(ws, "automl-many-models")
print("Experiment name: " + experiment.name)
###Output
_____no_output_____
###Markdown
2.0 DataThis notebook uses simulated orange juice sales data to walk you through the process of training many models on Azure Machine Learning using Automated ML. The time series data used in this example was simulated based on the University of Chicago's Dominick's Finer Foods dataset, which featured two years of sales of 3 different orange juice brands for individual stores. The full simulated dataset includes 3,991 stores with 3 orange juice brands each, thus allowing 11,973 models to be trained to showcase the power of the many models pattern. In this notebook, two datasets will be created: one with all 11,973 files and one with only 10 files that can be used to quickly test and debug. For each dataset, you'll be walked through the process of:1. Registering the blob container as a Datastore to the Workspace2. Registering a tabular dataset to the Workspace 2.1 Data PreparationThe OJ data is available in the public blob container. The data is split to be used for training and for inferencing. For the current dataset, the data was split on the time column ('WeekStarting') before and after '1992-5-28'.The container has 'oj-data-tabular' and 'oj-inference-tabular' folders that contain training and inference data, respectively, for the 11,973 models. It also has 'oj-data-small-tabular' and 'oj-inference-small-tabular' folders that have training and inference data for 10 models.To create the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) needed for the ParallelRunStep, you first need to register the blob container to the workspace. To use your own data, put your own data in a blobstore folder. As shown, it can be one file or multiple files. We can then register a datastore using that blob as shown below.
How sample data in blob store looks like['oj-data-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-data-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container) 2.2 Register the blob container as DataStoreA Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.In this next step, we will be registering blob storage as datastore to the Workspace.
###Code
from azureml.core import Datastore
# Please change the following to point to your own blob container and pass in account_key
blob_datastore_name = "automl_many_models"
container_name = "automl-sample-notebook-data"
account_name = "automlsamplenotebookdata"
oj_datastore = Datastore.register_azure_blob_container(
workspace=ws,
datastore_name=blob_datastore_name,
container_name=container_name,
account_name=account_name,
create_if_not_exists=True,
)
###Output
_____no_output_____
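###Markdown
The sample container registered above is only meant for reading the demo data. To run this notebook on your own data, upload your CSV files to a datastore you own (for example the workspace default datastore) and build the tabular datasets from there. The commented sketch below is illustrative; the local folder and target path are placeholders.
###Code
# Sketch: upload your own data to a writable datastore and create a dataset from it.
# my_datastore = ws.get_default_datastore()
# my_datastore.upload(src_dir="./my_local_oj_csvs",
#                     target_path="oj-data-custom-tabular",
#                     overwrite=True,
#                     show_progress=True)
# from azureml.core import Dataset
# custom_train_ds = Dataset.Tabular.from_delimited_files(
#     path=my_datastore.path("oj-data-custom-tabular/"), validate=False
# )
###Output
_____no_output_____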
###Markdown
2.3 Using tabular datasets Now that the datastore is available from the Workspace, [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) can be created. Datasets in Azure Machine Learning are references to specific data in a Datastore. We are using TabularDataset so that users whose data is in one or many files (*.parquet or *.csv) and has not yet been split up by the group columns needed for training can do that split using the out-of-the-box support for the 'partition_by' feature of TabularDataset shown in section 5.0 below.
###Code
from azureml.core import Dataset
ds_name_small = "oj-data-small-tabular"
input_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(ds_name_small + "/"), validate=False
)
inference_name_small = "oj-inference-small-tabular"
inference_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(inference_name_small + "/"), validate=False
)
###Output
_____no_output_____
###Markdown
2.4 Configure data with ``OutputFileDatasetConfig`` objectsThis step shows how to configure output data from a pipeline step. One of the use cases for this step is when you want to do some preprocessing before feeding the data to the training step. Intermediate data (or output of a step) is represented by an ``OutputFileDatasetConfig`` object. ``output_data`` is produced as the output of a step. Optionally, this data can be registered as a dataset by calling the ``register_on_complete`` method. If you create an ``OutputFileDatasetConfig`` in one step and use it as an input to another step, that data dependency between steps creates an implicit execution order in the pipeline.``OutputFileDatasetConfig`` objects return a directory, and by default write output to the default datastore of the workspace.Since direct instance creation of the ``OutputTabularDatasetConfig`` class is not allowed, we first create an ``OutputFileDatasetConfig`` instance. Then we use the ``read_parquet_files`` method to read the parquet files into an ``OutputTabularDatasetConfig``.
###Code
from azureml.data.output_dataset_config import OutputFileDatasetConfig
output_data = OutputFileDatasetConfig(
name="processed_data", destination=(dstore, "outputdataset/{run-id}/{output-name}")
).as_upload()
# output_data_dataset = output_data.register_on_complete(
# name='processed_data', description = 'files from prev step')
output_data = output_data.read_parquet_files()
###Output
_____no_output_____
###Markdown
3.0 Build the training pipelineNow that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training.> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist. Choose a compute targetYou will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targetsamlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\*\*Creation of AmlCompute takes approximately 5 minutes.**If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota.
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
# Name your cluster
compute_name = "mm-compute"
if compute_name in ws.compute_targets:
compute_target = ws.compute_targets[compute_name]
if compute_target and type(compute_target) is AmlCompute:
print("Found compute target: " + compute_name)
else:
print("Creating a new compute target...")
provisioning_config = AmlCompute.provisioning_configuration(
vm_size="STANDARD_D16S_V3", max_nodes=20
)
# Create the compute target
compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)
# Can poll for a minimum number of nodes and for a specific timeout.
# If no min node count is provided it will use the scale settings for the cluster
compute_target.wait_for_completion(
show_output=True, min_node_count=None, timeout_in_minutes=20
)
# For a more detailed view of current cluster status, use the 'status' property
print(compute_target.status.serialize())
###Output
_____no_output_____
###Markdown
Configure the training run's environmentThe next step is making sure that the remote training run has all the dependencies needed by the training steps. Dependencies and the runtime context are set by creating and configuring a RunConfiguration object.The code below shows two options for handling dependencies. As presented, with ``USE_CURATED_ENV = True``, the configuration is based on a [curated environment](https://docs.microsoft.com/en-us/azure/machine-learning/resource-curated-environments). Curated environments have prebuilt Docker images in the [Microsoft Container Registry](https://hub.docker.com/publishers/microsoftowner). For more information, see [Azure Machine Learning curated environments](https://docs.microsoft.com/en-us/azure/machine-learning/resource-curated-environments).The path taken if you change ``USE_CURATED_ENV`` to False shows the pattern for explicitly setting your dependencies. In that scenario, a new custom Docker image will be created and registered in an Azure Container Registry within your resource group (see [Introduction to private Docker container registries in Azure](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-intro)). Building and registering this image can take quite a few minutes.
###Code
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core import Environment
aml_run_config = RunConfiguration()
aml_run_config.target = compute_target
USE_CURATED_ENV = True
if USE_CURATED_ENV:
curated_environment = Environment.get(
workspace=ws, name="AzureML-sklearn-0.24-ubuntu18.04-py37-cpu"
)
aml_run_config.environment = curated_environment
else:
aml_run_config.environment.python.user_managed_dependencies = False
# Add some packages relied on by data prep step
aml_run_config.environment.python.conda_dependencies = CondaDependencies.create(
conda_packages=["pandas", "scikit-learn"],
pip_packages=["azureml-sdk", "azureml-dataset-runtime[fuse,pandas]"],
pin_sdk_version=False,
)
###Output
_____no_output_____
###Markdown
Set up training parametersThis dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.| Property | Description|| :--------------- | :------------------- || **task** | forecasting || **primary_metric** | This is the metric that you want to optimize. Forecasting supports the following primary metrics: spearman_correlation, normalized_root_mean_squared_error, r2_score, normalized_mean_absolute_error || **blocked_models** | Blocked models won't be used by AutoML. || **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. || **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. || **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. || **label_column_name** | The name of the label column. || **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. || **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. || **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. || **time_column_name** | The name of your time column. || **enable_engineered_explanations** | Engineered feature explanations will be downloaded if the enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. || **time_series_id_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. || **track_child_runs** | Flag to disable tracking of child runs. Only the best run is tracked if the flag is set to False (this includes the model and metrics of the run). || **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training; this helps reduce throttling when training at large scale. || **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |
###Code
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsTrainParameters,
)
partition_column_names = ["Store", "Brand"]
automl_settings = {
"task": "forecasting",
"primary_metric": "normalized_root_mean_squared_error",
"iteration_timeout_minutes": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value
"iterations": 15,
"experiment_timeout_hours": 0.25,
"label_column_name": "Quantity",
"n_cross_validations": 3,
"time_column_name": "WeekStarting",
"drop_column_names": "Revenue",
"forecast_horizon": 6,
"time_series_id_column_names": partition_column_names,
"track_child_runs": False,
}
mm_paramters = ManyModelsTrainParameters(
automl_settings=automl_settings, partition_column_names=partition_column_names
)
###Output
_____no_output_____
###Markdown
Construct your pipeline stepsOnce you have the compute resource and environment created, you're ready to define your pipeline's steps. There are many built-in steps available via the Azure Machine Learning SDK, as you can see on the [reference documentation for the azureml.pipeline.steps package](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps?view=azure-ml-py). The most flexible class is [PythonScriptStep](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py), which runs a Python script.Your data preparation code is in a subdirectory (in this example, "data_preprocessing_tabular.py" in the directory "./scripts"). As part of the pipeline creation process, this directory is zipped and uploaded to the compute_target and the step runs the script specified as the value for ``script_name``.The ``arguments`` values specify the inputs and outputs of the step. In the example below, the baseline data is the ``input_ds_small`` dataset. The script data_preprocessing_tabular.py does whatever data-transformation tasks are appropriate to the task at hand and outputs the data to ``output_data``, of type ``OutputFileDatasetConfig``. For more information, see [Moving data into and between ML pipeline steps (Python)](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-move-data-in-out-of-pipelines). The step will run on the machine defined by ``compute_target``, using the configuration ``aml_run_config``.Reuse of previous results (``allow_reuse``) is key when using pipelines in a collaborative environment since eliminating unnecessary reruns offers agility. Reuse is the default behavior when the ``script_name``, ``inputs``, and the parameters of a step remain the same. When reuse is allowed, results from the previous run are immediately sent to the next step. If ``allow_reuse`` is set to False, a new run will always be generated for this step during pipeline execution.> Note that we only support partitioned FileDataset and TabularDataset without partition when using such output as input.
###Code
from azureml.pipeline.steps import PythonScriptStep
dataprep_source_dir = "./scripts"
entry_point = "data_preprocessing_tabular.py"
ds_input = input_ds_small.as_named_input("train_10_models")
data_prep_step = PythonScriptStep(
script_name=entry_point,
source_directory=dataprep_source_dir,
arguments=["--input", ds_input, "--output", output_data],
compute_target=compute_target,
runconfig=aml_run_config,
allow_reuse=False,
)
input_ds_small = output_data
###Output
_____no_output_____
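###Markdown
The preprocessing script itself (`./scripts/data_preprocessing_tabular.py`) is not shown in this notebook. The commented sketch below illustrates what a minimal version could look like, assuming it simply reads the named input dataset and writes it back out as parquet to the output folder; the argument names match the step definition above, everything else is illustrative.
###Code
# Illustrative sketch of ./scripts/data_preprocessing_tabular.py (not the actual script):
#
# import argparse
# import os
# from azureml.core import Run
#
# parser = argparse.ArgumentParser()
# parser.add_argument("--input", dest="input", required=True)
# parser.add_argument("--output", dest="output", required=True)
# args = parser.parse_args()
#
# run = Run.get_context()
# df = run.input_datasets["train_10_models"].to_pandas_dataframe()
#
# # ... any custom preprocessing on df would go here ...
#
# os.makedirs(args.output, exist_ok=True)
# df.to_parquet(os.path.join(args.output, "processed.parquet"), index=False)
###Output
_____no_output_____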
###Markdown
Set up many models pipeline Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based on the number of cores of the compute VM. The node_count will determine the number of master nodes to use; increasing the node count will speed up the training process.| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for training. || **train_data** | The file dataset to be used as input to the training run. || **node_count** | The number of compute nodes to be used for running the user script. We recommend starting with 3 and increasing the node_count if the training time is taking too long. || **process_count_per_node** | Process count per node; we recommend a 2:1 ratio of number of cores to number of processes per node, e.g. if a node has 16 cores then configure a process count of 8 or fewer per node for optimal performance. || **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(
experiment=experiment,
train_data=input_ds_small,
compute_target=compute_target,
node_count=2,
process_count_per_node=8,
run_invocation_timeout=920,
train_pipeline_parameters=mm_paramters,
)
from azureml.pipeline.core import Pipeline
training_pipeline = Pipeline(ws, steps=training_pipeline_steps)
###Output
_____no_output_____
###Markdown
Submit the pipeline to runNext we submit our pipeline to run. The whole training pipeline takes about 40m using a STANDARD_D16S_V3 VM with our current ParallelRunConfig setting.
###Code
training_run = experiment.submit(training_pipeline)
training_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
###Markdown
Check the run status; if training_run is in a completed state, continue to forecasting. If training_run is in another state, check the portal for failures. 5.0 Publish and schedule the train pipeline (Optional) 5.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline = training_pipeline.publish(name = 'automl_train_many_models',
# description = 'train many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
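###Markdown
A published pipeline also exposes a REST endpoint, which is handy for triggering retraining from outside the SDK. The commented sketch below assumes the publish step above has been uncommented and run so that `published_pipeline` exists.
###Code
# Sketch: trigger the published training pipeline through its REST endpoint.
# import requests
# from azureml.core.authentication import InteractiveLoginAuthentication
# auth_header = InteractiveLoginAuthentication().get_authentication_header()
# response = requests.post(published_pipeline.endpoint,
#                          headers=auth_header,
#                          json={"ExperimentName": experiment.name})
# print(response.status_code, response.json().get("Id"))
###Output
_____no_output_____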
###Markdown
5.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# training_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_training_recurring_schedule",
# description="Schedule Training Pipeline to run on the first day of every month",
# pipeline_id=training_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____
###Markdown
6.0 Forecasting Set up output dataset for inference dataOutput of inference can be represented as [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object and OutputFileDatasetConfig can be registered as a dataset.
###Code
from azureml.data import OutputFileDatasetConfig
output_inference_data_ds = OutputFileDatasetConfig(
name="many_models_inference_output", destination=(dstore, "oj/inference_data/")
).register_on_complete(name="oj_inference_data_ds")
###Output
_____no_output_____
###Markdown
For many models we need to provide the ManyModelsInferenceParameters object. ManyModelsInferenceParameters arguments| Property | Description|| :--------------- | :------------------- || **partition_column_names** | List of column names that identify groups. || **target_column_name** | \[Optional] Column name only if the inference dataset has the target. || **time_column_name** | \[Optional] Column name only if it is timeseries. || **many_models_run_id** | \[Optional] Many models run id where models were trained. | get_many_models_batch_inference_steps arguments| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for the inference run. || **inference_data** | The data to use for inferencing. It should be the same schema as used for training. || **compute_target** | The compute target that runs the inference pipeline. || **node_count** | The number of compute nodes to be used for running the user script. We recommend to start with the number of cores per node (varies by compute sku). || **process_count_per_node** | \[Optional] The number of processes per node, by default it's 4. || **train_run_id** | \[Optional] The run id of the hierarchy training, by default it is the latest successful training many model run in the experiment. || **train_experiment_name** | \[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiment as the inference pipeline. |
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsInferenceParameters,
)
mm_parameters = ManyModelsInferenceParameters(
partition_column_names=["Store", "Brand"],
time_column_name="WeekStarting",
target_column_name="Quantity",
)
inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(
experiment=experiment,
inference_data=inference_ds_small,
node_count=2,
process_count_per_node=8,
compute_target=compute_target,
run_invocation_timeout=300,
output_datastore=output_inference_data_ds,
train_run_id=training_run.id,
train_experiment_name=training_run.experiment.name,
inference_pipeline_parameters=mm_parameters,
)
from azureml.pipeline.core import Pipeline
inference_pipeline = Pipeline(ws, steps=inference_steps)
inference_run = experiment.submit(inference_pipeline)
inference_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
###Markdown
Retrieve resultsThe forecasting pipeline forecasts the orange juice quantity for a Store by Brand. The pipeline returns one file with the predictions for each store and outputs the result to the forecasting_output Blob container. The details of the blob container are listed in 'forecasting_output.txt' under Outputs+logs. The following code snippet:1. Downloads the contents of the output folder that was passed to the parallel run step 2. Reads the parallel_run_step.txt file that has the predictions as a pandas dataframe and 3. Displays the top 10 rows of the predictions
###Code
from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline
forecasting_results_name = "forecasting_results"
forecasting_output_name = "many_models_inference_output"
forecast_file = get_output_from_mm_pipeline(
inference_run, forecasting_results_name, forecasting_output_name
)
df = pd.read_csv(forecast_file, delimiter=" ", header=None)
df.columns = [
"Week Starting",
"Store",
"Brand",
"Quantity",
"Advert",
"Price",
"Revenue",
"Predicted",
]
print(
"Prediction has ", df.shape[0], " rows. Here the first 10 rows are being displayed."
)
df.head(10)
###Output
_____no_output_____
###Markdown
7.0 Publish and schedule the inference pipeline (Optional) 7.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline_inf = inference_pipeline.publish(name = 'automl_forecast_many_models',
# description = 'forecast many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
###Markdown
7.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain or forecast models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# forecasting_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_forecasting_recurring_schedule",
# description="Schedule Forecasting Pipeline to run on the first day of every week",
# pipeline_id=forecasting_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License.  Many Models - Automated ML**_Generate many models time series forecasts with Automated Machine Learning_**--- For this notebook we are using a synthetic dataset portraying sales data to predict the quantity of a variety of product SKUs across several states, stores, and product categories.**NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend setting the parallelism to a maximum of 320 runs per experiment per workspace. If users increase this limit to get more parallelism, they might encounter Too Many Requests errors (HTTP 429).** PrerequisitesYou'll need to create a compute instance by following the instructions in the [EnvironmentSetup.md](../Setup_Resources/EnvironmentSetup.md). 1.0 Set up workspace, datastore, experiment
###Code
import azureml.core
from azureml.core import Workspace, Datastore
import pandas as pd
# Set up your workspace
ws = Workspace.from_config()
ws.get_details()
# Set up your datastores
dstore = ws.get_default_datastore()
output = {}
output["SDK version"] = azureml.core.VERSION
output["Subscription ID"] = ws.subscription_id
output["Workspace"] = ws.name
output["Resource Group"] = ws.resource_group
output["Location"] = ws.location
output["Default datastore name"] = dstore.name
pd.set_option("display.max_colwidth", None)  # None shows full cell contents; -1 is deprecated in newer pandas
outputDf = pd.DataFrame(data=output, index=[""])
outputDf.T
###Output
_____no_output_____
###Markdown
Choose an experiment
###Code
from azureml.core import Experiment
experiment = Experiment(ws, "automl-many-models")
print("Experiment name: " + experiment.name)
###Output
_____no_output_____
###Markdown
2.0 DataThis notebook uses simulated orange juice sales data to walk you through the process of training many models on Azure Machine Learning using Automated ML. The time series data used in this example was simulated based on the University of Chicago's Dominick's Finer Foods dataset, which featured two years of sales of 3 different orange juice brands for individual stores. The full simulated dataset includes 3,991 stores with 3 orange juice brands each, thus allowing 11,973 models to be trained to showcase the power of the many models pattern. In this notebook, two datasets will be created: one with all 11,973 files and one with only 10 files that can be used to quickly test and debug. For each dataset, you'll be walked through the process of:1. Registering the blob container as a Datastore to the Workspace2. Registering a tabular dataset to the Workspace 2.1 Data PreparationThe OJ data is available in the public blob container. The data is split to be used for training and for inferencing. For the current dataset, the data was split on the time column ('WeekStarting') before and after '1992-5-28'.The container has 'oj-data-tabular' and 'oj-inference-tabular' folders that contain training and inference data, respectively, for the 11,973 models. It also has 'oj-data-small-tabular' and 'oj-inference-small-tabular' folders that have training and inference data for 10 models.To create the [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) needed for the ParallelRunStep, you first need to register the blob container to the workspace. To use your own data, put your own data in a blobstore folder. As shown, it can be one file or multiple files. We can then register a datastore using that blob as shown below.
How sample data in blob store looks like['oj-data-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-data-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container)['oj-inference-small-tabular'](https://ms.portal.azure.com/blade/Microsoft_Azure_Storage/ContainerMenuBlade/overview/storageAccountId/%2Fsubscriptions%2F102a16c3-37d3-48a8-9237-4c9b1e8e80e0%2FresourceGroups%2FAutoMLSampleNotebooksData%2Fproviders%2FMicrosoft.Storage%2FstorageAccounts%2Fautomlsamplenotebookdata/path/automl-sample-notebook-data/etag/%220x8D84EAA65DE50B7%22/defaultEncryptionScope/%24account-encryption-key/denyEncryptionScopeOverride//defaultId//publicAccessVal/Container) 2.2 Register the blob container as DataStoreA Datastore is a place where data can be stored that is then made accessible to a compute either by means of mounting or copying the data to the compute target.Please refer to [Datastore](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.datastore(class)?view=azure-ml-py) documentation on how to access data from Datastore.In this next step, we will be registering blob storage as datastore to the Workspace.
###Code
from azureml.core import Datastore
# Please change the following to point to your own blob container and pass in account_key
blob_datastore_name = "automl_many_models"
container_name = "automl-sample-notebook-data"
account_name = "automlsamplenotebookdata"
oj_datastore = Datastore.register_azure_blob_container(
workspace=ws,
datastore_name=blob_datastore_name,
container_name=container_name,
account_name=account_name,
create_if_not_exists=True,
)
###Output
_____no_output_____
###Markdown
2.3 Using tabular datasets Now that the datastore is available from the Workspace, [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py) can be created. Datasets in Azure Machine Learning are references to specific data in a Datastore. We are using TabularDataset so that users whose data is in one or many files (*.parquet or *.csv) and has not yet been split up according to the group columns needed for training can split it using the out-of-the-box 'partition_by' feature of TabularDataset, illustrated in the sketch below.
###Code
from azureml.core import Dataset
ds_name_small = "oj-data-small-tabular"
input_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(ds_name_small + "/"), validate=False
)
inference_name_small = "oj-inference-small-tabular"
inference_ds_small = Dataset.Tabular.from_delimited_files(
path=oj_datastore.path(inference_name_small + "/"), validate=False
)
###Output
_____no_output_____
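###Markdown
 If your own data is not already organized into one folder per group, a partitioned copy can be written out from a TabularDataset. The commented sketch below is illustrative only and is not executed in this notebook; the destination folder name 'oj-partitioned' is an assumption.
###Code
# Illustrative sketch (not executed): write a copy of the training data
# partitioned by the grouping columns using TabularDataset.partition_by.
# from azureml.data.datapath import DataPath
# partitioned_ds = input_ds_small.partition_by(
#     partition_keys=["Store", "Brand"],                # columns that identify each time series
#     target=DataPath(oj_datastore, "oj-partitioned"),  # assumed destination folder on the datastore
# )
###Output
_____no_output_____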
###Markdown
3.0 Build the training pipelineNow that the dataset, Workspace, and datastore are set up, we can put together a pipeline for training.> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist. Choose a compute targetYou will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-set-up-training-targetsamlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this [article](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-quotas) on the default limits and how to request more quota.
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
# Name your cluster
compute_name = "mm-compute"
if compute_name in ws.compute_targets:
compute_target = ws.compute_targets[compute_name]
if compute_target and type(compute_target) is AmlCompute:
print("Found compute target: " + compute_name)
else:
print("Creating a new compute target...")
provisioning_config = AmlCompute.provisioning_configuration(
vm_size="STANDARD_D16S_V3", max_nodes=20
)
# Create the compute target
compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)
# Can poll for a minimum number of nodes and for a specific timeout.
# If no min node count is provided it will use the scale settings for the cluster
compute_target.wait_for_completion(
show_output=True, min_node_count=None, timeout_in_minutes=20
)
# For a more detailed view of current cluster status, use the 'status' property
print(compute_target.status.serialize())
###Output
_____no_output_____
###Markdown
Set up training parametersThis dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.| Property | Description|| :--------------- | :------------------- || **task** | forecasting || **primary_metric** | This is the metric that you want to optimize. Forecasting supports the following primary metrics: spearman_correlation, normalized_root_mean_squared_error, r2_score, normalized_mean_absolute_error || **blocked_models** | Blocked models won't be used by AutoML. || **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. || **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. || **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. || **label_column_name** | The name of the label column. || **forecast_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. || **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. || **enable_early_stopping** | Flag to enable early termination if the score is not improving in the short term. || **time_column_name** | The name of your time column. || **enable_engineered_explanations** | Engineered feature explanations will be downloaded if the enable_engineered_explanations flag is set to True. By default it is set to False to save storage space. || **time_series_id_column_name** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. || **track_child_runs** | Flag to disable tracking of child runs. Only the best run is tracked if the flag is set to False (this includes the model and metrics of the run). || **pipeline_fetch_max_batch_size** | Determines how many pipelines (training algorithms) to fetch at a time for training; this helps reduce throttling when training at large scale. || **partition_column_names** | The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series. |
###Code
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsTrainParameters,
)
partition_column_names = ["Store", "Brand"]
automl_settings = {
"task": "forecasting",
"primary_metric": "normalized_root_mean_squared_error",
"iteration_timeout_minutes": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value
"iterations": 15,
"experiment_timeout_hours": 0.25,
"label_column_name": "Quantity",
"n_cross_validations": 3,
"time_column_name": "WeekStarting",
"drop_column_names": "Revenue",
"max_horizon": 6,
"grain_column_names": partition_column_names,
"track_child_runs": False,
}
mm_parameters = ManyModelsTrainParameters(
automl_settings=automl_settings, partition_column_names=partition_column_names
)
###Output
_____no_output_____
###Markdown
Set up many models pipeline Parallel run step is leveraged to train multiple models at once. To configure the ParallelRunConfig you will need to determine the appropriate number of workers and nodes for your use case. The process_count_per_node is based on the number of cores of the compute VM. The node_count determines the number of master nodes to use; increasing the node count will speed up the training process.| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for training. || **train_data** | The file dataset to be used as input to the training run. || **node_count** | The number of compute nodes to be used for running the user script. We recommend starting with 3 and increasing the node_count if the training time is taking too long. || **process_count_per_node** | Process count per node; we recommend a 2:1 ratio of number of cores to number of processes per node. E.g., if a node has 16 cores, configure a process count of 8 or less for optimal performance. || **train_pipeline_parameters** | The set of configuration parameters defined in the previous section. |Calling this method will create a new aggregated dataset which is generated dynamically on pipeline execution.
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
training_pipeline_steps = AutoMLPipelineBuilder.get_many_models_train_steps(
experiment=experiment,
train_data=input_ds_small,
compute_target=compute_target,
node_count=2,
process_count_per_node=8,
run_invocation_timeout=920,
    train_pipeline_parameters=mm_parameters,
)
from azureml.pipeline.core import Pipeline
training_pipeline = Pipeline(ws, steps=training_pipeline_steps)
###Output
_____no_output_____
###Markdown
Submit the pipeline to runNext we submit our pipeline to run. The whole training pipeline takes about 40m using a STANDARD_D16S_V3 VM with our current ParallelRunConfig setting.
###Code
training_run = experiment.submit(training_pipeline)
training_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
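###Markdown
 Before moving on, you can confirm programmatically that the training pipeline finished. A minimal check using the `training_run` handle created above (output not recorded here):
###Code
# Print the final status of the many models training pipeline run;
# expect 'Completed' if training succeeded.
print(training_run.get_status())
###Output
_____no_output_____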
###Markdown
Check the run status: if training_run is in the Completed state, continue to forecasting. If training_run is in another state, check the portal for failures. 5.0 Publish and schedule the train pipeline (Optional) 5.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish it so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline = training_pipeline.publish(name = 'automl_train_many_models',
# description = 'train many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
###Markdown
5.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# training_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_training_recurring_schedule",
# description="Schedule Training Pipeline to run on the first day of every month",
# pipeline_id=training_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____
###Markdown
6.0 Forecasting Set up output dataset for inference dataOutput of inference can be represented as an [OutputFileDatasetConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.output_dataset_config.outputdatasetconfig?view=azure-ml-py) object, and OutputFileDatasetConfig can be registered as a dataset.
###Code
from azureml.data import OutputFileDatasetConfig
output_inference_data_ds = OutputFileDatasetConfig(
name="many_models_inference_output", destination=(dstore, "oj/inference_data/")
).register_on_complete(name="oj_inference_data_ds")
###Output
_____no_output_____
###Markdown
For many models we need to provide the ManyModelsInferenceParameters object. ManyModelsInferenceParameters arguments| Property | Description|| :--------------- | :------------------- || **partition_column_names** | List of column names that identify groups. || **target_column_name** | \[Optional] Column name only if the inference dataset has the target. || **time_column_name** | \[Optional] Column name only if it is timeseries. || **many_models_run_id** | \[Optional] Many models run id where models were trained. | get_many_models_batch_inference_steps arguments| Property | Description|| :--------------- | :------------------- || **experiment** | The experiment used for the inference run. || **inference_data** | The data to use for inferencing. It should have the same schema as used for training. || **compute_target** | The compute target that runs the inference pipeline. || **node_count** | The number of compute nodes to be used for running the user script. We recommend starting with the number of cores per node (varies by compute SKU). || **train_run_id** | \[Optional] The run id of the many models training; by default it is the latest successful training many model run in the experiment. || **train_experiment_name** | \[Optional] The train experiment that contains the train pipeline. This one is only needed when the train pipeline is not in the same experiment as the inference pipeline. || **process_count_per_node** | \[Optional] The number of processes per node, by default it's 4. |
###Code
from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder
from azureml.train.automl.runtime._many_models.many_models_parameters import (
ManyModelsInferenceParameters,
)
mm_parameters = ManyModelsInferenceParameters(
partition_column_names=["Store", "Brand"],
time_column_name="WeekStarting",
target_column_name="Quantity",
)
inference_steps = AutoMLPipelineBuilder.get_many_models_batch_inference_steps(
experiment=experiment,
inference_data=inference_ds_small,
node_count=2,
process_count_per_node=8,
compute_target=compute_target,
run_invocation_timeout=300,
output_datastore=output_inference_data_ds,
train_run_id=training_run.id,
train_experiment_name=training_run.experiment.name,
inference_pipeline_parameters=mm_parameters,
)
from azureml.pipeline.core import Pipeline
inference_pipeline = Pipeline(ws, steps=inference_steps)
inference_run = experiment.submit(inference_pipeline)
inference_run.wait_for_completion(show_output=False)
###Output
_____no_output_____
###Markdown
Retrieve resultsThe forecasting pipeline forecasts the orange juice quantity for a Store by Brand. The pipeline returns one file with the predictions for each store and outputs the result to the forecasting_output Blob container. The details of the blob container are listed in 'forecasting_output.txt' under Outputs+logs. The following code snippet:1. Downloads the contents of the output folder that is passed in the parallel run step 2. Reads the parallel_run_step.txt file that has the predictions as a pandas dataframe and 3. Displays the top 10 rows of the predictions
###Code
from azureml.contrib.automl.pipeline.steps.utilities import get_output_from_mm_pipeline
forecasting_results_name = "forecasting_results"
forecasting_output_name = "many_models_inference_output"
forecast_file = get_output_from_mm_pipeline(
inference_run, forecasting_results_name, forecasting_output_name
)
df = pd.read_csv(forecast_file, delimiter=" ", header=None)
df.columns = [
"Week Starting",
"Store",
"Brand",
"Quantity",
"Advert",
"Price",
"Revenue",
"Predicted",
]
print(
"Prediction has ", df.shape[0], " rows. Here the first 10 rows are being displayed."
)
df.head(10)
###Output
_____no_output_____
###Markdown
7.0 Publish and schedule the inference pipeline (Optional) 7.1 Publish the pipelineOnce you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline_inf = inference_pipeline.publish(name = 'automl_forecast_many_models',
# description = 'forecast many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
###Markdown
7.2 Schedule the pipelineYou can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain or forecast models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# forecasting_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_forecasting_recurring_schedule",
# description="Schedule Forecasting Pipeline to run on the first day of every week",
# pipeline_id=forecasting_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____ |
separate_rows/separate_rows.ipynb | ###Markdown
Separate a collapsed column into multiple rowsDate: May 29, 2019Made by: Cristian E. Nuno OverviewSometimes a variable has multiple elements stored in one row, where they are each separated by a delimiter (e.g. `,`, `\t`, `\`, etc.). Often it is more useful to separate those elements into their own records so our [data becomes tidy](http://vita.had.co.nz/papers/tidy-data.pdf). GoalIf a variable contains observations with multiple delimited values, this notebook will show you how to separate the values and place each one in its own row as a pandas DataFrame. Stack OverflowThis would not be possible without [this answer from Stack Overflow](https://stackoverflow.com/a/28182629/7954106) regarding the separation of elements in a column into multiple rows. Load necessary modules
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Load necessary dataToday we'll be using [Chicago Public Schools (CPS) School Year 2018-2019 school profile data](https://cenuno.github.io/pointdexter/reference/cps_sy1819.html).
###Code
relevant_columns = ["school_id", "short_name",
"primary_category", "grades_offered_all"]
cps_sy1819 = pd.read_csv("../raw_data/cps_sy1819_profiles.csv")[relevant_columns]
cps_sy1819.head()
###Output
_____no_output_____
###Markdown
[Cast each element in the `Series` as a string](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.html) and [split each string by the delimiter](https://docs.python.org/3.7/library/stdtypes.htmlstr.split). Afterwards, store the `Series` object as a `list` of lists.
###Code
cps_sy1819["grades_offered_all"].str.split(",")[0:5]
###Output
_____no_output_____
###Markdown
Store results in a [`DataFrame`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.htmlpandas.DataFrame)After converting the `list` to a `DataFrame`, you'll notice a lot of `None` values. Under the hood, `pandas` is ensuring each record has 13 columns. Since not all schools serve 13 grades, they'll get `None` values.
###Code
pd.DataFrame(cps_sy1819["grades_offered_all"].str.split(",").tolist(),
index=cps_sy1819.index).head()
###Output
_____no_output_____
###Markdown
Convert the `DataFrame` to a `Series` with a multi-level index via [`stack()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.stack.html)> The function is named by analogy with a collection of books being re-organised from being side by side on a horizontal position (the columns of the dataframe) to being stacked vertically on top of each other (in the index of the dataframe).
###Code
pd.DataFrame(cps_sy1819["grades_offered_all"].str.split(",").tolist(),
index=cps_sy1819.index).head().stack()[0:10]
###Output
_____no_output_____
###Markdown
Putting it all together[Reset the index](http://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.reset_index.html) so that the old index is added as a columnWe will no longer need the lower level index for each grade in each school, so it can be dropped. Instead, the higher level index for each school is what we will use moving forward.*Note: I renamed the columns to make them easier to understand.*
###Code
grades_df = pd.DataFrame(cps_sy1819["grades_offered_all"].str.split(",").tolist(),
index=cps_sy1819.index).stack()
grades_df = grades_df.reset_index()
grades_df = grades_df.drop("level_1", axis=1)
grades_df.columns = ["school_index", "grade_offered"]
grades_df.head(10)
###Output
_____no_output_____
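###Markdown
 As an aside, if you are on pandas 0.25 or newer, [`Series.explode`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.explode.html) collapses the split/stack/reset steps above into a single chain. The sketch below is an equivalent alternative; the rest of the notebook keeps working with `grades_df`.
###Code
# Alternative with pandas >= 0.25: build the same two-column frame in one chain
grades_alt = (cps_sy1819["grades_offered_all"]
              .str.split(",")
              .explode()
              .reset_index()
              .rename(columns={"index": "school_index",
                               "grades_offered_all": "grade_offered"}))
###Output
_____no_output_____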
###Markdown
One Hot EncodingWe would like to one hot encode the `grade_offered` feature in `grades_df` to create new features that identify if a CPS school serves a particular grade. *To learn more about one hot encoding, [click here](https://github.com/cenuno/classification/tree/master/oheone-hot-encoding).*
###Code
from sklearn.preprocessing import OneHotEncoder
# drop the first category to avoid multi-collinearity
encoder = OneHotEncoder(drop="first",
categories="auto").fit(grades_df[["grade_offered"]])
ohe = pd.DataFrame(encoder.transform(grades_df[["grade_offered"]]).toarray(),
columns=encoder.get_feature_names(["grade"]))
ohe.head()
###Output
_____no_output_____
###Markdown
Column-bind `ohe` onto `grades_df`Notice that we dropped `grade_offered` since it's redundant information.
###Code
grades_ohe_df = pd.concat([grades_df.drop("grade_offered", axis=1), ohe], axis=1)
grades_ohe_df.head()
###Output
_____no_output_____
###Markdown
DeduplicateAt this point, `grades_ohe_df` is nearly done. We need to sum the one hot encoded features such that there is one record per `school_index` value. At the moment, there is one record per grade offered by each school. That is why there are more records in `grades_ohe_df` than in `cps_sy1819`.
###Code
grades_ohe_df.shape
cps_sy1819.shape
###Output
_____no_output_____
###Markdown
By [grouping by](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.groupby.html) `school_index`, we can sum all of the one hot encoded features. This reduces the number of records such that there is only one record per `school_index`. Now each record contains many `1.0` values in the one hot encoded features because one school can serve many grades.*Note that we are subsetting the group by object only to those one hot encoded features via `ohe.columns`. Now that we have our group, we sum all of these features by using [`.agg(sum)`](https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.core.groupby.DataFrameGroupBy.agg.html).*
###Code
grades_ohe_dedup_df = grades_ohe_df.groupby("school_index")[ohe.columns].agg("sum").reset_index()
grades_ohe_dedup_df.head()
###Output
_____no_output_____
###Markdown
Merge `grades_ohe_dedup_df` onto `cps_sy1819` via a left joinWe'll use the indices from `cps_sy1819` on the left-hand side, along with the `school_index` values from `grades_ohe_dedup_df`, to perform the join. After the join, we'll drop the `school_index` feature since it's redundant information in that `school_id` already indicates that each record is related to one particular CPS school.
###Code
cps_sy1819 = pd.merge(cps_sy1819,
grades_ohe_dedup_df,
left_index=True,
right_on="school_index",
how="left").drop("school_index", axis=1)
cps_sy1819.head(10)
###Output
_____no_output_____ |
src/user_guide/config_files.ipynb | ###Markdown
Configuration files * **Difficulty level**: easy* **Time needed to learn**: 10 minutes or less* **Key points**: * SoS reads multiple configuration files and merges the results * User configuration files can be specified with option `-c` * Content of configuration file is available through variable `CONFIG` * Host-specific paths can be accessed by `path(name, default)` SoS configuration files SoS reads configurations from * A site configuration file `site_config.yml` under the sos package directory. This is where system administrators define system-wide configurations (e.g. host definitions) for all users.* A host configuration file `~/.sos/hosts.yml` that defines properties of local and remote hosts.* A global sos configuration file `~/.sos/config.yml` that defines other user-specific settings.* And an optional configuration file specified by command line option `-c` that defines workflow-specific settings. The configuration files should be in the format of [`YAML`](http://yaml.org/) or its subset format [`JSON`](http://json-schema.org/implementations.html). When a SoS script is loaded, SoS looks for and parses site and global configuration files and an optional user-specified configuration file. The results are used by SoS for the execution of workflows, and are available to the workflow as a global variable `CONFIG`. Merge of multiple configuration files All configurations from the aforementioned files are merged to a single dictionary. A dictionary could therefore contain keys defined in different configuration files and a latter file could overwrite keys defined in a previous file. For example, if * `{'A': {'B': 'old', 'C': 'old'}}` is defined in `~/.sos/config.yml` using ``` A: B: old C: old ``` * `{'A': {'B': 'new', 'D': 'new'}}` is defined in `my_config.yml` using ``` A: B: new D: new ```then the final result using `-c my_config.yml` would be `{'A': {'B': 'new', 'C': 'old', 'D': 'new'}}` as if a single configuration file with content ``` A: B: new C: old D: new ```is used. This is how **site or global configurations can be overridden by user configurations**. Derived dictionary keys A special key `based_on` will be processed after all configuration files are loaded. The value of `based_on` should be one or more keys to other dictionaries in the configuration (e.g. `hosts.cluster`). The consequence of this key is that the items from the referred dictionaries would be merged to the present dictionary if they do not exist in the present dictionary. This allows you to derive a dictionary from an existing one. For example,
###Code
%save my_config.yml -f
hosts:
head_node:
description: head_node of cluster
address: domain.com
cluster:
description: Cluster
based_on: hosts.head_node
queue_type: pbs
%run -c my_config.yml -v1
print(CONFIG['hosts']['cluster'])
###Output
{'description': 'Cluster', 'queue_type': 'pbs', 'address': 'domain.com'}
###Markdown
String interpolation SoS interpolates string values if they contain `{ }`. The expressions enclosed by `{ }` would be evaluated using variables defined in the root dictionary of `CONFIG`, or the dictionary in which the value is defined, or variables provided by users in case of task or workflow templates. For example, let us define a config file using magic `%save`
###Code
%save my_config.yml -f
user_name: user
hosts:
cluster:
address: "{user_name}@domain.com:{port}"
port: 123
###Output
_____no_output_____
###Markdown
When the configuration file is loaded with option `-c`, the `address` in `hosts.cluster` is expanded with `user_name` defined in the root dictionary, and `port` defined in the local dictionary.
###Code
%run -c my_config.yml
print(CONFIG['hosts']['cluster'])
###Output
{'address': '[email protected]:123', 'port': 123}
###Markdown
Because key `user_name` is frequently used in `hosts.yml`, **SoS automatically defines `user_name` as the local user ID (all lower case) in `CONFIG` if it is not defined in any of the configuration files**. String interpolation happens after `based_on`, so the following usage is allowed:
###Code
%save my_config.yml -f
hosts:
host_r:
address: localhost
R_version: 3.1
workflow_template: |
echo module load R/{R_version}
{command}
host_r33:
based_on: hosts.host_r
R_version: 3.3
###Output
_____no_output_____
###Markdown
This configuration file defines hosts named `host_r` and `host_r33` with address `localhost`. The `workflow_template` would be used if the host name is specified with option `-r`. Although the example is meant for a cluster system that loads the appropriate module with command `module load`, this example just echoes the `module load` line to show how the `workflow_template` is expanded. First, if we use host `host_r`, `R_version=3.1` will be used:
###Code
%run -r host_r -c my_config.yml -v0
print('Hello')
###Output
module load R/3.1
[95mWARNING[0m: [95mTask M4_b1c14581f8718798 inactive for more than 4830 seconds, might have been killed.[0m
[32m[[0m[32m#[0m[32m][0m 1 step processed (1 job completed)
###Markdown
If we use host `host_r33`, `R_version=3.3` will be used to expand `workflow_template` derived from `host_r`.
###Code
%run -r host_r33 -c my_config.yml -v0
print('Hello')
###Output
module load R/3.3
[32m[[0m[32m#[0m[32m][0m 1 step processed (1 job completed)
###Markdown
Then, finally, if we provide a value of `R_version` from command line, it will override any existing values defined in the config file.
###Code
%run -r host_r R_version=4.3 -c my_config.yml -v0
print('Hello')
###Output
module load R/4.3
[32m[[0m[32m#[0m[32m][0m 1 step processed (1 job completed)
###Markdown
Use of configuration files Variable `CONFIG` As shown above, the dictionary loaded from SoS configuration files is available to the SoS workflow as variable `CONFIG`. This allows a workflow to retrieve settings from configuration files. For example, a workflow could be defined as follows, which uses `Bob` as a default value for `manager`
###Code
%run -v0
parameter: manager = CONFIG.get('manager', 'Bob')
print(manager)
###Output
[32m[[0m[32m#[0m[32m][0m 1 step processed (1 job completed)
###Markdown
uses `Elena` from command line
###Code
%run --manager Elena -v0
parameter: manager = CONFIG.get('manager', 'Bob')
print(manager)
###Output
[32m[[0m[32m#[0m[32m][0m 1 step processed (1 job completed)
###Markdown
Or, with the following configuration file
###Code
%save myconfig.yml -f
manager: Martin
###Output
_____no_output_____
###Markdown
use default values from a configuration file
###Code
%run -c myconfig.yml -v0
parameter: manager = CONFIG.get('manager', 'Bob')
print(manager)
###Output
[32m[[0m[32m#[0m[32m][0m 1 step processed (1 job completed)
###Markdown
Host-dependent paths path(name, default) The path datatype of SoS is derived from `pathlib.Path`. One of the additions of this datatype is the parameters `name` and `default`, which return a pre-defined path defined in `CONFIG["hosts"][current-host]["paths"]`, where current-host is normally localhost but can be one of the remote hosts if the function is called from a remote host. A default value could be returned if name is not available in the configuration. The `hosts` definitions in `~/.sos/hosts.yml` allow the definition of paths for different hosts. For clarity let us define a local configuration file that points `localhost` to an `example_host` configuration.
###Code
%save myconfig.yml -f
localhost: example_host
hosts:
example_host:
address: localhost
paths:
home: /Users/{user_name}
project: /Users/{user_name}/Documents
tmp: /tmp
###Output
_____no_output_____
###Markdown
Without worrying about the `localhost` part for now, this configuration file defines a few paths for the localhost. The `paths` could be retrieved using `path(name='project')` so that you can write your script in a host-independent way. For example, the following workflow uses `path(name='project')` to get the host-specific `project` directory, which is defined as `/Users/bpeng1/Documents` in `myconfig.yml`.
###Code
%run -c myconfig.yml -v1
sh: workdir=path(name='project')
echo Working on `pwd`
###Output
Working on /Users/bpeng1/Documents
###Markdown
If you are uncertain if `project` is defined for current host, you can use `default` to specify a default value
###Code
%run -c myconfig.yml -v1
import os
sh: workdir=path(name='scratch', default='~')
echo Working on `pwd`
###Output
Working on /Users/bpeng1
###Markdown
Configuration files * **Difficulty level**: easy* **Time needed to learn**: 10 minutes or less* **Key points**: * SoS reads multiple configuration files and merges the results * User configuration files can be specified with option `-c` * Content of configuration file is available through variable `CONFIG` * Host-specific paths can be accessed by `path(name, default)` SoS configuration files SoS reads configurations from * A site configuration file `site_config.yml` under the sos package directory. This is where system administrators define system-wide configurations (e.g. host definitions) for all users.* A host configuration file `~/.sos/hosts.yml` that defines properties of local and remote hosts.* A global sos configuration file `~/.sos/config.yml` that defines other user-specific settings.* And an optional configuration file specified by command line option `-c` that defines workflow-specific settings. The configuration files should be in the format of [`YAML`](http://yaml.org/) or its subset format [`JSON`](http://json-schema.org/implementations.html). When a SoS script is loaded, SoS looks for and parses site and global configuration files and an optional user-specified configuration file. The results are used by SoS for the execution of workflows, and are available to the workflow as a global variable `CONFIG`. Merge of multiple configuration files All configurations from the aforementioned files are merged to a single dictionary. A dictionary could therefore contain keys defined in different configuration files and a latter file could overwrite keys defined in a previous file. For example, if * `{'A': {'B': 'old', 'C': 'old'}}` is defined in `~/.sos/config.yml` using ``` A: B: old C: old ``` * `{'A': {'B': 'new', 'D': 'new'}}` is defined in `my_config.yml` using ``` A: B: new D: new ```then the final result using `-c my_config.yml` would be `{'A': {'B': 'new', 'C': 'old', 'D': 'new'}}` as if a single configuration file with content ``` A: B: new C: old D: new ```is used. This is how **site or global configurations are extended or overridden by user configurations**. Derived dictionary keys A special key `based_on` will be processed after all configuration files are loaded. The value of `based_on` should be one or more keys to other dictionaries in the configuration (e.g. `hosts.cluster`). The consequence of this key is that the items from the referred dictionaries would be merged to the present dictionary if they do not exist in the present dictionary. This allows you to derive a dictionary from an existing one. For example,
###Code
%save my_config.yml -f
hosts:
head_node:
description: head_node of cluster
address: domain.com
cluster:
description: Cluster
based_on: hosts.head_node
queue_type: pbs
%run -c my_config.yml -v1
print(CONFIG['hosts']['cluster'])
###Output
{'description': 'Cluster', 'queue_type': 'pbs', 'address': 'domain.com'}
###Markdown
String interpolation Internally, SoS interpolates string values as if they are Python f-strings. That is to say, expressions inside `{ }` will be interpolated before they are used. For example, let us assume that we have an incomplete host definition as follows: ```ymluser_name: userhosts: desktop: paths: home: "{os.environ['HOME']}" cluster: address: "{user_name}@domain.com:{port}" port: 123 queue: medium task_template: | PBS -q {queue}``` We can see that `hosts` -> `cluster` -> `address` and `task_template` have expressions in `{ }` that will be expanded as f-strings by SoS. The f-strings will be expanded according to the following rules:1. Variables provided from the workflow or command line have the highest priority. For example, if `queue='long'` is specified as a runtime option of a task, variable `queue` will be expanded as `long`.```%run -q clustertask: queue='long'...```2. Variables in the parent dictionary. In this example `port` would be used for `address`, and `queue` would be used for `task_template` if it is not defined from the workflow. That is to say `queue: medium` provides a default value to variable `queue`.3. Variables in the root of the configuration dictionary. In this example `user_name` is defined and would be used for `address`. Because key `user_name` is frequently used in `hosts.yml`, **SoS automatically defines `user_name` as the local user ID (all lower case) in `CONFIG` if it is not defined in any of the configuration files**. Note that module `os` is made available during string interpolation to allow expansion of environment variables from `os.environ`. Putting this knowledge to use, let us create a configuration file with the `%save` magic
###Code
%save my_config.yml -f
hosts:
host_r:
address: localhost
R_version: 3.1
workflow_template: |
echo module load R/{R_version}
{command}
host_r33:
based_on: hosts.host_r
R_version: 3.3
###Output
_____no_output_____
###Markdown
This configuration file defines hosts named `host_r` and `host_r33` with address `localhost`. The `workflow_template` would be used if the host name is specified with option `-r`. Although the example is meant for a cluster system that loads the appropriate module with command `module load`, this example just echoes the `module load` line to show how the `workflow_template` is expanded. First, if we use host `host_r`, `R_version=3.1` will be used:
###Code
%run -r host_r -c my_config.yml -v1
print('Hello')
###Output
module load R/3.1
Hello
###Markdown
If we use host `host_r33`, `R_version=3.3` will be used to expand `workflow_template` derived from `host_r`.
###Code
%run -r host_r33 -c my_config.yml -v1
print('Hello')
###Output
module load R/3.3
Hello
###Markdown
Then, finally, if we provide a value of `R_version` from command line, it will override any existing values defined in the config file.
###Code
%run -r host_r R_version=4.3 -c my_config.yml -v0
print('Hello')
###Output
module load R/4.3
[32m[[0m[32m#[0m[32m][0m 1 step processed (1 job completed)
###Markdown
Use of configuration files Variable `CONFIG` As shown above, the dictionary loaded from SoS configuration files is available to the SoS workflow as variable `CONFIG`. This allows a workflow to retrieve settings from configuration files. For example, a workflow could be defined as follows, which uses `Bob` as a default value for `manager`
###Code
%run -v0
parameter: manager = CONFIG.get('manager', 'Bob')
print(manager)
###Output
[32m[[0m[32m#[0m[32m][0m 1 step processed (1 job completed)
###Markdown
uses `Elena` from command line
###Code
%run --manager Elena -v0
parameter: manager = CONFIG.get('manager', 'Bob')
print(manager)
###Output
[32m[[0m[32m#[0m[32m][0m 1 step processed (1 job completed)
###Markdown
Or, with the following configuration file
###Code
%save myconfig.yml -f
manager: Martin
###Output
_____no_output_____
###Markdown
use default values from a configuration file
###Code
%run -c myconfig.yml -v1
parameter: manager = CONFIG.get('manager', 'Bob')
print(manager)
###Output
Martin
###Markdown
Host-dependent paths path(name, default) The path datatype of SoS is derived from `pathlib.Path`. One of the additions of this datatype is the parameters `name` and `default`, which return a pre-defined path defined in `CONFIG["hosts"][current-host]["paths"]`, where current-host is normally localhost but can be one of the remote hosts if the function is called from a remote host. A default value could be returned if name is not available in the configuration. The `hosts` definitions in `~/.sos/hosts.yml` allow the definition of paths for different hosts. For clarity let us define a local configuration file that points `localhost` to an `example_host` configuration.
###Code
%save myconfig.yml -f
localhost: example_host
hosts:
example_host:
address: localhost
paths:
home: /Users/{user_name}
project: /Users/{user_name}/Documents
tmp: /tmp
###Output
_____no_output_____
###Markdown
Without worrying about the `localhost` part for now, this configuration file defines a few paths for the localhost. The `paths` could be retrieved using `path(name='project')` so that you can write your script in a host-independent way. For example, the following workflow uses `path(name='project')` to get the host-specific `project` directory, which is defined as `/Users/bpeng1/Documents` in `myconfig.yml`.
###Code
%run -c myconfig.yml -v1
sh: workdir=path(name='project')
echo Working on `pwd`
###Output
Working on /Users/bpeng/vatlab/sos-docs/src/user_guide
###Markdown
If you are uncertain if `project` is defined for current host, you can use `default` to specify a default value
###Code
%run -c myconfig.yml -v1
import os
sh: workdir=path(name='scratch', default='~')
echo Working on `pwd`
###Output
Working on /Users/bpeng/vatlab/sos-docs/src/user_guide
###Markdown
Configuration Files * **Difficulty level**: easy* **Time needed to learn**: 10 minutes or less* **Key points**: * SoS reads multiple configuration files and merges the results * User configuration files can be specified with option `-c` * Content of configuration file is available through variable `CONFIG` * Host-specific paths can be accessed by `path(name, default)` SoS configuration files SoS reads configurations from * A site configuration file `site_config.yml` under the sos package directory. This is where system administrators define system-wide configurations (e.g. host definitions) for all users.* A host configuration file `~/.sos/hosts.yml` that defines properties of local and remote hosts.* A global sos configuration file `~/.sos/config.yml` that defines other user-specific settings.* And a configuration file specified by command line option `-c` that defines workflow-specific settings. The configuration files should be in the format of [`YAML`](http://yaml.org/) or its subset format [`JSON`](http://json-schema.org/implementations.html). When a SoS script is loaded, SoS looks for and parses site and global configuration files, then optionally a configuration file specified by command line option `-c`. The results are stored in a global variable `CONFIG` that is available to the script. Merge of multiple configuration files All configurations from the aforementioned files are merged to a single dictionary. A dictionary could therefore contain keys defined in different configuration files and a latter file could overwrite keys defined in a previous file. For example, if * `{'A': {'B': 'old', 'C': 'old'}}` is defined in `~/.sos/config.yml` using ``` A: B: old C: old ``` * `{'A': {'B': 'new', 'D': 'new'}}` is defined in `my_config.yml` using ``` A: B: new D: new ```then the final result using `-c my_config.yml` would be `{'A': {'B': 'new', 'C': 'old', 'D': 'new'}}` as if a single configuration file with content ``` A: B: new C: old D: new ```is used. This is how site or global configurations can be overridden by user configurations. String interpolation SoS interpolates string values if they contain `{ }`. The expressions enclosed by `{ }` would be evaluated with a local namespace that is the dictionary in which the key exists, and a global namespace that is the complete `CONFIG` dictionary. That is to say, if a configuration file contains```user_name: userhosts: cluster: address: "{user_name}@domain.com:{port}" port: 123````CONFIG['hosts']['cluster']['address']` would be interpolated as```[email protected]:123```using `port` from the `CONFIG['hosts']['cluster']` dictionary and `user_name` from the top level `CONFIG['user_name']`. You will need to double the braces (`{{ }}`) to include `{ }` in the config file. Because key `user_name` is frequently used in `hosts.yml`, SoS automatically defines `user_name` as the local user ID (all lower case) in `CONFIG` if it is not defined in any of the configuration files. Derived dictionary keys A special key `based_on` will be processed after all configuration files are loaded. The value of `based_on` should be one or more keys to other dictionaries in the configuration (e.g. `hosts.cluster`). The consequence of this key is that the items from the referred dictionaries would be merged to the present dictionary if they do not exist in the present dictionary. This allows you to derive a dictionary from an existing one. 
For example, ```hosts: head_node: description: head_node of cluster address: "{user_name}@domain.com:{port}" port: 123 paths: home: "/home/{user_name}" cluster: description: Cluster based_on: hosts.head_node queue_type: pbs```allows `hosts["cluster"]` to be derived from `hosts["head_node"]`, and```hosts: cat: based_on: hosts.a_very_long_name```effectively creates an alias `cat` to another host with `a_very_long_name`. Command `sos config` Although `yaml` is not a difficult format to learn, it is often easier to use command `sos config` to check and set values in configuration files, especially for complex data types. Set configuration `sos config` by default works on the `~/.sos/config.yml` file. For example
###Code
!sos config --set cutoff 0.5
###Output
Set cutoff to '0.5'
###Markdown
creates `~/.sos/config.yml` if it does not exist, or appends to this file otherwise, with content
###Code
!cat ~/.sos/config.yml
###Output
cutoff: '0.5'
###Markdown
You can specify a configuration file and add the content to it with option `-c`:
###Code
!sos config -c new_config.yml --set cutoff.low 1
###Output
Set cutoff to {'low': 1}
###Markdown
would create a configuration file `new_config.yml`
###Code
!cat new_config.yml
###Output
cutoff:
low: 1
###Markdown
Note that `cutoff.low` is interpreted as dictionary `cutoff` with key `low`, and the command is clever enough to handle partial values (e.g. of a dictionary). For example, the following command will update instead of replacing `cutoff`
###Code
!sos config -c new_config.yml --set cutoff.high 2
###Output
Set cutoff to {'low': 1, 'high': 2}
###Markdown
The `--set` option can handle lists:
###Code
!sos config -c new_config.yml --set values 1 2 3
###Output
Set values to [1, 2, 3]
###Markdown
And it accepts Python expressions such as a dictionary. The tricky part here is that SoS would interpolate command line (`!` magic) if you execute the command in SoS notebook, so you will have to double the braces here. You do not need to do it if you execute the command from a terminal.
###Code
!sos config -c new_config.yml --set samples "{{'A': 'A.txt'}}"
###Output
_____no_output_____
###Markdown
View configurations Running command `sos config` without any parameter will list all configurations in a dictionary format. Because we set `cutoff` to `0.5` in `~/.sos/config.yml`, the following command shows `cutoff` and a `user_name` key generated by SoS.
###Code
!sos config
###Output
{'cutoff': '0.5', 'user_name': 'bpeng1'}
###Markdown
If you are interested in only one of the items, you can use option `--get` to list it.
###Code
!sos config --get cutoff
###Output
cutoff '0.5'
###Markdown
Of course you can use `-c` to include another configuration file
###Code
!sos config --get cutoff -c new_config.yml
###Output
cutoff.high 2
cutoff.low 1
###Markdown
or only one of the keys
###Code
!sos config -c new_config.yml --get cutoff.low
###Output
cutoff.low 1
###Markdown
Remove a key from a configuration file Finally, if you would like to remove a key from a configuration file, you can use option `--unset`.
###Code
!sos config -c new_config.yml --unset cutoff
###Output
Unset cutoff
###Markdown
Running `sos config` again will show the `cutoff` from `~/.sos/config.yml`, which was previously overridden by the `cutoff` defined in `new_config.yml`.
###Code
!sos config --get cutoff -c new_config.yml
###Output
cutoff '0.5'
###Markdown
Variable `CONFIG` As stated above, you can create a configuration file and load it with option `-c`, and the results would be available as a magic variable `CONFIG` to the workflow. Let us create a YAML file with some simple content using a `report` action.
###Code
report: output='myconfig.yml'
# A simple example configuration
martin:
name: Martin D'vloper
job: Developer
skill: Elite
manager: Martin
###Output
_____no_output_____
###Markdown
When you execute any workflow with option `-c myconfig.yml`, the content of the configuration file would be available as keys of variable `CONFIG`. Configuration files are frequently used to specify system configurations. For example, with the following definition of parameter `manager`, the workflow will take default value `Bob` if run without option,
###Code
%run
parameter: manager = CONFIG.get('manager', 'Bob')
print(manager)
###Output
Bob
###Markdown
take user specified value from command line
###Code
%run --manager Me
parameter: manager = CONFIG.get('manager', 'Bob')
print(manager)
###Output
Me
###Markdown
or values from a configuration file if a configuration file is specified
###Code
%run -c myconfig.yml
parameter: manager = CONFIG.get('manager', 'Bob')
print(manager)
###Output
Martin
###Markdown
Host-dependent paths path(name, default) The path datatype of SoS is derived from `pathlib.Path`. One of the additions of this datatype is the parameters `name` and `default`, which return a pre-defined path defined in `CONFIG["hosts"][current-host]["paths"]`, where current-host is normally localhost but can be one of the remote hosts if the function is called from a remote host. A default value could be returned if name is not available in the configuration. The `hosts` definitions in `~/.sos/hosts.yml` allow the definition of paths for different hosts. For clarity let us define a local configuration file that points `localhost` to an `example_host` configuration.
###Code
report: output='myconfig.yml'
localhost: example_host
hosts:
example_host:
address: localhost
paths:
home: /Users/{user_name}
project: /Users/{user_name}/Documents
tmp: /tmp
###Output
_____no_output_____
###Markdown
Without worrying about the `localhost` part for now, this configuration file defines a few paths for the localhost. The `paths` could be retrieved using `path(name='project')` so that you can write your script in a host-independent way. For example, the following workflow uses `path(name='project')` to get the host-specific `project` directory, which is defined as `/Users/bpeng1/Documents` in `myconfig.yml`.
###Code
%run -c myconfig.yml
sh: workdir=path(name='project')
echo Working on `pwd`
###Output
Working on /Users/bpeng1/Documents
###Markdown
If you are uncertain if `project` is defined for current host, you can use `default` to specify a default value
###Code
%run -c myconfig.yml
import os
sh: workdir=path(name='scratch', default='~')
echo Working on `pwd`
###Output
Working on /Users/bpeng1
|
Tensorflow/Transfer_Learning.ipynb | ###Markdown
Submission Instructions
###Code
# Now click the 'Submit Assignment' button above.
###Output
_____no_output_____
###Markdown
When you're done or would like to take a break, please run the two cells below to save your work and close the Notebook. This will free up resources for your fellow learners.
###Code
%%javascript
<!-- Save the notebook -->
IPython.notebook.save_checkpoint();
%%javascript
IPython.notebook.session.delete();
window.onbeforeunload = null
setTimeout(function() { window.close(); }, 1000);
###Output
_____no_output_____ |
notebooks/Stellar-population.ipynb | ###Markdown
old notebook from KITP...
###Code
import pickle
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import astropy.coordinates as coord
from astropy.coordinates.matrix_utilities import rotation_matrix
from astropy.table import Table
from astropy.io import fits
import astropy.units as u
from ezmist import get_one_isochrone
from scipy.ndimage import gaussian_filter
from scipy.interpolate import InterpolatedUnivariateSpline
import gala.coordinates as gc
import gala.dynamics as gd
import gala.potential as gp
import gala.mpl_style
from pyia import GaiaData
from coordinates import pal5_c, galcen_frame, pal5_lead_frame, pal5_trail_frame
from coordinates import trail_epts, lead_epts
t = Table.read('../data/pal5-apw-filtered.fits')
c = coord.SkyCoord(ra=t['ra']*u.deg, dec=t['dec']*u.deg)
c_l = c.transform_to(pal5_lead_frame)
c_t = c.transform_to(pal5_trail_frame)
Xl = np.stack((c_l.phi1.wrap_at(180*u.deg).degree,
c_l.phi2.degree)).T
Xt = np.stack((c_t.phi1.wrap_at(180*u.deg).degree,
c_t.phi2.degree)).T
###Output
_____no_output_____
###Markdown
Select RR Lyrae:
###Code
t = Table.read('/Users/adrian/data/streams/Pal5/pal5_rrl_inside_canonical_footprint.csv')
t.rename_column('ra_2', 'ra')
t.rename_column('dec_2', 'dec')
rrl = GaiaData(t)
rrl = rrl[(rrl.D_kpc > 18) & (rrl.D_kpc < 24)]
# plt.scatter(coord.Distance(distmod=rrl.DM).kpc,
# rrl.D_kpc - coord.Distance(distmod=rrl.DM).kpc)
# plt.xlim(5, 40)
# plt.ylim(-10, 10)
rrl_c = rrl.get_skycoord(distance=rrl.D_kpc*u.kpc)
rrl_c_pal5 = rrl_c.transform_to(gc.Pal5PriceWhelan18)
rrl_c_pal5_ref = gc.reflex_correct(rrl_c_pal5)
rrl_c_l = rrl_c.transform_to(pal5_lead_frame)
rrl_c_t = rrl_c.transform_to(pal5_trail_frame)
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.scatter(rrl_c_pal5.pm_phi1_cosphi2,
rrl_c_pal5.pm_phi2,
marker='o', alpha=0.5,
vmin=-20, vmax=20)
ax.set_xlim(-1, 9)
ax.set_ylim(-5, 5)
ax.set_xlabel(r'$\mu_{\phi_1}$')
ax.set_ylabel(r'$\mu_{\phi_2}$')
for X, _c in zip([Xl, Xt],
[rrl_c_l, rrl_c_t]):
fig, ax = plt.subplots(1, 1, figsize=(10, 5))
ax.plot(X[:, 0], X[:, 1],
marker='o', ls='none',
color='k', alpha=0.25, ms=2)
ax.scatter(_c.phi1.wrap_at(180*u.deg).degree,
_c.phi2.degree, color='tab:orange',
lw=1., edgecolor='#666666', s=50)
ax.set_xlim(0, 20.)
ax.set_ylim(-1.5, 1.5)
# ax.set_aspect('equal')
ax.set_xlabel(r'$\phi_1$ [deg]')
ax.set_ylabel(r'$\phi_2$ [deg]')
fig.tight_layout()
fig.set_facecolor('w')
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.scatter(rrl_c_pal5.pm_phi1_cosphi2,
rrl_c_pal5.pm_phi2,
# ax.scatter(rrl_c_pal5_ref.pm_phi1_cosphi2,
# rrl_c_pal5_ref.pm_phi2,
marker='o',
vmin=-20, vmax=20)
ax.set_xlim(1, 7)
ax.set_ylim(-3, 3)
ax.set_xlabel(r'$\mu_{\phi_1}$')
ax.set_ylabel(r'$\mu_{\phi_2}$')
pm_mask = ((rrl_c_pal5.pm_phi1_cosphi2 > 3*u.mas/u.yr) &
(rrl_c_pal5.pm_phi1_cosphi2 < 4.5*u.mas/u.yr) &
(rrl_c_pal5.pm_phi2 > 0*u.mas/u.yr) &
(rrl_c_pal5.pm_phi2 < 1.2*u.mas/u.yr))
fig, axes = plt.subplots(3, 1, figsize=(15, 8),
sharex=True)
ax = axes[0]
ax.plot(rrl_c_pal5.phi1.wrap_at(180*u.deg).degree[pm_mask],
rrl_c.distance[pm_mask],
marker='o', ls='none', alpha=0.5, color='k')
ax.set_xlim(-25, 25)
# ax.set_ylim(10, 35)
# ax.set_xlim(-10, 10)
ax.set_ylim(17, 25)
ax = axes[1]
ax.plot(rrl_c_pal5.phi1.wrap_at(180*u.deg).degree[pm_mask],
rrl_c_pal5.pm_phi1_cosphi2[pm_mask],
marker='o', ls='none', alpha=0.5, color='k')
ax = axes[2]
ax.plot(rrl_c_pal5.phi1.wrap_at(180*u.deg).degree[pm_mask],
rrl_c_pal5.pm_phi2[pm_mask],
marker='o', ls='none', alpha=0.5, color='k')
ax.xaxis.set_ticks(np.arange(-25, 25+1e-3, 5));
for X, _c in zip([Xl, Xt],
[rrl_c_l, rrl_c_t]):
fig, ax = plt.subplots(1, 1, figsize=(10, 5))
ax.plot(X[:, 0], X[:, 1],
marker='o', ls='none',
color='k', alpha=0.25, ms=2)
ax.scatter(_c.phi1.wrap_at(180*u.deg).degree[pm_mask],
_c.phi2.degree[pm_mask],
color='tab:orange', zorder=10,
lw=1., edgecolor='#666666', s=50)
ax.set_xlim(0, 25.)
ax.set_ylim(-2.5, 2.5)
# ax.set_aspect('equal')
ax.set_xlabel(r'$\phi_1$ [deg]')
ax.set_ylabel(r'$\phi_2$ [deg]')
fig.tight_layout()
fig.set_facecolor('w')
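# Approximate distance run along the stream: maps phi1 (deg) to heliocentric
# distance (kpc); the control points appear to follow the RR Lyrae trend above.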
d_interp = InterpolatedUnivariateSpline([-22., -5, 0, 5],
[23., 21, 20.5, 19.5],
bbox=[-25, 25])
###Output
_____no_output_____
###Markdown
--- Load photometry and supporting catalogues
###Code
a2ebv = [3.995, 3.214, 2.165, 1.592, 1.211, 1.064]
t = Table.read('../data/pal5_ls_lite.fits')
t = t[(22.5 - 2.5*np.log10(t['flux_g']) - t['ebv']*a2ebv[1]) < 23.5] # de-reddened g cut
c = coord.SkyCoord(ra=t['ra']*u.deg, dec=t['dec']*u.deg)
c_pal5 = c.transform_to(gc.Pal5PriceWhelan18)
phi1 = c_pal5.phi1.wrap_at(180*u.deg)
g0 = 22.5 - 2.5*np.log10(t['flux_g']) - t['ebv']*a2ebv[1]
r0 = 22.5 - 2.5*np.log10(t['flux_r']) - t['ebv']*a2ebv[2]
###Output
_____no_output_____
###Markdown
Shift CMD by distance
###Code
coord.Distance(d_interp(-20)*u.kpc).distmod, coord.Distance(d_interp(10)*u.kpc).distmod
M_g = g0 - coord.Distance(d_interp(-20)*u.kpc).distmod.value
###Output
_____no_output_____
###Markdown
Stellar pop along leading, trailing arms:
###Code
pal5_lead = Table.read('../data/pal5_lead_samples.fits')
pal5_trail = Table.read('../data/pal5_trail_samples.fits')
lead_c = c.transform_to(pal5_lead_frame)
trail_c = c.transform_to(pal5_trail_frame)
stream_mask = np.zeros(len(c), dtype=bool)
control_mask = np.zeros(len(c), dtype=bool)
for cc, tbl, name in zip([lead_c, trail_c],
[pal5_lead, pal5_trail],
['lead', 'trail']):
with open('ctl_paths_{}.pkl'.format(name), 'rb') as _f:
ctl_paths = pickle.load(_f)
with open('str_path_{}.pkl'.format(name), 'rb') as _f:
str_path = pickle.load(_f)
X = np.stack((cc.phi1.degree,
cc.phi2.degree)).T
stream_mask |= str_path.contains_points(X)
control_mask |= ctl_paths[0].contains_points(X) | ctl_paths[1].contains_points(X)
iso = Table.read('/Users/adrian/data/Isochrones/MIST/FeH_-1.3_iso.fits')
iso1 = iso[iso['log10_isochrone_age_yr'] == 10.1]
phasecut = (iso1['phase'] >= 0) & (iso1['phase'] < 4)
iso1 = iso1[phasecut]
iso_g = iso1['dec_g']
iso_r = iso1['dec_r']
fig, axes = plt.subplots(1, 3, figsize=(15, 6),
sharex=True, sharey=True)
cmd_bins = (np.arange(-0.5, 1.1+1e-3, 0.02),
np.arange(0, 7+1e-3, 0.04))
cl_mask = c.separation(pal5_c) < 0.15*u.deg
tail_mask = np.logical_not(cl_mask)
ax = axes[0]
H1, xe, ye = np.histogram2d((g0-r0)[stream_mask & tail_mask],
M_g[stream_mask & tail_mask],
bins=cmd_bins)
ax.pcolormesh(xe, ye, H1.T, cmap='magma')
ax = axes[1]
H2, xe, ye = np.histogram2d((g0-r0)[control_mask & tail_mask],
M_g[control_mask & tail_mask],
bins=cmd_bins)
ax.pcolormesh(xe, ye, H2.T, cmap='magma')
ax = axes[2]
H1 = gaussian_filter(H1, 1)
H2 = gaussian_filter(H2, 1)
ax.pcolormesh(xe, ye, (H1 - H2).T, cmap='Greys',
norm=mpl.colors.LogNorm(vmin=0.5, vmax=20))
ax.set_xlim(-0.5, 1.1)
ax.set_ylim(7, 0)
fig.tight_layout()
###Output
_____no_output_____
###Markdown
Stars in the cluster itself
###Code
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
ax.plot(c.ra.deg, c.dec.deg,
marker='.', ls='none', alpha=0.5)
lim1 = 0.075 * u.deg
lim2 = 0.2 * u.deg
ax.add_patch(mpl.patches.Circle((pal5_c.ra.deg, pal5_c.dec.deg),
radius=lim1.value, facecolor='k',
alpha=0.2, zorder=100))
ax.add_patch(mpl.patches.Circle((pal5_c.ra.deg, pal5_c.dec.deg),
radius=lim2.value, facecolor='k',
alpha=0.2, zorder=100))
ax.set_xlim(pal5_c.ra.deg+1, pal5_c.ra.deg-1)
ax.set_ylim(pal5_c.dec.deg-1, pal5_c.dec.deg+1)
cl_mask = (c.separation(pal5_c) > lim1) & (c.separation(pal5_c) < lim2)
cl_mask.sum()
A = (lim2**2 - lim1**2)
r1 = np.sqrt(A)
off1 = coord.SkyCoord(228.4*u.deg, 0.25*u.deg)
bg_mask = (c.separation(off1) < r1)
bg_mask.sum()
iso = Table.read('/Users/adrian/data/Isochrones/MIST/FeH_-1.3_iso.fits')
iso1 = iso[iso['log10_isochrone_age_yr'] == 10.1]
phasecut = (iso1['phase'] >= 0) & (iso1['phase'] < 4)
iso1 = iso1[phasecut]
iso_g = iso1['dec_g']
iso_r = iso1['dec_r']
# ---
iso2 = iso[iso['log10_isochrone_age_yr'] == 9.5]
phasecut = (iso2['phase'] >= 0) & (iso2['phase'] < 4)
iso2 = iso2[phasecut]
iso2_g = iso2['dec_g']
iso2_r = iso2['dec_r']
fig, axes = plt.subplots(1, 3, figsize=(15, 5),
sharex=True, sharey=True)
ax = axes[0]
ax.plot((g0-r0)[cl_mask],
M_g[cl_mask],
marker='o', ls='none',
alpha=0.24, color='k')
ax = axes[1]
ax.plot((g0-r0)[bg_mask],
M_g[bg_mask],
marker='o', ls='none',
alpha=0.24, color='k')
ax = axes[2]
bins = (np.arange(-0.5, 1.+1e-3, 0.02),
np.arange(0, 7+1e-3, 0.05))
H1, xe, ye = np.histogram2d((g0-r0)[cl_mask], g0[cl_mask], bins=bins)
H2, xe, ye = np.histogram2d((g0-r0)[bg_mask], g0[bg_mask], bins=bins)
H1 = gaussian_filter(H1, 1.5)
H2 = gaussian_filter(H2, 1.5)
ax.pcolormesh(xe, ye, (H1 - H2).T,
cmap='Greys',
norm=mpl.colors.LogNorm(vmin=1e-2, vmax=10))
ax = axes[0]
ax.plot(iso_g-iso_r - 0.01*(iso_g-3.5)**1.5,
iso_g-0.15)
ax.plot(iso_g-iso_r + 0.1 + 0.01*(iso_g-3.5)**1.5,
iso_g-0.15)
poly1 = np.stack((iso_g-iso_r - 0.01*(iso_g-3.5)**1.5,
iso_g-0.15)).T
poly2 = np.stack((iso_g-iso_r + 0.1 + 0.01*(iso_g-3.5)**1.5,
iso_g-0.15)).T
grg_path = mpl.path.Path(np.vstack((poly2[poly2[:, 1]<6.8][::-1],
poly1[poly1[:, 1]<6.8])))
# ax.add_patch(mpl.patches.Polygon(grg_path.vertices))
ax.set_xlim(-0.5, 1.)
ax.set_ylim(7, 0)
fig.tight_layout()
poly_mask = grg_path.contains_points(np.stack((g0-r0, M_g)).T[cl_mask])
poly_mask_bg = grg_path.contains_points(np.stack((g0-r0, M_g)).T[bg_mask])
poly_mask_tail = grg_path.contains_points(np.stack((g0-r0, M_g)).T[stream_mask & tail_mask])
poly_mask_bg_tail = grg_path.contains_points(np.stack((g0-r0, M_g)).T[control_mask & tail_mask])
g_bins = np.arange(3, 7+1e-3, 0.5)
N_cl, _ = np.histogram(M_g[cl_mask][poly_mask], g_bins)
N_cl_bg, _ = np.histogram(M_g[bg_mask][poly_mask_bg], g_bins)
N_tail, _ = np.histogram(M_g[stream_mask & tail_mask][poly_mask_tail], g_bins)
N_tail_bg, _ = np.histogram(M_g[control_mask & tail_mask][poly_mask_bg_tail], g_bins)
g_bin_c = 0.5*(g_bins[:-1]+g_bins[1:])
plt.errorbar(g_bin_c, N_cl,
np.sqrt(N_cl),
ls='none', marker='o')
plt.errorbar(g_bin_c, N_cl_bg,
np.sqrt(N_cl_bg),
ls='none', marker='o')
plt.errorbar(g_bin_c, N_cl - N_cl_bg,
np.sqrt(N_cl - N_cl_bg),
ls='none', marker='o', color='k')
kroupa_ms = np.load('/Users/adrian/Downloads/kroupa_masses.npy')
def dN_dm_Grillmair(m, a=0.02, b=1.):
return 2/3*(b**1.5-a**1.5) * m**0.5
ymax = dN_dm_Grillmair(np.linspace(0.4,1,1024), 0.4, 1.).max()
xs = np.random.uniform(0.4, 1., size=500000)
ys = np.random.uniform(0, ymax, size=500000)
grillmair_ms = xs[ys < dN_dm_Grillmair(xs, 0.4, 1.)]
x = iso1['star_mass'][iso1['phase'] < 2]
y = iso_g[iso1['phase'] < 2]
interp_m2g = InterpolatedUnivariateSpline(x[np.argsort(x)],
y[np.argsort(x)],
ext=1)
interp_g2m = InterpolatedUnivariateSpline(y[np.argsort(y)],
x[np.argsort(y)],
ext=1)
kroupa_gs = interp_m2g(kroupa_ms[:100000])
grillmair_gs = interp_m2g(grillmair_ms)
N_kr, _ = np.histogram(kroupa_gs, g_bins)
N_gr, _ = np.histogram(grillmair_gs, g_bins)
fig, axes = plt.subplots(1, 2, figsize=(10, 5),
sharex=True)
ax = axes[0]
for ax, Nnorm in zip(axes, [(N_cl - N_cl_bg)[1],
(N_tail - N_tail_bg)[1]]):
ax.plot(g_bin_c, N_kr / N_kr[1] * Nnorm,
marker='', drawstyle='steps-mid',
label='kroupa')
ax.plot(g_bin_c, N_gr / N_gr[1] * Nnorm,
marker='', drawstyle='steps-mid',
            label='grillmair')
axes[0].errorbar(g_bin_c, N_cl - N_cl_bg,
np.sqrt(N_cl - N_cl_bg),
ls='none', marker='o',
color='k',
label='pal 5 cluster LF')
axes[1].errorbar(g_bin_c, N_tail - N_tail_bg,
np.sqrt(N_tail - N_tail_bg),
ls='none', marker='o',
color='tab:red',
label='pal 5 stream LF')
ax.set_xlim(3, 7)
ax.xaxis.set_ticks(np.arange(3, 7+1e-3, 0.5))
for ax in axes:
ax.set_xlabel('$g$ [mag]')
axes[0].set_ylabel('$N$')
axes[0].set_title('Cluster')
axes[1].set_title('Stream')
fig.set_facecolor('w')
###Output
_____no_output_____ |
Notebooks cidades/Guarulhos_Antes.ipynb | ###Markdown
PRÉ-PROCESSAMENTO
###Code
Antes['CS_GESTANT'].replace({1.0: 1, 2.0: 1, 3.0 :1, 4.0 : 1}, inplace= True)
Antes['CS_GESTANT'].replace({5.0: 0, 6.0:0, 9.0:0}, inplace= True)
Antes['CS_RACA'].fillna(9,inplace= True)
Antes['CS_ESCOL_N'].fillna(9,inplace= True)
Antes['SURTO_SG'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['SURTO_SG'].fillna(0,inplace= True)
Antes['NOSOCOMIAL'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['NOSOCOMIAL'].fillna(0,inplace= True)
Antes['FEBRE'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['FEBRE'].fillna(0,inplace= True)
Antes['TOSSE'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['TOSSE'].fillna(0,inplace= True)
Antes['GARGANTA'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['GARGANTA'].fillna(0,inplace= True)
Antes['DISPNEIA'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['DISPNEIA'].fillna(0,inplace= True)
Antes['DESC_RESP'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['DESC_RESP'].fillna(0,inplace= True)
Antes['SATURACAO'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['SATURACAO'].fillna(0,inplace= True)
Antes['DIARREIA'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['DIARREIA'].fillna(0,inplace= True)
Antes['VOMITO'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['VOMITO'].fillna(0,inplace= True)
Antes['PUERPERA'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['PUERPERA'].fillna(0,inplace= True)
Antes['CARDIOPATI'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['CARDIOPATI'].fillna(0,inplace= True)
Antes['HEMATOLOGI'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['HEMATOLOGI'].fillna(0,inplace= True)
Antes['SIND_DOWN'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['SIND_DOWN'].fillna(0,inplace= True)
Antes['HEPATICA'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['HEPATICA'].fillna(0,inplace= True)
Antes['ASMA'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['ASMA'].fillna(0,inplace= True)
Antes['DIABETES'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['DIABETES'].fillna(0,inplace= True)
Antes['NEUROLOGIC'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['NEUROLOGIC'].fillna(0,inplace= True)
Antes['PNEUMOPATI'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['PNEUMOPATI'].fillna(0,inplace= True)
Antes['IMUNODEPRE'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['IMUNODEPRE'].fillna(0,inplace= True)
Antes['RENAL'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['RENAL'].fillna(0,inplace= True)
Antes['OBESIDADE'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['OBESIDADE'].fillna(0,inplace= True)
Antes['ASMA'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['ASMA'].fillna(0,inplace= True)
Antes['ANTIVIRAL'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['ANTIVIRAL'].fillna(0,inplace= True)
Antes['UTI'].replace({2.0: 0, 9.0: 0}, inplace= True)
Antes['UTI'].fillna(0,inplace= True)
Antes['SUPORT_VEN'].replace({3.0: 0, 9.0: 0}, inplace= True)
Antes['SUPORT_VEN'].fillna(0,inplace= True)
Antes['PCR_RESUL'].fillna(4,inplace= True)
Antes['HISTO_VGM'].replace({0: 2}, inplace= True)
Antes['DOR_ABD'].replace({9.0: 0, 2.0 :0}, inplace= True)
Antes['DOR_ABD'].fillna(0,inplace= True)
Antes['FADIGA'].replace({9.0: 0, 2.0 :0}, inplace= True)
Antes['FADIGA'].fillna(0,inplace= True)
Antes['PERD_OLFT'].replace({9.0: 0, 2.0 :0}, inplace= True)
Antes['PERD_OLFT'].fillna(0,inplace= True)
Antes['PERD_PALA'].replace({9.0: 0, 2.0 :0}, inplace= True)
Antes['PERD_PALA'].fillna(0,inplace= True)
Antes['VACINA'].fillna(0,inplace= True)
Antes['FATOR_RISC'].replace({'S': 1, 'N':2, '1':1, '2':2}, inplace= True)
Antes['FATOR_RISC'].fillna(0,inplace= True)
###Output
_____no_output_____
###Markdown
- Resetting the index again.
###Code
Antes= Antes.reset_index(drop=True)
Antes.head()
###Output
_____no_output_____
###Markdown
- Applying dummy (one-hot) encoding to the categorical features
###Code
Antes=pd.get_dummies(Antes, columns=['CS_SEXO', 'CS_GESTANT', 'CS_RACA', 'CS_ESCOL_N',
'SURTO_SG', 'NOSOCOMIAL', 'FEBRE', 'TOSSE', 'GARGANTA', 'DISPNEIA',
'DESC_RESP', 'SATURACAO', 'DIARREIA', 'VOMITO', 'PUERPERA',
'FATOR_RISC', 'CARDIOPATI', 'HEMATOLOGI', 'SIND_DOWN', 'HEPATICA',
'ASMA', 'DIABETES', 'NEUROLOGIC', 'PNEUMOPATI', 'IMUNODEPRE', 'RENAL',
'OBESIDADE', 'VACINA', 'ANTIVIRAL', 'UTI', 'SUPORT_VEN', 'PCR_RESUL',
'HISTO_VGM', 'DOR_ABD', 'FADIGA', 'PERD_OLFT', 'PERD_PALA'], drop_first=True)
Antes.head()
###Output
_____no_output_____
###Markdown
Checking the class balance
###Code
Antes["EVOLUCAO"].value_counts(normalize=True)
X = Antes[['IDADE_ANOS','CS_SEXO_M','CS_RACA_4.0','FEBRE_1.0','DISPNEIA_1.0','SATURACAO_1.0','UTI_1.0',
'SUPORT_VEN_1.0', 'SUPORT_VEN_2.0', 'PCR_RESUL_2.0','TOSSE_1.0','DESC_RESP_1.0', 'FATOR_RISC_2']]
y = Antes['EVOLUCAO']
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3, random_state=42)
Xtrain.shape, Xtest.shape, ytrain.shape, ytest.shape
smote = SMOTE(sampling_strategy = 'minority', random_state = 42)
Xtrain_over, ytrain_over = smote.fit_resample(Xtrain,ytrain)
Xtest_over, ytest_over = smote.fit_resample(Xtest,ytest)
Xtrain_over.shape, ytrain_over.shape, Xtest_over.shape, ytest_over.shape
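# Hedged check (added for illustration, not in the original notebook): after SMOTE the
# two classes should be roughly 50/50 in the resampled training and test sets.
print(pd.Series(ytrain_over).value_counts(normalize=True))
print(pd.Series(ytest_over).value_counts(normalize=True))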
###Output
_____no_output_____
###Markdown
Applying the chosen model
###Code
random_state=42
BCG = BaggingClassifier()
BCG.fit(Xtrain_over, ytrain_over)
previsoes = BCG.predict(Xtest_over)
previsoes
accuracy_score(ytest_over, previsoes)
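# Hedged addition (not in the original notebook): accuracy alone can hide class-wise
# behaviour, so a confusion matrix and classification report are a useful complement.
# scikit-learn is assumed to be available, since accuracy_score is already used above.
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(ytest_over, previsoes))
print(classification_report(ytest_over, previsoes))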
# Test the model
idade = 43.0
sexo = 1
raca = 0
febre = 1
dispneia = 1
saturacao = 0
uti = 1
suport1 = 1
suport2 = 0
pcr = 1
tosse = 1
descresp = 0
frisc = 0
prediction = BCG.predict(np.array([idade, sexo, raca, febre, dispneia, saturacao, uti, suport1, suport2, pcr, tosse, descresp, frisc]).reshape(1, -1))
print(prediction)
###Output
[1.]
|
Notebooks - 2/D169 (LR,WD) (32) (NC) Analysis.ipynb | ###Markdown
Library Imports
###Code
from time import time
notebook_start_time = time()
import os
import re
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
from torch.utils.data import Dataset
from torch.utils.data import DataLoader as DL
from torch.nn.utils import weight_norm as WN
from torchvision import models, transforms
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
###Output
_____no_output_____
###Markdown
Constants and Utilities
###Code
SEED = 49
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
DATA_PATH = "../input/petfinder-pawpularity-score"
FEATURE_PATH = "../input/petfinder-pf-nc-ua-all-dataset"
MODEL_NAME = "densenet169"
DEBUG = False
verbose = False
sc_y = StandardScaler()
def breaker(num=50, char="*") -> None:
print("\n" + num*char + "\n")
def get_targets(path: str) -> np.ndarray:
df = pd.read_csv(os.path.join(path, "train.csv"), engine="python")
targets = df["Pawpularity"].copy().values
return targets.reshape(-1, 1)
def show_graphs(L: list, title=None) -> None:
TL, VL = [], []
for i in range(len(L)):
TL.append(L[i]["train"])
VL.append(L[i]["valid"])
x_Axis = np.arange(1, len(L) + 1)
plt.figure()
plt.plot(x_Axis, TL, "r", label="train")
plt.plot(x_Axis, VL, "b", label="valid")
plt.grid()
plt.legend()
if title:
plt.title("{} Loss".format(title))
else:
plt.title("Loss")
plt.show()
###Output
_____no_output_____
###Markdown
Dataset Template and Build Dataloader
###Code
class DS(Dataset):
def __init__(self, features=None, targets=None):
self.features = features
self.targets = targets
def __len__(self):
return self.features.shape[0]
def __getitem__(self, idx):
return torch.FloatTensor(self.features[idx]), torch.FloatTensor(self.targets[idx])
def build_dataloaders(tr_features: np.ndarray, va_features: np.ndarray,
tr_targets: np.ndarray, va_targets: np.ndarray,
batch_size: int, seed: int):
if verbose:
breaker()
print("Building Train and Validation DataLoaders ...")
tr_data_setup = DS(features=tr_features, targets=tr_targets)
va_data_setup = DS(features=va_features, targets=va_targets)
dataloaders = {
"train" : DL(tr_data_setup, batch_size=batch_size, shuffle=True, generator=torch.manual_seed(seed)),
"valid" : DL(va_data_setup, batch_size=batch_size, shuffle=False)
}
return dataloaders
###Output
_____no_output_____
###Markdown
Build Model
###Code
def build_model(IL: int, seed: int):
class ANN(nn.Module):
def __init__(self, IL=None):
super(ANN, self).__init__()
self.predictor = nn.Sequential()
self.predictor.add_module("BN", nn.BatchNorm1d(num_features=IL, eps=1e-5))
self.predictor.add_module("FC", WN(nn.Linear(in_features=IL, out_features=1)))
def get_optimizer(self, lr=1e-3, wd=0):
params = [p for p in self.parameters() if p.requires_grad]
return optim.Adam(params, lr=lr, weight_decay=wd)
def get_plateau_scheduler(self, optimizer=None, patience=5, eps=1e-8):
return optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, patience=patience, eps=eps, verbose=True)
def forward(self, x1, x2=None):
if x2 is not None:
return self.predictor(x1), self.predictor(x2)
else:
return self.predictor(x1)
if verbose:
breaker()
print("Building Model ...")
print("\n{} -> 1".format(IL))
torch.manual_seed(seed)
model = ANN(IL=IL)
return model
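# Hedged sanity check (added, illustrative only): the head maps an IL-dimensional feature
# vector to a single score. IL=64 below is an arbitrary choice; the real IL is taken from
# the feature matrix inside train().
_demo_model = build_model(IL=64, seed=SEED)
_demo_model.eval()
print(_demo_model(torch.randn(4, 64)).shape)   # expected: torch.Size([4, 1])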
###Output
_____no_output_____
###Markdown
Fit and Predict Helpers
###Code
def fit(model=None, optimizer=None, scheduler=None,
epochs=None, early_stopping_patience=None,
dataloaders=None, fold=None, lr=None, wd=None, verbose=False) -> tuple:
name = "./LR_{}_WD_{}_Fold_{}_state.pt".format(lr, wd, fold)
if verbose:
breaker()
print("Training Fold {}...".format(fold))
breaker()
# else:
# print("Training Fold {}...".format(fold))
Losses = []
bestLoss = {"train" : np.inf, "valid" : np.inf}
start_time = time()
for e in range(epochs):
e_st = time()
epochLoss = {"train" : np.inf, "valid" : np.inf}
for phase in ["train", "valid"]:
if phase == "train":
model.train()
else:
model.eval()
lossPerPass = []
for X, y in dataloaders[phase]:
X, y = X.to(DEVICE), y.to(DEVICE)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == "train"):
output = model(X)
loss = torch.nn.MSELoss()(output, y)
if phase == "train":
loss.backward()
optimizer.step()
lossPerPass.append(loss.item())
epochLoss[phase] = np.mean(np.array(lossPerPass))
Losses.append(epochLoss)
if early_stopping_patience:
if epochLoss["valid"] < bestLoss["valid"]:
bestLoss = epochLoss
BLE = e + 1
torch.save({"model_state_dict": model.state_dict(),
"optim_state_dict": optimizer.state_dict()},
name)
early_stopping_step = 0
else:
early_stopping_step += 1
if early_stopping_step > early_stopping_patience:
if verbose:
print("\nEarly Stopping at Epoch {}".format(e))
break
if epochLoss["valid"] < bestLoss["valid"]:
bestLoss = epochLoss
BLE = e + 1
torch.save({"model_state_dict": model.state_dict(),
"optim_state_dict": optimizer.state_dict()},
name)
if scheduler:
scheduler.step(epochLoss["valid"])
if verbose:
print("Epoch: {} | Train Loss: {:.5f} | Valid Loss: {:.5f} | Time: {:.2f} seconds".format(e+1, epochLoss["train"], epochLoss["valid"], time()-e_st))
if verbose:
breaker()
print("Best Validation Loss at Epoch {}".format(BLE))
breaker()
print("Time Taken [{} Epochs] : {:.2f} minutes".format(len(Losses), (time()-start_time)/60))
breaker()
print("Training Completed")
breaker()
return Losses, BLE, name
#####################################################################################################
def predict_batch(model=None, dataloader=None, mode="test", path=None) -> np.ndarray:
model.load_state_dict(torch.load(path, map_location=DEVICE)["model_state_dict"])
model.to(DEVICE)
model.eval()
y_pred = torch.zeros(1, 1).to(DEVICE)
if re.match(r"valid", mode, re.IGNORECASE):
for X, _ in dataloader:
X = X.to(DEVICE)
with torch.no_grad():
output = model(X)
y_pred = torch.cat((y_pred, output.view(-1, 1)), dim=0)
elif re.match(r"test", mode, re.IGNORECASE):
for X in dataloader:
X = X.to(DEVICE)
with torch.no_grad():
output = model(X)
y_pred = torch.cat((y_pred, output.view(-1, 1)), dim=0)
return y_pred[1:].detach().cpu().numpy()
###Output
_____no_output_____
###Markdown
Train
###Code
def train(features: np.ndarray, targets: np.ndarray,
n_splits: int, batch_size: int, lr: float, wd: float,
epochs: int, early_stopping: int,
patience=None, eps=None) -> list:
metrics = []
KFold_start_time = time()
if verbose:
breaker()
print("\tLR : {}, WD: {}".format(lr, wd))
breaker()
print("Performing {} Fold CV ...".format(n_splits))
fold = 1
for tr_idx, va_idx in KFold(n_splits=n_splits, shuffle=True, random_state=SEED).split(features):
tr_features, va_features = features[tr_idx], features[va_idx]
tr_targets, va_targets = targets[tr_idx], targets[va_idx]
tr_targets = sc_y.fit_transform(tr_targets)
va_targets = sc_y.transform(va_targets)
dataloaders = build_dataloaders(tr_features, va_features,
tr_targets, va_targets,
batch_size, SEED)
model = build_model(IL=tr_features.shape[1], seed=SEED).to(DEVICE)
optimizer = model.get_optimizer(lr=lr, wd=wd)
scheduler = None
if isinstance(patience, int) and isinstance(eps, float):
scheduler = model.get_plateau_scheduler(optimizer, patience, eps)
L, _, name = fit(model=model, optimizer=optimizer, scheduler=scheduler,
epochs=epochs, early_stopping_patience=early_stopping,
dataloaders=dataloaders, fold=fold, lr=lr, wd=wd, verbose=verbose)
y_pred = predict_batch(model=model, dataloader=dataloaders["valid"], mode="valid", path=name)
RMSE = np.sqrt(mean_squared_error(sc_y.inverse_transform(y_pred), sc_y.inverse_transform(va_targets)))
if verbose:
print("\nValidation RMSE [Fold {}]: {:.5f}".format(fold, RMSE))
breaker()
show_graphs(L)
metrics_dict = {"Fold" : fold, "LR" : lr, "WD" : wd, "RMSE" : RMSE}
metrics.append(metrics_dict)
fold += 1
if verbose:
breaker()
print("Total Time to {} Fold CV : {:.2f} minutes".format(n_splits, (time() - KFold_start_time)/60))
return metrics, (time() - KFold_start_time)/60
###Output
_____no_output_____
###Markdown
Main
###Code
def main():
########### Params ###########
if DEBUG:
n_splits = 10
patience, eps = 5, 1e-8
epochs, early_stopping = 5, 5
batch_size = 32
lrs = [1e-2, 1e-3]
wds = [0.0, 1e-1]
else:
n_splits = 10
patience, eps = 5, 1e-8
epochs, early_stopping = 100, 8
batch_size = 32
lrs = [1e-3, 5e-4, 1e-4]
wds = [0.0, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6]
##############################
complete_metrics = []
if verbose:
breaker()
print("Loading Data ...")
else:
breaker()
features = np.load(os.path.join(FEATURE_PATH, "{}_features.npy".format(MODEL_NAME)))
targets = get_targets(DATA_PATH)
for lr in lrs:
for wd in wds:
# Without Scheduler
metrics, time_taken = train(features, targets, n_splits, batch_size, lr, wd, epochs, early_stopping, patience=None, eps=None)
# # With Plateau Scheduler
# metrics = train(features, targets, n_splits, batch_size, lr, wd, epochs, early_stopping, patience=patience, eps=eps)
complete_metrics.append(metrics)
if not verbose:
print("LR : {}, WD: {} -> {:.2f} minutes".format(lr, wd, time_taken))
if verbose:
breaker()
for i in range(len(complete_metrics)):
for j in range(len(complete_metrics[i])):
print(complete_metrics[i][j])
rmse = []
for i in range(len(complete_metrics)):
for j in range(len(complete_metrics[i])):
rmse.append(complete_metrics[i][j]["RMSE"])
best_index = rmse.index(min(rmse))
best_index_1 = best_index // n_splits
best_index_2 = best_index % n_splits
breaker()
print("Best RMSE: {:.5f} using LR: {} and WD: {}".format(complete_metrics[best_index_1][best_index_2]["RMSE"],
complete_metrics[best_index_1][best_index_2]["LR"],
complete_metrics[best_index_1][best_index_2]["WD"]))
breaker()
with open("complete_metrics.pkl", "wb") as fp:
pickle.dump(complete_metrics, fp)
main()
if not verbose:
with open("complete_metrics.pkl", "rb") as fp:
params = pickle.load(fp)
rmse = []
for i in range(len(params)):
for j in range(len(params[i])):
rmse.append(params[i][j]["RMSE"])
best_index = rmse.index(min(rmse))
if DEBUG:
best_index_1 = best_index // 3
best_index_2 = best_index % 3
else:
best_index_1 = best_index // 10
best_index_2 = best_index % 10
breaker()
print("Params: {}".format(params[best_index_1][best_index_2]))
breaker()
breaker()
print("Notebook Runtime : {:.2f} minutes".format((time() - notebook_start_time)/60))
breaker()
###Output
**************************************************
Notebook Runtime : 44.59 minutes
**************************************************
|
November 2020/network_science.ipynb | ###Markdown
(Social) Network Analysis By Amir E. Fard [email protected] | [email protected] | www.linkedin.com/in/ebrahimifard/ LogisticsAgenda (Tentative)- 3:45 - 4:30 => SNA : Part One- 4:30 - 4:45 => Break- 4:45 - 5:30 => SNA : Part TwoRecording?- YesCamera & Microphone- Off pleaseInteractivity- Chat and MentimeterNotes- Have a pen and paper ready- No background in Python is required!*** The Jupyter notebook and the corresponding slides are in my Github page: https://github.com/ebrahimifard/sna Outline- Laying down the foundation - What is a network - Network representations - Why network - Some examples - Network, social network? - Weighted networks - Directed networks - Network elements - Path and cycle- Building the networks- Network analysis - Node metrics - Structural metrics- Advanced topics - Erdos-Renyi random graphs - Scale Free Networks- Gephi and Python ------------------------------------ Laying down the foundation ------------------------------------ What is a network? If we take a look at the etymology of the word network, it originally refers to the *open mesh of twine* and *a spider’s web*. an arrangement of intersecting horizontal and vertical lines. These are not practical 
###Code
### https://athenapallas.wordpress.com/2010/06/04/the-web-of-pride-and-envy/
### https://mashedradish.com/2017/12/18/the-etymological-network-of-net/
### https://www.lexico.com/definition/network
###Output
_____no_output_____
###Markdown
Network representation There are more practical ways to represent networks:* **Mathematical notation*** **Adjacency matrix*** **Network visualisation*** **Statistical representation** Two main components of networks- Node / Vertex / point- Link / Edge / line*** each of those elements could get some meta information Mathematical notation 
###Code
#https://medium.com/basecs/a-gentle-introduction-to-graph-theory-77969829ead8
###Output
_____no_output_____
###Markdown
Adjacency matrix 
###Code
#https://www.ebi.ac.uk/training-beta/online/courses/network-analysis-of-protein-interaction-data-an-introduction/introduction-to-graph-theory/graph-theory-adjacency-matrices/
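# Illustrative sketch (added, not part of the original slides): build a tiny graph and
# print its adjacency matrix with networkx. G_demo is a made-up example.
import networkx as nx
G_demo = nx.Graph([(1, 2), (2, 3), (1, 3), (3, 4)])
print(nx.to_numpy_array(G_demo, nodelist=[1, 2, 3, 4]))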
###Output
_____no_output_____
###Markdown
Network visualisation 
###Code
#https://medium.com/basecs/a-gentle-introduction-to-graph-theory-77969829ead8
###Output
_____no_output_____
###Markdown
Statistical representation 
###Code
#Tweets containing false news (depicted in orange in this data visualization) spread to more people through Twitter than tweets containing true news (teal).
#Vosoughi, S., Roy, D., & Aral, S. (2018). The spread of true and false news online. Science, 359(6380), 1146-1151.
###Output
_____no_output_____
###Markdown
Discussion 1 When do we use each of the abovementioned representations? (chat) Discussion 2 Why do we use the network data structure? (chat) Why network? - It captures the relationships between the data points/components - It allows us to study the macro behaviour of a system from the micro behaviour of its components (e.g. spread of diseases, citation patterns, collaboration between organisations, energy grids, ... ) Some examples 
###Code
#Pappalardo, L., Cintia, P., Rossi, A., Massucco, E., Ferragina, P., Pedreschi, D., & Giannotti, F. (2019). A public data set of spatio-temporal match events in soccer competitions. Scientific data, 6(1), 1-15.
###Output
_____no_output_____
###Markdown
https://www.nature.com/immersive/d41586-019-03165-4/index.html
###Code
# https://www.nature.com/immersive/d42859-019-00121-0/index.html
###Output
_____no_output_____
###Markdown

###Code
#Helbing, D. (2013). Globally networked risks and how to respond. Nature, 497(7447), 51-59.
###Output
_____no_output_____
###Markdown

###Code
# Goh, K. I., Cusick, M. E., Valle, D., Childs, B., Vidal, M., & Barabási, A. L. (2007). The human disease network. Proceedings of the National Academy of Sciences, 104(21), 8685-8690.
###Output
_____no_output_____
###Markdown
Discussion 3 What are the nodes and links in each of the following networks? (chat) * The airport network* The virus transmission network* The friendship network* The power grid network* The Internet* WWW* The collaboration network (science, actor) Network and Social Networks So far we have learned what a network is; what, then, is a social network? Depending on the topic of interest, the network analysis takes various names (e.g. social network, organisational network, ...) Weighted networks* the amount of time two individuals spend with each other instead of just friendship existence in a friendship network* the amount of common border instead of just border existence in a countries network Directed network vs undirected network* directed: import/export networks, influence network (followership network), communication network, ...* undirected: transportation network (roads, rails, airport), ... Path, geodesics and cycle Path: a sequence of nodes (which, by most definitions, are all distinct) Geodesics: the shortest path between two nodes Cycle: a non-empty path in which the only repeated nodes are the first and last vertices. Eulerian cycle: a path that crosses every edge in G exactly once and finishes at the starting node. 
###Code
# Note: In a connected graph, if every node has even degree, the graph has an Eulerian cycle
# Example: A municipality wants to clean all the streets after Christmas. If it can find an Eulerian cycle, that gives an optimised route (every street is covered exactly once)
###Output
_____no_output_____
###Markdown
Hamiltonian path: a path that visits each vertex exactly once. 
###Code
# EXAMPLE : Optimized route for post.nl to deliver the packages (visit every address exactly once)
###Output
_____no_output_____
###Markdown
Discussion 4 Can you think of any application for eulerian/hamiltonian cycle? (chat) ------------------------------------ Building the networks ------------------------------------ Nodes? Links? Directed? Weighted? Boundary? You can add other properties when you visualise your graph (e.g. node/edge color, node size, ... )
###Code
# Building the networks
## first four : It depends on the research question => Unit of analysis
### Nodes :
### Links:
### Directed?
### Weighted?
## We need some limitation factors
### Boundary?
### You can add other properties when you visualise your graph (e.g. node/edge color, node size, ... )
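# Illustrative sketch (added): how those design choices translate into a networkx object.
# The organisations and weights below are made up purely for illustration.
import networkx as nx
collab = nx.DiGraph()                            # directed: "A supports B" is not "B supports A"
collab.add_edge("Org A", "Org B", weight=3)      # weighted: e.g. number of joint operations
collab.add_edge("Org B", "Volunteers", weight=1)
print(list(collab.edges(data=True)))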
###Output
_____no_output_____
###Markdown
Discussion 5 Imagine an earthquake hit city X. Many humanitarian organisations set off to that city. Many volounteers also join the aid operation. You want to understand the collaboration mechanisms during this disaster. How does your network look like? (just something to think about!) ------------------------------------ Network analysis ------------------------------------ Node metrics- Degrees and degree distribution- Centrality measures Degrees and degree distribution   Centrality measures- degree centrality - C(i) ~ k(i) (centrality of node i changes by degree of that node) - Based on degree centrality, the more connection a node has, the more important that node is. Airport, citation and friendship are the cases that degree centrality works well- closeness centrality - This measure is calculated via C = 1/L(i) which L(i) denotes the average distance of node i to all the others. Collaboration networks is a case that closeness centrality works well - Closeness centrality doesn't span much - betweenness centrality - C(i) ~ shortest paths between all pairs passing through node i - Very large span for large networks- eigenvector centrality - The philosophy underneath this centrality measure: Important nodes are connected to important nodes - Eigen vector centrality is a recursive approach - Eigenvector centrality is a variation of degree centrality. However, unlike degree centrality, which gives equal weights to all the neighbors, eigenvector centrality weights adjacent nodes by their centrality:- page-rank centrality - PageRank is a variant of eigenvector centrality and was popularized in the original algorithm used by Google for ordering its - search results. The PageRank measure was initially proposed for link analysis in the World Wide Web. - PageRank, named after Google co-founder Larry Page, is one of the algorithms that Google uses to rank websites in its search results. - PageRank works by counting the number and quality of links to a page to determine a rough estimate of how important the website is. - The main assumption behind the algorithm is that more important websites are more likely to receive links from other websites. - On this basis the algorithm assigns a weight to each of the nodes in a network. 
###Code
# Reference: Toju, H., Yamamichi, M., Guimaraes Jr, P. R., Olesen, J. M., Mougi, A., Yoshida, T., & Thompson, J. N. (2017). Species-rich networks and eco-evolutionary synthesis at the metacommunity level. Nature ecology & evolution, 1(2), 0024.
# Degree centrality
# C(i) ~ k(i) (centrality of node i changes by degree of that node)
# Based on degree centrality, the more connection a node has, the more important that node is. Airport, citation and friendship are the cases that degree centrality works well
# When to use it: For finding very connected individuals, popular individuals, individuals who are likely to hold most information or individuals who can quickly connect with the wider network.
# Closeness centrality
# This measure is calculated via C = 1/L(i) which L(i) denotes the average distance of node i to all the others. Collaboration networks is a case that closeness centrality works well
# Closeness centrality doesn't span much
# When to use it: For finding the individuals who are best placed to influence the entire network most quickly.
# I should be close to everyone
# Betweenness centrality
# C(i) ~ # shortest paths between all pairs passing through node i
# Very large span for large networks
# When to use it: For finding the individuals who influence the flow around a system.
# Brokerage role, every thing should pass through me
# Eigenvector centrality
# The philosophy underneath this centrality measure: Important nodes are connected to important nodes
# Eigen vector centrality is a recursive approach
# Eigenvector centrality is a variation of degree centrality. However, unlike degree centrality, which gives equal weights to all the neighbors, eigenvector centrality weights adjacent nodes by their centrality:
# I am important becasue my friends are important
# Page Rank centrality
# PageRank is a variant of eigenvector centrality and was popularized in the original algorithm used by Google for ordering its
# search results. The PageRank measure was initially proposed for link analysis in the World Wide Web.
# PageRank, named after Google co-founder Larry Page, is one of the algorithms that Google uses to rank websites in its search results.
#PageRank works by counting the number and quality of links to a page to determine a rough estimate of how important the website is.
# The main assumption behind the algorithm is that more important websites are more likely to receive links from other websites.
# On this basis the algorithm assigns a weight to each of the nodes in a network.
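# Illustrative example (added): compare the five measures on the karate-club graph that
# ships with networkx; each measure can single out a different "most central" node.
import networkx as nx
K = nx.karate_club_graph()
measures = {
    "degree": nx.degree_centrality(K),
    "closeness": nx.closeness_centrality(K),
    "betweenness": nx.betweenness_centrality(K),
    "eigenvector": nx.eigenvector_centrality(K),
    "pagerank": nx.pagerank(K),
}
for name, scores in measures.items():
    top = max(scores, key=scores.get)
    print(f"{name:12s} -> most central node: {top} ({scores[top]:.3f})")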
###Output
_____no_output_____
###Markdown
Florentine marriage case 
###Code
## The following figure provides the links between the key families in Florence at that time, where a link represents a marriage between members of the two linked families
# This measure of betweenness for the
# Medici is .522. That means that if we look at all the shortest paths between various
# families (other than the Medici) in this network, the Medici lie on over half of them! In
# contrast, a similar calculation for the Strozzi comes out at .103, or just over ten percent.
# The second highest family in terms of betweenness after the Medici is the Guadagni
# with a betweenness of .255. To the extent that marriage relationships were keys to
# communicating information, brokering business deals, and reaching political decisions,
# the Medici were much better positioned than other families, at least according to this
# notion of betweenness.
# Reference: Jackson, M. O. (2010). Social and economic networks. Princeton university press.
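# Illustrative check (added): networkx ships the same Florentine marriage network, so the
# betweenness figures quoted above can be reproduced directly.
import networkx as nx
flo = nx.florentine_families_graph()
btw = nx.betweenness_centrality(flo)
for family in ("Medici", "Strozzi", "Guadagni"):
    print(family, round(btw[family], 3))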
###Output
_____no_output_____
###Markdown
Discussion 6 You are the marketing manager of a company and you would like to launch a campaign for your company's new product. Your strategy is to use influencers marketing. To whom do you talk to? what kind of influencer? Discussion 7 You are working in the Police communication department. You receive a news about a terroristic attack in city X. You want to let everybody knows as soon as possible. In addition to traditional media and Police account in social media, you ask a few of influencers to share the news. To whom do you talk to? what kind of influencer?¶ Structural Metrics - Community- Homophily Community   Reference: Mining of the Massive Datasets, Jure Leskovec HomophilyOne related topic to community detection is homophily. Two interesting examples:* School children* Segregation    References: * Baerveldt, C., Van Duijn, M. A., Vermeij, L., & Van Hemert, D. A. (2004). Ethnic boundaries and personal choice. Assessing the influence of individual inclinations to choose intra-ethnic relationships on pupils’ networks. Social Networks, 26(1), 55-74. * Currarini, S., Jackson, M. O., & Pin, P. (2009). An economic model of friendship: Homophily, minorities, and segregation. Econometrica, 77(4), 1003-1045. * Easley, D., & Kleinberg, J. (2010). Networks, crowds, and markets: Reasoning about a highly connected world. Cambridge University Press. Discussion 8 Please take a look at your whatsapp (or any other messaging app you are working with) and indicate among the top 5 person (not group) in your feed/timeline, how many of them have the same nationality as you? ------------------------------------ Advanced topics ------------------------------------ Erdos-Renyi random graphs   Scale Free Network   Reference: * Barabási, A. L., & Bonabeau, E. (2003). Scale-free networks. Scientific american, 288(5), 60-69. * Barabási, A. L. (2016). Network science. Cambridge university press. ------------------------------------ Gephi and Python ------------------------------------ Network elements - Node - Edge
###Code
import networkx as nx
import matplotlib.pyplot as plt
import random
import seaborn as sns
import numpy as np
import collections
from IPython.display import Image
# Making graph
G = nx.Graph()
# Adding the nodes
G.add_node(1)
G.add_nodes_from([2,3,4,5])
G.add_node(6)
G.add_nodes_from([7,8,9,10])
G.add_node(11)
G.add_node(12)
# Adding the edges
G.add_edge(1,3)
G.add_edge(*(1,5))
G.add_edges_from([(2,4), (4,5), (3,4), (10,9), (2,3)])
G.add_edges_from([(7,9), (7,8), (8,10), (6,9)])
# Visualization
# nx.draw(G, with_labels=True)
# nx.draw_kamada_kawai(G, with_labels=True)
# nx.draw_networkx(G, with_labels=True)
# nx.draw_random(G, with_labels=True)
# nx.draw_shell(G, with_labels=True)
# nx.draw_spectral(G, with_labels=True)
nx.draw_spring(G, with_labels=True)
plt.show()
###Output
_____no_output_____
###Markdown
Path, geodesics and cycle
###Code
# First we visualize the graph
nx.draw(G, with_labels=True)
plt.show()
# In calculating the shortest path between nodes in a graph, it is important to know whether the graph is connected. If the graph is connected there is no problem; otherwise, in a disconnected graph, the shortest-path algorithm should be applied within each connected component
condition = nx.is_connected(G)
sourceNode = 1    # the nodes of G are integers; pick two that exist (1 and 4 are in the same component)
targetNode = 4
if condition:
print(nx.shortest_path_length(G, source=sourceNode, target=targetNode))
print(nx.shortest_path(G, source=sourceNode, target=targetNode))
print(list(nx.shortest_paths.all_shortest_paths(G, source=sourceNode, target=targetNode)))
# A simple path is a path with no repeated nodes
simplePaths = list(nx.simple_paths.all_simple_paths(G, source=sourceNode, target=targetNode))
nx.cycle_basis(G)
EG = nx.Graph()
EG.add_edges_from([(1,2),(1,3),(2,3)])
if nx.is_eulerian(EG):
for a in nx.eulerian_circuit(EG):
print(a)
# Hamiltonian cycle
HG = nx.DiGraph()
HG.add_edges_from([(1,2),(2,3),(3,4),(4,1)])
nx.tournament.hamiltonian_path(HG)
###Output
_____no_output_____
###Markdown
As you may have noticed, the difference between a Hamiltonian and an Eulerian cycle is subtle. An Euler path crosses every edge exactly once without repeating; if it ends at the initial vertex, it is an Euler cycle. A Hamiltonian path passes through each vertex (note: each vertex, not each edge) exactly once; if it ends at the initial vertex, it is a Hamiltonian cycle.
###Code
# Node attribute
G =nx.Graph()
G.add_edges_from([(1,2),(1,3),(2,3),(4,5),(5,6),(7,8),(3,6),(4,8)])
# G.nodes is the method to associate an attribute to an existing node
for i in G.nodes:
if i%2 == 0:
G.nodes[i]["colour"] = "Red"
else:
G.nodes[i]["colour"] = "Blue"
G.nodes[i]["name"] = f'Node {i}'
G.add_node(9,colour="Yellow")
G.nodes.data()
# Edge attribute
G =nx.Graph()
G.add_edges_from([(u,random.randint(1,11),
{"weight": random.uniform(1,10), "colour":random.choice(["Red", "Blue"])}
)
for u in range(1,10)
])
G.edges.data()
# Visualization based on attributes
# Here, we added size & colour to the nodes and weight to the edges
G = nx.generators.random_graphs.fast_gnp_random_graph(10,0.25)
edgeWeights = [random.randint(1,5) for i in range(10)]
nodeSize = [random.randint(1,1000) for i in range(10)]
nodeColour = [random.choice(["r","b", "green", "yellow"]) for i in range(10)]
pos = nx.circular_layout(G)
nx.draw_networkx_nodes(G, pos, node_size=nodeSize, node_color=nodeColour)
nx.draw_networkx_edges(G, pos, width=edgeWeights)
plt.show()
DG = nx.DiGraph()
DG.add_edge(1,2)
DG.add_edges_from([(1,4),(1,5),(5,3)])
DG.add_edge(3,2)
DG.add_edges_from([(4,3),(6,2)])
DG.add_nodes_from([7,8,9])
DG.add_edge(9,7)
nx.draw_circular(DG, with_labels=True)
plt.show()
###Output
_____no_output_____
###Markdown
Network Analysis
###Code
# This is the network of fake-news related concepts in wikipedia
wikipediaFakeNewsAdr = "./data/graph_100.gexf"
wikipediaConcepts = nx.read_gexf(wikipediaFakeNewsAdr)
nx.draw(wikipediaConcepts, with_labels=True)
plt.show()
# Degree distribution
degree_sequence = sorted([d for n, d in wikipediaConcepts.degree()], reverse=True) # degree sequence
# print "Degree sequence", degree_sequence
degreeCount = collections.Counter(degree_sequence)
deg, cnt = zip(*degreeCount.items())
fig, ax = plt.subplots()
plt.bar(deg, cnt, width=.80, color='b')
plt.title("Degree Histogram")
plt.ylabel("Count")
plt.xlabel("Degree")
ax.set_xticks([d + 0.4 for d in deg])
ax.set_xticklabels(deg)
plt.show()
###Output
_____no_output_____
###Markdown
Centrality measures
###Code
# Degree centrality
# C(i) ~ k(i) (centrality of node i changes by degree of that node)
# Based on degree centrality, the more connection a node has, the more important that node is. Airport, citation and friendship are the cases that degree centrality works well
# When to use it: For finding very connected individuals, popular individuals, individuals who are likely to hold most information or individuals who can quickly connect with the wider network.
degreeCentrality = nx.degree_centrality(G)
# Closeness centrality
# This measure is calculated via C = 1/L(i) which L(i) denotes the average distance of node i to all the others. Collaboration networks is a case that closeness centrality works well
# Closeness centrality doesn't span much
# When to use it: For finding the individuals who are best placed to influence the entire network most quickly.
closenessCentrality = nx.closeness_centrality(G)
# Betweenness centrality
# C(i) ~ # shortest paths between all pairs passing through node i
# Very large span for large networks
# When to use it: For finding the individuals who influence the flow around a system.
betweennessCentrality = nx.betweenness_centrality(G)
# Eigenvector centrality
# The philosophy underneath this centrality measure: Important nodes are connected to important nodes
# Eigen vector centrality is a recursive approach
# Eigenvector centrality is a variation of degree centrality. However, unlike degree centrality, which gives equal weights to all the neighbors, eigenvector centrality weights adjacent nodes by their centrality:
eigenvectorCentrality = nx.eigenvector_centrality(G)
# Page Rank centrality
# PageRank is a variant of eigenvector centrality and was popularized in the original algorithm used by Google for ordering its
# search results. The PageRank measure was initially proposed for link analysis in the World Wide Web.
# PageRank, named after Google co-founder Larry Page, is one of the algorithms that Google uses to rank websites in its search results.
#PageRank works by counting the number and quality of links to a page to determine a rough estimate of how important the website is.
# The main assumption behind the algorithm is that more important websites are more likely to receive links from other websites.
# On this basis the algorithm assigns a weight to each of the nodes in a network.
pagerankCentrality = nx.pagerank(G)
correlation = Image('./figs/correlation.PNG')
# Source = https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0220061
# Here we calculate correlations between 17 different centrality measures across 212 diverse real-world networks, examine how these correlations relate to variations in network density and global topology, and investigate whether nodes can be clustered into distinct classes according to their centrality profiles. We find that centrality measures are generally positively correlated to each other, the strength of these correlations varies across networks, and network modularity plays a key role in driving these cross-network variations.
###Output
_____no_output_____
###Markdown
Structural Metrics- Community- Homophily
###Code
# The functions in this class are not imported into the top-level networkx namespace. You can access these functions by importing the networkx.algorithms.community module, then accessing the functions as attributes of community
# This class implemented Newman-Girvan method
# The other very popular community detection method is the Louvain method which is implemented in Gephi
from networkx.algorithms import community
communities_generator = community.girvan_newman(G)
top_level_communities = next(communities_generator)
next_level_communities = next(communities_generator)
communities = sorted(map(sorted, next_level_communities))
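# Hedged addition (illustrative): modularity gives a single quality score for the partition
# found above (values closer to 1 indicate more pronounced community structure).
print("Number of communities:", len(next_level_communities))
print("Modularity:", community.modularity(G, next_level_communities))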
###Output
_____no_output_____
###Markdown
- Advanced topics - Erdos-Renyi random graphs - Scale Free Network
###Code
ERG = nx.erdos_renyi_graph(1000,0.2)
SFN = nx.scale_free_graph(1000)
SMW = nx.watts_strogatz_graph(1000,3,0.1)
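# Illustrative comparison (added): in the scale-free graph the maximum degree sits far above
# the mean (hubs), unlike in the Erdos-Renyi and small-world graphs.
for name, g in [("Erdos-Renyi", ERG), ("Scale-free", SFN), ("Small-world", SMW)]:
    degrees = [d for _, d in g.degree()]
    print(f"{name:12s} mean degree = {sum(degrees) / len(degrees):7.2f}, max degree = {max(degrees)}")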
###Output
_____no_output_____
###Markdown
Reference: * Barabási, A. L., & Bonabeau, E. (2003). Scale-free networks. Scientific american, 288(5), 60-69. * Barabási, A. L. (2016). Network science. Cambridge university press. Discussion 9 Make a random directed graph with 50 nodes and give a random weight from 1 to 10 to each edge. Then visualize the graph under the following four conditions:1. The size of the nodes should be associated with the nodes' degree2. The thickness of each edge should be associated with the edge weights3. The colour of the nodes has to be associated with their degree, so that as the degree of a node increases the colour of that node becomes sharper4. The colour of the edges has to be associated with their weights, so that as the weight of an edge increases the colour of that edge becomes sharper
###Code
G = nx.erdos_renyi_graph(10, 0.1, directed=True)
inDeg = dict(G.in_degree())
outDeg = dict(G.out_degree())
for edge in G.edges:
G[edge[0]][edge[1]]["weight"] = random.randint(1,5)
edgeWeights = [u[2]["weight"] for u in G.edges.data()]
# Making the colour palette and linking it the edges according to theor weights
colourPalette = sns.color_palette("Blues",5)
edgeWeighColour = dict(zip(range(1,6),colourPalette))
for edge in G.edges:
G[edge[0]][edge[1]]["colour"] = edgeWeighColour[G[edge[0]][edge[1]]["weight"]]
edgeColours = [u[2]["colour"] for u in G.edges.data()]
# Associating the color of the nodes with the their out-degree
outDegUnique = np.unique([i for i in outDeg.values()])
colourPalette = sns.color_palette("Reds",len(outDegUnique))
nodeOutDegDict = dict(zip(outDegUnique,colourPalette))
for i in G.nodes:
G.nodes[i]["colour"] = nodeOutDegDict[outDeg[i]]
nodeColour = [i[1]["colour"] for i in G.nodes.data()]
# This line makes the figure bigger
plt.figure(num=None, figsize=(12, 10), dpi=80)
# Setting the layout, nodes, edges and lables
pos = nx.circular_layout(G)
nx.draw_networkx_nodes(G, pos, node_size=[100*inDeg[u] for u in inDeg], node_color=nodeColour)
nx.draw_networkx_edges(G, pos, width=edgeWeights, edge_color=edgeColours)
nx.draw_networkx_labels(G, pos)
# Drawing the network
plt.show()
###Output
_____no_output_____ |
Recommendation Systems.ipynb | ###Markdown
Movie Recommendation System I have used https://www.kaggle.com/tmdb/tmdb-movie-metadata for building recommendation systems.There are three types of recommender systems :- 1. Demographic Filtering - It uses the demographic data of a user to determine which items may be appropriate for recommendation.2. Content Based Filtering - It uses item features to recommend other items similar to what the user likes, based on their previous actions or explicit feedback.3. Collaborative Filtering - This system matches persons with similar interests and provides recommendations based on this matching. Collaborative filters do not require item metadata like its content-based counterparts. Importing libraries and loading data
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from ast import literal_eval
d1=pd.read_csv('tmdb_5000_credits.csv')
d2=pd.read_csv('tmdb_5000_movies.csv')
###Output
_____no_output_____
###Markdown
Merging d1 & d2
###Code
d1.columns = ['id','title','cast','crew']
d2 = d2.merge(d1,on='id')
d2.head()
###Output
_____no_output_____
###Markdown
Demographic FilteringBefore getting started with this -1. we need a metric to score or rate the movies2. Calculate the score for every movie3. Sort the scores and recommend the best rated movie to the users.We can use the average rating of the movie as the score, but that would not be fair, since a movie with an 8.9 average rating and only 3 votes cannot be considered better than a movie with a 7.8 average rating but 40 votes. So, I'll be using IMDB's weighted rating (wr) which is given as :-  where,1. v is the number of votes for the movie;2. m is the minimum votes required to be listed in the chart;3. R is the average rating of the movie; And4. C is the mean vote across the whole reportWe already have v(vote_count) and R (vote_average) and C can be calculated as
###Code
C= d2['vote_average'].mean()
C
###Output
_____no_output_____
###Markdown
To calculate m we use the 90th percentile as the cutoff: a movie must have more votes than at least 90% of the movies in the list.
###Code
m= d2['vote_count'].quantile(0.9)
m
###Output
_____no_output_____
###Markdown
Qualified movies
###Code
q_movies = d2.copy().loc[d2['vote_count'] >= m]
q_movies.head(2)
q_movies.shape
###Output
_____no_output_____
###Markdown
Calculate our metric for each qualified movie using function w_rating
###Code
def w_rating(x, m=m, C=C):
v = x['vote_count']
R = x['vote_average']
# Calculation based on the IMDB formula
return (v/(v+m) * R) + (m/(m+v) * C)
# Define a new feature 'score' and calculate its value with `w_rating()`
q_movies['score'] = q_movies.apply(w_rating, axis=1)
#Sort movies based on score calculated above
q_movies = q_movies.sort_values('score', ascending=False)
#Print the top 15 movies
q_movies[['title_x', 'vote_count', 'vote_average', 'score']].head(10)
###Output
_____no_output_____
###Markdown
This recommendation system shows the ' Trending Now ' tab of a streaming app
###Code
pop= d2.sort_values('popularity', ascending=False)
plt.figure(figsize=(12,4))
plt.barh(pop['title_x'].head(6),pop['popularity'].head(6), align='center',color='blue')
plt.gca().invert_yaxis()
plt.xlabel("Popularity")
plt.title("Popular Movies")
###Output
_____no_output_____
###Markdown
Content Based Filtering 1. Plot description based RecommenderWe are going to use a plot-description-based recommender that ranks all movies by their similarity score to a given movie.
###Code
d2['overview'].head()
###Output
_____no_output_____
###Markdown
TfIdfVectorizerWe'll use Term Frequency-Inverse Document Frequency (TF-IDF) vectors for each overview. This is a very common algorithm to transform text into a meaningful numerical representation, which is then used to fit a machine learning algorithm for prediction. 
###Code
#Define a TF-IDF Vectorizer Object. Remove all english stop words such as 'the', 'a'
tfidf = TfidfVectorizer(stop_words='english')
#Replace NaN with an empty string
d2['overview'] = d2['overview'].fillna('')
#Construct the required TF-IDF matrix by fitting and transforming the data
tfidf_matrix = tfidf.fit_transform(d2['overview'])
#Output the shape of tfidf_matrix
tfidf_matrix.shape
###Output
_____no_output_____
###Markdown
Using linear_kernel()We will be using the cosine similarity to calculate a numeric quantity that denotes the similarity between two movies. We use the cosine similarity score since it is independent of magnitude and is relatively easy and fast to calculate. Mathematically, it is defined as follows: 
###Code
# Compute the cosine similarity matrix
cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
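# Hedged illustration (added): cosine_sim is an (n_movies, n_movies) array whose [i, j]
# entry is the overview similarity between movie i and movie j.
print(cosine_sim.shape)
print(cosine_sim[0, :5])   # similarity of the first movie to the first five movies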
###Output
_____no_output_____
###Markdown
Function that takes in a movie title as an input and outputs a list of the 10 most similar movies.
###Code
#Construct a reverse map of indices and movie titles
indices = pd.Series(d2.index, index=d2['title_x']).drop_duplicates()
# Function that takes in movie title as input and outputs most similar movies
def get_recommendations(title, cosine_sim=cosine_sim):
# Get the index of the movie that matches the title
idx = indices[title]
# Get the pairwsie similarity scores of all movies with that movie
sim_scores = list(enumerate(cosine_sim[idx]))
# Sort the movies based on the similarity scores
sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
# Get the scores of the 10 most similar movies
sim_scores = sim_scores[1:11]
# Get the movie indices
movie_indices = [i[0] for i in sim_scores]
# Return the top 10 most similar movies
return d2['title_x'].iloc[movie_indices]
get_recommendations('The Godfather')
get_recommendations('Inception')
###Output
_____no_output_____
###Markdown
2. Credits, Genres and Keywords Based RecommenderWe are going to build a recommender based on the following metadata: the 3 top actors, the director, related genres and the movie plot keywords. The quality of this system will increase because it makes better use of the available data.
###Code
features = ['cast', 'crew', 'keywords', 'genres']
for feature in features:
d2[feature] = d2[feature].apply(literal_eval)
# Get the director's name from the crew feature. If director is not listed, return NaN
def get_director(x):
for i in x:
if i['job'] == 'Director':
return i['name']
return np.nan
# Returns the list top 3 elements or entire list; whichever is more.
def get_list(x):
if isinstance(x, list):
names = [i['name'] for i in x]
#Check if more than 3 elements exist. If yes, return only first three. If no, return entire list.
if len(names) > 3:
names = names[:3]
return names
#Return empty list in case of missing/malformed data
return []
# Define new director, cast, genres and keywords features that are in a suitable form.
d2['director'] = d2['crew'].apply(get_director)
features = ['cast', 'keywords', 'genres']
for feature in features:
d2[feature] = d2[feature].apply(get_list)
d2[['title_x', 'cast', 'director', 'keywords', 'genres']].head(2)
# Function to convert all strings to lower case and strip names of spaces
def clean_data(x):
if isinstance(x, list):
return [str.lower(i.replace(" ", "")) for i in x]
else:
#Check if director exists. If not, return empty string
if isinstance(x, str):
return str.lower(x.replace(" ", ""))
else:
return ''
# Apply clean_data function to your features.
features = ['cast', 'keywords', 'director', 'genres']
for feature in features:
d2[feature] = d2[feature].apply(clean_data)
###Output
_____no_output_____
###Markdown
We are now in a position to create our "metadata soup", which is a string that contains all the metadata that we want to feed to our vectorizer
###Code
def create_soup(x):
return ' '.join(x['keywords']) + ' ' + ' '.join(x['cast']) + ' ' + x['director'] + ' ' + ' '.join(x['genres'])
d2['soup'] = d2.apply(create_soup, axis=1)
###Output
_____no_output_____
###Markdown
Using CountVectorizerIt is used to transform a given text into a vector on the basis of the frequency (count) of each word that occurs in the entire text.
###Code
# Import CountVectorizer and create the count matrix
from sklearn.feature_extraction.text import CountVectorizer
count = CountVectorizer(stop_words='english')
count_matrix = count.fit_transform(d2['soup'])
# Compute the Cosine Similarity matrix based on the count_matrix
from sklearn.metrics.pairwise import cosine_similarity
cosine_sim2 = cosine_similarity(count_matrix, count_matrix)
# Reset index of our main DataFrame and construct reverse mapping as before
d2 = d2.reset_index()
indices = pd.Series(d2.index, index=d2['title_x'])
get_recommendations('Batman Begins', cosine_sim2)
get_recommendations('The Avengers', cosine_sim2)
###Output
_____no_output_____ |
Restaurant_Data_Analysis_Python.ipynb | ###Markdown
RESTAURANT DATA ANALYSIS OBJECTIVE: To analyse the restaurant's timings and peak hours and the number of people visiting, based on tips, days, time etc., and to give pictorial representations of these values on graph coordinates. APPROACH: Using pandas, numpy and matplotlib we can slice the data as needed, check whether any NaN values are present and replace them, and then plot a bar graph or box plot of the data. DELIVERABLES: 1. csv: tips.csv 2. box plot: boxplot grouped by day & tip 3. bar plot: barplot of day vs tips
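Since the approach above mentions replacing missing values, here is a minimal sketch of how that could be done once the data is loaded (column names as in tips.csv; the fill strategy is just one reasonable choice):
###Code
import pandas as pd
tips = pd.read_csv('tips.csv')
# fill missing numeric tips with the median and missing categorical days with the mode
tips['tip'] = tips['tip'].fillna(tips['tip'].median())
tips['day'] = tips['day'].fillna(tips['day'].mode()[0])
###Output
_____no_output_____
###Markdown
Load the data and start the analysis: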
###Code
import pandas as pd
import numpy as np
data = pd.read_csv('tips.csv', sep=',', encoding='latin_1') # read the csv file (path, separator, encoding - here latin_1 / Western European)
data # print out data variable
data.loc[(data["sex"]=="Female")&(data["smoker"]=="No")&(data["day"]=="Sun")&(data["time"]=="Dinner"),["sex","smoker","time","day"]]
# Separating the data by taking female, no smoking, sunday and dinner from original data using boolean indexing method.
# for female data
def num_missing(x): # Checking missing values
return sum(x.isnull())
print "Missing Values per Columns:"
print data.apply(num_missing, axis=0) # apply returns some value after passing each column(axis=0 i.e. column) of adata frame with some function(i.e. missing values here)
print "Missing Values per Rows:"
print data.apply(num_missing, axis=1).head() # axis=1 means rows
data.loc[(data["sex"]=="Male")&(data["smoker"]=="No")&(data["day"]=="Sun")&(data["time"]=="Dinner"),["sex","smoker","time","day"]]
# Separating the data by taking male, no smoking, sunday and dinner from original data using boolean indexing method
# for male data
def num_missing(x):
return sum(x.isnull())
print "Missing Values per Columns For Male:"
print data.apply(num_missing, axis=0)
print "Missing Values per Rows For Male:"
print data.apply(num_missing, axis=1).head()
# determine pivot table
impute_grps=data.pivot_table(values=["tip"], index=["sex","smoker","time","day"], aggfunc=np.mean)
# pandas can create MS Excel style pivot tables; here the 'tip' column is aggregated by its mean for each sex, smoker, time and day combination.
print impute_grps
# sorting data
data_sort=data.sort_values(['tip','total_bill'], ascending=False)
# pandas allows easy sorting based on multiple columns.
data_sort[['tip','total_bill']].head(10) # .head(10) takes only the first 10 values to display
#plotting boxplot
import matplotlib.pyplot as plt
data.boxplot(column="tip",by="day")
# plotting histogram
data.hist(column="tip",by="day",bins=30)
###Output
_____no_output_____ |
examples/_legacy/.ipynb_checkpoints/harmonic-analysis-checkpoint.ipynb | ###Markdown
HARMONIC ANALYSIS___
###Code
import os
import spectrai as spa
from scipy import signal
import pywt
import scaleogram as scg
import numpy as np
import matplotlib.pyplot as plt
# choose default wavelet function for the entire notebook
scg.set_default_wavelet('cmor1-1.5')
#import torch
#from kymatio import Scattering1D
#from kymatio.numpy import Scattering1D
%load_ext autoreload
%autoreload 2
%matplotlib inline
DATA_PATH = os.path.join('..', 'data')
VIETNAM_PATH = os.path.join('vnm-petra', 'mir-models')
DATA_URL = os.path.join(DATA_PATH, VIETNAM_PATH, '*.*')
VIETNAM_MEAS_URL = os.path.join(DATA_PATH, 'vnm-petra', 'mir-models', '20090215-soil-database-mirs.xls')
X, X_names, y, y_names, instances_id, _ = spa.load_data_petra(DATA_URL, VIETNAM_MEAS_URL)
print('X shape: ', X.shape)
print(y_names)
print(instances_id)
X.shape
with plt.style.context(('ggplot')):
fig, ax = plt.subplots(figsize=(16,4))
ax.set_xlim(4000, 600)
_ = ax.plot(X_names, X.T)
###Output
_____no_output_____
###Markdown
Discrete Fourier Transform*Reference: https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.fft.html#numpy.fft.fft*
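Before applying it to the spectra, a quick synthetic sanity check (not part of the dataset): a pure sine with 5 cycles over the window should put almost all FFT energy into bin 5.
###Code
n = 1000
t = np.linspace(0, 1, n, endpoint=False)
sig = np.sin(2 * np.pi * 5 * t)     # synthetic sine, 5 cycles over the window
mag = np.abs(np.fft.fft(sig))
print(np.argmax(mag[:n // 2]))      # dominant frequency bin, expected: 5
###Output
_____no_output_____
###Markdown
Now the FFT of an actual spectrum: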
###Code
with plt.style.context(('ggplot')):
fig, ax = plt.subplots(figsize=(16,4))
ax.set_xlim(4000, 600)
_ = ax.plot(X_names, X[0])
dft = np.fft.fft(X[0])
type(dft[0])
dft_abs = np.abs(dft)
N = X.shape[1]//2 + 1
with plt.style.context(('ggplot')):
fig, ax = plt.subplots(figsize=(16,4))
ax.set_xlim(0, 100)
_ = ax.plot(dft_abs/np.max(dft_abs))
with plt.style.context(('ggplot')):
fig, ax = plt.subplots(figsize=(16,8))
ax.set_xlim(0, 100)
#ax.set_ylim(0, 60)
_ = ax.plot(np.abs(np.fft.fft(X)).T)
X.shape
###Output
_____no_output_____
###Markdown
Filtering and iFFT Low-pass filter
###Code
k = 20
mask_from = k
mask_to = X.shape[1]-k
dft_filtered = np.fft.fft(X)
dft_filtered[:,mask_from:mask_to] = 0
#dft_filtered[:,0:10] = 0
with plt.style.context(('ggplot')):
fig, ax = plt.subplots(figsize=(16,4))
#ax.set_xlim(4000, 600)
_ = ax.plot(np.fft.ifft(dft_filtered).real.T)
###Output
_____no_output_____
###Markdown
High-pass filter
###Code
mask_from = 0
mask_to = 50
dft_filtered = np.fft.fft(X)
dft_filtered[:,mask_from:mask_to] = 0
with plt.style.context(('ggplot')):
fig, ax = plt.subplots(figsize=(16,4))
ax.set_xlim(0, 1750)
_ = ax.plot(np.fft.ifft(dft_filtered).real.T)
with plt.style.context(('ggplot')):
fig, ax = plt.subplots(figsize=(16,4))
ax.set_xlim(4000, 600)
_ = ax.plot(X_names, X.T)
###Output
_____no_output_____
###Markdown
Short Time Fourier Transform and spectrogram
###Code
X[0].shape
def get_amp(x):
return np.abs(x.max() - x.min())
get_amp(X[0])
SAMPLE_IDX = 91
with plt.style.context(('ggplot')):
fig, ax = plt.subplots(figsize=(16,3))
ax.set_xlim(4000, 600)
_ = ax.plot(X_names, X[SAMPLE_IDX])
plt.ylabel('Absorbance')
amp = get_amp(X[0])
f, t, Zxx = signal.stft(X[SAMPLE_IDX], fs=1, window='hamming', nperseg=30)
fig, ax = plt.subplots(figsize=(16,4))
_ = ax.pcolormesh(t, f, np.log10(np.abs(Zxx)))
plt.title('STFT Magnitude')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]');
len(X[SAMPLE_IDX])
plt.figure(figsize=(16, 3))
plt.specgram(X[SAMPLE_IDX], Fs=400)
plt.title("Time-Frequency spectrogram of signal")
np.abs(Zxx).min()
###Output
_____no_output_____
###Markdown
WaveletsReferences:* http://ataspinar.com/2018/12/21/a-guide-for-using-the-wavelet-transform-in-machine-learning/* https://www.kaggle.com/asauve/a-gentle-introduction-to-wavelet-for-data-analysis
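A minimal example of the raw `pywt` continuous wavelet transform on a synthetic signal (illustrative only; the `scaleogram` helper used below plots this kind of transform):
###Code
toy_sig = np.sin(2 * np.pi * 10 * np.linspace(0, 1, 400))   # synthetic 10-cycle sine
coefs, freqs = pywt.cwt(toy_sig, scales=np.arange(1, 64), wavelet='morl')
print(coefs.shape)   # (number of scales, number of samples) -> (63, 400)
###Output
_____no_output_____
###Markdown
Back to the spectra: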
###Code
print(pywt.families(short=False))
SAMPLE_IDX = 91
with plt.style.context(('ggplot')):
fig, ax = plt.subplots(figsize=(16,3))
ax.set_xlim(4000, 600)
_ = ax.plot(X_names, X[SAMPLE_IDX])
plt.ylabel('Absorbance')
scales = scg.periods2scales(np.arange(1, 200))
scg.cws(X[SAMPLE_IDX], scales=scales, cbar=None, coi=True, figsize=(16,4));
###Output
_____no_output_____ |
Mideterm_Exam.ipynb | ###Markdown
Question 1. Write Python code that displays a square matrix whose side length is 5 (10 points)
###Code
import numpy as np
S = np.array ([[1,2,3,4,5],[1,2,3,4,5],[1,2,3,4,5],[1,2,3,4,5],[1,2,3,4,5]])
print(S)
###Output
[[1 2 3 4 5]
[1 2 3 4 5]
[1 2 3 4 5]
[1 2 3 4 5]
[1 2 3 4 5]]
###Markdown
Question 2. Write Python code that displays a square matrix whose elements below the principal diagonal are zero (10 points)
###Code
import numpy as np
A = np.triu([[5,7,1,5,3],[1,4,2,9,4],[7,1,8,7,5],[6,8,2,4,2],[4,2,1,5,9]])
print(A)
###Output
[[5 7 1 5 3]
[0 4 2 9 4]
[0 0 8 7 5]
[0 0 0 4 2]
[0 0 0 0 9]]
###Markdown
Question 3. Write Python code that displays a square matrix which is symmetric (10 points)
###Code
import numpy as np
S = np.array([[1,2,3,4,5],[2,1,2,3,4],[3,2,1,2,3],[4,3,2,1,2],[5,4,3,2,1]])
print(S)
###Output
[[1 2 3 4 5]
[2 1 2 3 4]
[3 2 1 2 3]
[4 3 2 1 2]
[5 4 3 2 1]]
###Markdown
Question 4. What is the inverse of matrix C? Show your solution with Python code. (20 points)
###Code
import numpy as np
C = np.array ([[1,2,3],
[2,3,3],
[3,4,-2]])
i = np.linalg.inv(C)
print(i)
###Output
[[-3.6 3.2 -0.6]
[ 2.6 -2.2 0.6]
[-0.2 0.4 -0.2]]
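###Markdown
A quick sanity check (not required by the question): multiplying C by its inverse should give the identity matrix, up to floating-point error.
###Code
import numpy as np
C = np.array([[1,2,3],[2,3,3],[3,4,-2]])
print(np.round(C @ np.linalg.inv(C), 10))  # expect the 3x3 identity matrix
###Output
_____no_output_____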
###Markdown
Question 5. What is the determinant of the given matrix in Question 4? Show your solution by python coding. (20 points)
###Code
import numpy as np
C = np.array ([[1,2,3],
[2,3,3],
[3,4,-2]])
d = np.linalg.det(C)
print(int(d))
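# Hand check via cofactor expansion along the first row (illustrative):
# det(C) = 1*(3*(-2) - 3*4) - 2*(2*(-2) - 3*3) + 3*(2*4 - 3*3)
#        = 1*(-18) - 2*(-13) + 3*(-1) = -18 + 26 - 3 = 5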
import numpy as np
eq = np.array([[5,4,1],
[10,9,4],
[10,13,15]])
const = np.array([[[3.4],
[8.8],
[19.2]]])
ro = np.linalg.inv(eq) @ const
print(ro)
###Output
[[[0.2]
[0.4]
[0.8]]]
|
templates/modelling/classification_pycaret.ipynb | ###Markdown
Classification Template Installing Pycaret
###Code
# !pip install pycaret --user
# !pip install pycaret-nightly --user
###Output
_____no_output_____
###Markdown
Import Libraries
###Code
import pandas as pd
from pycaret.classification import *
###Output
_____no_output_____
###Markdown
Import Dataset
###Code
# path to your dataset, can be a csv file or xlsx
dataset_path = "../Bank_Personal_Loan_Modelling_transformed.xlsx"
## use code as per the type of data source
## use below line to read data from csv file
## df = pd.read_csv(dataset_path)
df = pd.read_excel(dataset_path, index_col=0)
df.head()
target = 'Personal Loan'
###Output
_____no_output_____
###Markdown
Data Setup* See [here](https://github.com/pycaret/pycaret/blob/master/tutorials/Binary%20Classification%20Tutorial%20Level%20Beginner%20-%20%20CLF101.ipynb) for notebook example(basic level)* See [here](https://pycaret.org/classification/) for classification documentation.
###Code
# to-do: separate cat and numbers
data=setup(df,target=target, categorical_features=['Family', 'Education'],
train_size = 0.8, fold=5)
###Output
_____no_output_____
###Markdown
Comparing models and selecting top 3
###Code
#Selecting top3 models for tuning
top3_models=compare_models(n_select=3, fold=5)
print(top3_models)
# to-do
# separate notebooks, have basic ones in first
# then tune it using hyperparamaters
# then use ensembling, stacking and blending
# same for regression, classification
###Output
_____no_output_____
###Markdown
Tuning Models* `compare_models` only evaluates models with their default hyperparameters, while `tune_model` uses cross-validation to tune them; here we tune the top 3 models selected by `compare_models`. NOTE: full hyperparameter tuning is performed in a separate notebook.
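By default PyCaret tunes classification models for Accuracy; below is a sketch of optimizing a different metric instead (parameters assumed from PyCaret's documented API, kept commented out like the other optional variants in this notebook):
###Code
## tune the best model for AUC instead of Accuracy, with more random-search iterations
# tuned_best = tune_model(top3_models[0], optimize='AUC', n_iter=25, fold=5)
###Output
_____no_output_____
###Markdown
Tune the best model with the defaults: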
###Code
tune_model(top3_models[0])
# tune_model(top3_models[1])
# tune_model(top3_models[2])
#Tuning the top 3 models
#tuned_model_top3=[tune_model(i) for i in top3_models]
#print(tuned_model_top3)
###Output
_____no_output_____
###Markdown
Ensembling* Create an ensemble using the top 3 tuned models. NOTE: the output of a cell is shown only for the last model executed, not for all three.
###Code
## Ensembling top 3 tuned models
# bagged_tuned_top3=[ensemble_model(i,method='Bagging') for i in tuned_model_top3]
# print(bagged_tuned_top3)
###Output
_____no_output_____
###Markdown
Blending Models
###Code
## Blend top3 models
# blender=blend_models(estimator_list=top3_models)
###Output
_____no_output_____
###Markdown
Stacking models
###Code
# stacker=stack_models(top3_models)
###Output
_____no_output_____
###Markdown
Plot Model results
###Code
plot_model(top3_models[0])
# plot_model(top3_models[1])
# plot_model(top3_models[2])
###Output
_____no_output_____
###Markdown
Evaluate Models
###Code
evaluate_model(top3_models[0])
# evaluate_model(top3_models[1])
# evaluate_model(top3_models[2])
###Output
_____no_output_____
###Markdown
2. Explainability Techniques * Read more on using pycaret models directly with SHAP* Or, better, on how to work with SHAP from within pycaret
###Code
# !pip install shap
final_model = top3_models[0]
final_model
import shap
explainer = shap.Explainer(final_model)
# data is the information grid returned by the setup method, type is tuple
# index 5 appears to contain the transformed feature dataframe (and index 2 the target used below)
transformed_df = data[5]
transformed_df[target] = data[2]
transformed_df.head()
# #Error in this cell
# shap_values = explainer(transformed_df, check_additivity=False)
# shap.plots.waterfall(shap_values[0])
# shap.initjs()
## Visualize first prediction
# shap.plots.force(shap_values[2])
# display(shap.plots.force(explainer.expected_value[0], shap_values[0]))
###Output
_____no_output_____
###Markdown
SHAP on sample dataset
###Code
# !pip install xgboost
import xgboost
import shap
# train an XGBoost model
X, y = shap.datasets.boston()
model = xgboost.XGBRegressor().fit(X, y)
# explain the model's predictions using SHAP
# (same syntax works for LightGBM, CatBoost, scikit-learn, transformers, Spark, etc.)
explainer = shap.Explainer(model)
shap_values = explainer(X)
shap_values
# visualize the first prediction's explanation
shap.plots.waterfall(shap_values[0])
#Visualize all predictions
shap.plots.scatter(shap_values[:,"AGE"],shap_values)
shap.plots.beeswarm(shap_values)
###Output
_____no_output_____
###Markdown
Interpretation using Pycaret SHAP implementation* Note that it only supports tree-based models, read more [here](https://pycaret.org/classification/).
###Code
top3_models
#[interpret_model(i) for i in best_model]
interpret_model(top3_models[1],)
###Output
_____no_output_____
###Markdown
Not optimizing in this step
###Code
# , use_holdout = False
# final_model=automl(optimize = 'Accuracy')
print(top3_models)
###Output
[XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=1, gamma=0, gpu_id=-1,
importance_type='gain', interaction_constraints='',
learning_rate=0.300000012, max_delta_step=0, max_depth=6,
min_child_weight=1, missing=nan, monotone_constraints='()',
n_estimators=100, n_jobs=-1, num_parallel_tree=1,
objective='binary:logistic', random_state=272, reg_alpha=0,
reg_lambda=1, scale_pos_weight=1, subsample=1, tree_method='auto',
use_label_encoder=True, validate_parameters=1, verbosity=0), GradientBoostingClassifier(ccp_alpha=0.0, criterion='friedman_mse', init=None,
learning_rate=0.1, loss='deviance', max_depth=3,
max_features=None, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=100,
n_iter_no_change=None, presort='deprecated',
random_state=272, subsample=1.0, tol=0.0001,
validation_fraction=0.1, verbose=0,
warm_start=False), RandomForestClassifier(bootstrap=True, ccp_alpha=0.0, class_weight=None,
criterion='gini', max_depth=None, max_features='auto',
max_leaf_nodes=None, max_samples=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=100,
n_jobs=-1, oob_score=False, random_state=272, verbose=0,
warm_start=False)]
###Markdown
Saving final model
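Models saved this way can be restored later with `load_model` and used for scoring via `predict_model`; a sketch (the path assumes the save loop below, kept commented out):
###Code
# loaded_model = load_model('./saved_models/XGBClassifier')
# scored = predict_model(loaded_model, data=df)
###Output
_____no_output_____
###Markdown
Save each of the top models: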
###Code
# from google.colab import drive
# drive.mount('/content/gdrive')
type(top3_models[0])
for model in top3_models:
model_name = model.__class__.__name__
save_model(model,'./saved_models/{0}'.format(model_name))
print('{0} model saved!'.format(model_name))
transformed_df.head()
transformed_df.to_excel('../Bank_Personal_Loan_Modelling_transformed.xlsx')
###Output
_____no_output_____ |
Computer Science/eat_tensorflow2_in_30_days/practice/4_low level API/1_the structure operation of tensor/main.ipynb | ###Markdown
1. Creating Tensors
###Code
import numpy as np
import tensorflow as tf
a = tf.constant([1, 2, 3])
a
b = tf.range(1, 10, 2)
b
c = tf.linspace(0.0, 2*3.14, 100)
c
d = tf.zeros([3, 3])
d
a = tf.ones([3, 3])
b = tf.zeros_like(a)
a, b
b = tf.fill([3, 2], 5)
b
tf.random.set_seed(1.0)
a = tf.random.uniform([5, 2], minval=0, maxval=10)
a
b = tf.random.normal([3, 3], mean=0.0, stddev=1.0)
b
c = tf.random.truncated_normal([5, 5], mean=0.0, stddev=1.0)
c
I = tf.eye(3,3)
t = tf.linalg.diag([1, 2, 3])
I, t
###Output
_____no_output_____
###Markdown
2. Indexing and Slicing
###Code
tf.random.set_seed(3)
t = tf.random.uniform([5,5], minval=0, maxval=10, dtype=tf.int32)
t
t[0]
t[-1]
t[1,3], t[1][3], t[1,3]==t[1][3]
t[1:4], tf.slice(t, [1,0], [3,-1])
t[1:, :4:2]
x = tf.Variable([[1,2],[3,4]])
x[1, :].assign(tf.constant([0, 0]))
x
a = tf.random.uniform([3, 3, 3], minval=0, maxval=10, dtype=tf.int32)
a
a[...,0]
scores = tf.random.uniform([4, 10, 7], minval=0, maxval=100, dtype=tf.int32)
scores
p = tf.gather(scores, [0, 5, 9], axis=1)
p
q = tf.gather(tf.gather(scores, [0, 5, 9], axis=1), [1, 3, 6], axis=2)
q
s = tf.gather_nd(scores, indices=[[0, 0], [2, 4], [3, 6]])
s
p = tf.boolean_mask(scores, [1, 0, 0, 0, 0, 1, 0, 0, 0, 1], axis=1)
p
s = tf.boolean_mask(
scores,
[[True,False,False,False,False,False,False,False,False,False],
[False,False,False,False,False,False,False,False,False,False],
[False,False,False,False,True,False,False,False,False,False],
[False,False,False,False,False,False,True,False,False,False]]
)
s
c = tf.constant([[-1,1,-1],[2,2,-2],[3,-3,3]])
bool_c = tf.boolean_mask(c, c<0)
c, c[c>1], bool_c
c = tf.constant([[-1, 1, -1], [2, 2, -2], [3, -3, 3]], dtype=tf.float32)
d = tf.where(c<0, tf.fill(c.shape, np.nan), c)
d
indices = tf.where(c<0)
indices, tf.scatter_nd([[0,0], [2,1]], [c[0,0], c[2,1]], c.shape)
d = c - tf.scatter_nd([[0,0], [2,1]], [c[0,0], c[2,1]], c.shape)
d, tf.gather_nd(c, indices)
indices = tf.where(c<0)
tf.scatter_nd(indices, tf.gather_nd(c, indices), c.shape)
###Output
_____no_output_____
###Markdown
3. Dimension Transformations
###Code
a = tf.random.uniform(
shape=[1,3,3,2],
minval=0,
maxval=255,
dtype=tf.int32
)
a
s = tf.squeeze(a)
s
d = tf.expand_dims(s, axis=0)
d
# Batch,Height,Width,Channel
a = tf.random.uniform(shape=[100,600,600,4],minval=0,maxval=255,dtype=tf.int32)
tf.print(a.shape)
# transpose to Channel,Height,Width,Batch
s= tf.transpose(a,perm=[3,1,2,0])
tf.print(s.shape)
###Output
TensorShape([100, 600, 600, 4])
TensorShape([4, 600, 600, 100])
###Markdown
4. Joining and Splitting
###Code
a = tf.constant([[1.0,2.0],[3.0,4.0]])
b = tf.constant([[5.0,6.0],[7.0,8.0]])
c = tf.constant([[9.0,10.0],[11.0,12.0]])
tf.concat([a, b, c], axis=0)
tf.concat([a, b, c], axis=1)
tf.stack([a, b, c])
tf.stack([a, b, c], axis=1)
a = tf.constant([[1.0,2.0],[3.0,4.0]])
b = tf.constant([[5.0,6.0],[7.0,8.0]])
c = tf.constant([[9.0,10.0],[11.0,12.0]])
c = tf.concat([a,b,c],axis = 0)
c
tf.split(c, 3, axis=0)
tf.split(c, [2, 2, 2], axis=0)
###Output
_____no_output_____ |
projects/3)prospective_tariff.ipynb | ###Markdown
Determining a Prospective Tariff for a Telecom Company Our task is to analyse the tariff plans of Megaline, a federal mobile operator. Clients are offered two plans: Smart and Ultra. To adjust the advertising budget, the commercial department wants to understand which tariff brings in more money. The project performs a preliminary analysis of the tariffs on a small sample of clients. We have data on 500 Megaline users: who they are, where they are from, which tariff they use, and how many calls and messages each of them made in 2018. We will analyse the clients' behaviour and conclude which tariff is better.**Data description**Table `users` (user information):* `user_id` — unique user identifier* `first_name` — user's first name* `last_name` — user's last name* `age` — user's age (years)* `reg_date` — tariff activation date (day, month, year)* `churn_date` — date the user stopped using the tariff (if missing, the tariff was still active when the data was exported)* `city` — user's city of residence* `tariff` — tariff plan name Table `calls` (call information):* `id` — unique call number* `call_date` — call date* `duration` — call duration in minutes* `user_id` — identifier of the user who made the call Table `messages` (message information):* `id` — unique message number* `message_date` — message date* `user_id` — identifier of the user who sent the message Table internet (internet session information):* `id` — unique session number* `mb_used` — internet traffic used during the session (in megabytes)* `session_date` — session date* `user_id` — user identifier Table `tariffs` (tariff information):* `tariff_name` — tariff name* `rub_monthly_fee` — monthly fee in rubles* `minutes_included` — call minutes included in the monthly fee* `messages_included` — messages included per month* `mb_per_month_included` — internet traffic included in the monthly fee (in megabytes)* `rub_per_minute` — price per minute over the package limit (e.g. if the tariff includes 100 minutes, the 101st minute is charged)* `rub_per_message` — price per message over the limit* `rub_per_gb` — price per extra gigabyte of traffic over the limit (1 gigabyte = 1024 megabytes) * [Step 1. Open the data file and study the general information.](step1)* [Step 2. Prepare the data](step2)* [Step 3. Analyse the data](step3)* [Step 4. Test the hypotheses](step4)* [Step 5. Write an overall conclusion](step5) Step 1. Open the data file and study the general information
###Code
import pandas as pd
import math
import matplotlib.pyplot as plt
import seaborn as sns
sns.set();
pd.set_option('display.max_columns', None)
import numpy as np
from scipy import stats as st
calls = pd.read_csv('../datasets/prospective_tariff_calls.csv')
calls.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 202607 entries, 0 to 202606
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 202607 non-null object
1 call_date 202607 non-null object
2 duration 202607 non-null float64
3 user_id 202607 non-null int64
dtypes: float64(1), int64(1), object(2)
memory usage: 6.2+ MB
###Markdown
Downcast the duration and user_id columns to reduce memory usage and speed up the code.
###Code
calls['duration'] = pd.to_numeric(calls['duration'], downcast='float')
calls['user_id'] = pd.to_numeric(calls['user_id'], downcast='signed')
display(calls.sample(5))
internet = pd.read_csv('../datasets/prospective_tariff_internet.csv')
internet.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 149396 entries, 0 to 149395
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0 149396 non-null int64
1 id 149396 non-null object
2 mb_used 149396 non-null float64
3 session_date 149396 non-null object
4 user_id 149396 non-null int64
dtypes: float64(1), int64(2), object(2)
memory usage: 5.7+ MB
###Markdown
Downcast the mb_used and user_id columns to reduce memory usage.
###Code
internet['mb_used'] = pd.to_numeric(internet['mb_used'], downcast='float')
internet['user_id'] = pd.to_numeric(internet['user_id'], downcast='signed')
display(internet.sample(5))
messages = pd.read_csv('../datasets/prospective_tariff_messages.csv')
messages.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 123036 entries, 0 to 123035
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 123036 non-null object
1 message_date 123036 non-null object
2 user_id 123036 non-null int64
dtypes: int64(1), object(2)
memory usage: 2.8+ MB
###Markdown
Downcast the user_id column to reduce memory usage.
###Code
messages['user_id'] = pd.to_numeric(messages['user_id'], downcast='signed')
display(messages.sample(5))
tariffs = pd.read_csv('../datasets/prospective_tariff_tariffs.csv')
tariffs.info()
display(tariffs)
###Output
_____no_output_____
###Markdown
Rename the last column 'tariff_name' to 'tariff' to simplify further work.
###Code
tariffs.columns = ['messages_included', 'mb_per_month_included', 'minutes_included', 'rub_monthly_fee',
'rub_per_gb', 'rub_per_message', 'rub_per_minute', 'tariff']
users = pd.read_csv('../datasets/prospective_tariff_users.csv')
users.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 500 entries, 0 to 499
Data columns (total 8 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 user_id 500 non-null int64
1 age 500 non-null int64
2 churn_date 38 non-null object
3 city 500 non-null object
4 first_name 500 non-null object
5 last_name 500 non-null object
6 reg_date 500 non-null object
7 tariff 500 non-null object
dtypes: int64(2), object(6)
memory usage: 31.4+ KB
###Markdown
Downcast the user_id and age columns to reduce memory usage.
###Code
users['user_id'] = pd.to_numeric(users['user_id'], downcast='signed')
users['age'] = pd.to_numeric(users['age'], downcast='signed')
display(users.sample(5))
###Output
_____no_output_____
###Markdown
We have 5 data tables: - calls (call information), - internet (internet session information), - messages (message information), - tariffs (tariff information), - users (user information). For further work these tables need to be merged into one, grouping the information by month and unique user and summing the minutes spent, megabytes used and messages sent. Step 2. Prepare the data **Table calls**
###Code
calls.info()
calls['month'] = pd.DatetimeIndex(calls['call_date']).month #Нахождение месяца звонка
###Output
_____no_output_____
###Markdown
Since the task states that Megaline always rounds minutes and megabytes up, we round the call duration in minutes up.
###Code
calls['duration_round'] = calls['duration'].map(math.ceil)
print('Количество звонков со значением 0 минут: {:.2%}'.format(len(calls[calls['duration'] <= 0])/len(calls['duration'])))
###Output
Количество звонков со значением 0 минут: 19.55%
###Markdown
Almost 20% of the calls in the data have a duration of 0 minutes (and we do not know where these zeros come from - they could be missed calls or data errors). Let us assume they denote missed calls; since we will sum each subscriber's minutes by month, these values should not distort the result much.
###Code
calls.head(20)
calls_pivot = calls.pivot_table(index = ['user_id', 'month'], values = 'duration_round', aggfunc = ['count', 'sum'])
calls_pivot = calls_pivot.reset_index()
calls_pivot.columns = ['user_id', 'month', 'calls_count', 'duration_calls_sum']
calls_pivot.head()
calls_pivot[calls_pivot['duration_calls_sum'] == 0]
###Output
_____no_output_____
###Markdown
It turns out that a few subscribers had no spoken minutes at all in a month - 3 of them, with user_id 1240, 1257 and 1373. Their number of calls per month is also small - 1 or 2. Perhaps these people were away in those months and did not talk on their mobile phone, or they have two phones and use one of them only for the internet, etc.
###Code
ax = calls_pivot.groupby(calls_pivot['month']).agg({'calls_count': 'count', 'duration_calls_sum': 'median'}).\
plot.bar(figsize=(15, 7), rot=0, width=0.7)
ax.set_title('График суммарного количества уникальных пользователей и медианных значений продолжительности звонков', size=18)
ax.set_xlabel('Номер месяца')
ax.set_ylabel('Количество звонков и медианное количество минут')
ax.set_ylim(None, 600)
for p in ax.patches:
ax.annotate(str(round(p.get_height())), (p.get_x() * 1, p.get_height() * 1.03))
plt.show()
###Output
_____no_output_____
###Markdown
The chart shows that the number of unique users grows roughly linearly every month. The highest median of total minutes used is 492 in December, and the lowest is 230 in January. The chart also shows that more and more clients are acquired each month.
###Code
plt.figure(figsize=(15,5))
plt.title('Количество звонков в зависимости от месяца', size=16)
ax= calls_pivot.groupby(calls_pivot['month'])['calls_count'].sum().plot(kind='bar', rot=0)
for p in ax.patches:
ax.annotate(str(round(p.get_height())), (p.get_x() * 1, p.get_height() * 1.03))
plt.xlabel('Номер месяца')
plt.ylabel('Количество звонков')
plt.ylim(None, 36000)
plt.show()
###Output
_____no_output_____
###Markdown
The number of calls also grows roughly linearly from month to month (most likely because of the growing number of subscribers). Let us write a function for building the plots automatically:
###Code
def plot_subs(feature, data,
suptitle, title1, title2, xlabel1, xlabel2, ylabel1, ylabel2, xlim=None):
'''
input:
feature - название исследуемого признака
data - данные, по которым строим график
suptitle - наименование графиков
title1, title2 - название гистограммы и боксплота
xlabel1, xlabel2 - название оси X гистограммы и боксплота
ylabel1, ylabel2 - название оси Y гистограммы и боксплота,
xlim - ограничение по оси X
output:
None
'''
fig, axs = plt.subplots(1, 2, figsize=(18,7))
plt.suptitle(suptitle, size=18)
#Построение гистограммы
data[feature].hist(bins=100, edgecolor='black', ax=axs[0])
axs[0].set_title(title1, size=15)
axs[0].set_xlabel(xlabel1, size=13)
axs[0].set_ylabel(ylabel1, size=13)
#Построение боксплота
sns.boxplot(x=feature, data=data, ax=axs[1])
axs[1].set_title(title2, size=15)
axs[1].set_xlabel(xlabel2, size=13)
axs[1].set_ylabel(ylabel2, size=13)
if xlim is not None:
axs[0].set_xlim(xlim)
axs[1].set_xlim(xlim)
plt.show()
###Output
_____no_output_____
###Markdown
Let us plot a histogram and a boxplot of the distribution of minutes used.
###Code
plot_subs('duration_calls_sum', calls_pivot, 'Распределение проговоренных минут',
'Гистограмма распределения суммарного количества минут', 'Диаграмма размаха рапределения суммарного количества минут',
'Суммарное количество минут', 'Суммарное количество минут',
'Количество абонентов', 'Количество абонентов')
###Output
_____no_output_____
###Markdown
The histogram shows that the largest group of subscribers uses about 500 minutes in total; the boxplot suggests that typical values lie between 0 and 1050 minutes, and everything beyond that mark is an outlier.
###Code
calls_pivot.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 3174 entries, 0 to 3173
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 user_id 3174 non-null int64
1 month 3174 non-null int64
2 calls_count 3174 non-null int64
3 duration_calls_sum 3174 non-null int64
dtypes: int64(4)
memory usage: 99.3 KB
###Markdown
**Table internet**
###Code
internet['month'] = pd.DatetimeIndex(internet['session_date']).month #Нахождение месяца интернет сессии
display(internet.sample(5))
###Output
_____no_output_____
###Markdown
Since Megaline always rounds minutes and megabytes up, we round the megabytes up as well.
###Code
internet['mb_used_round'] = internet['mb_used'].map(math.ceil)
internet_pivot = internet.pivot_table(index=['user_id', 'month'], values = 'mb_used_round', aggfunc = ['count', 'sum'])
internet_pivot = internet_pivot.reset_index()
internet_pivot.columns = ['user_id', 'month', 'count_mb', 'mb_used_round_sum']
internet_pivot[internet_pivot['mb_used_round_sum'] == 0]
###Output
_____no_output_____
###Markdown
Two subscribers did not use any internet traffic at all during a month.
###Code
plt.figure(figsize=(15,5))
plt.title('Медианное значение объема трафика в зависимости от месяца', size = 18)
ax = internet_pivot.groupby(internet_pivot['month'])['mb_used_round_sum'].median().plot(kind='bar', rot=0)
for p in ax.patches:
ax.annotate(str(round(p.get_height())), (p.get_x() * 1, p.get_height() * 1.03))
plt.xlabel('Номер месяца')
plt.ylabel('Медианное значение объема трафика')
plt.ylim(None, 20000)
plt.show()
###Output
_____no_output_____
###Markdown
The chart shows that the highest median internet usage, 18 GB, was in December, while January had the lowest median - almost 8 GB. Let us plot a histogram and a boxplot.
###Code
plot_subs('mb_used_round_sum', internet_pivot, 'Распределение израсходованного трафика интернета',
'Гистограмма распределения израсходованного интернета', 'Диаграмма размаха израсходованного интернета',
'Суммарный объем израсходованного трафика интернета', 'Суммарный объем израсходованного трафика интернета',
'Количество абонентов', 'Количество абонентов')
###Output
_____no_output_____
###Markdown
The histogram of internet usage shows that most subscribers use about 15 GB of traffic. The histogram is bell-shaped and close to a normal distribution. Typical values of total traffic used lie between 0 and 35 GB; everything above 35 GB is an outlier.
###Code
internet_pivot.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 3203 entries, 0 to 3202
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 user_id 3203 non-null int64
1 month 3203 non-null int64
2 count_mb 3203 non-null int64
3 mb_used_round_sum 3203 non-null int64
dtypes: int64(4)
memory usage: 100.2 KB
###Markdown
Merge the internet_pivot table with calls_pivot.
###Code
internet_merge = calls_pivot.merge(internet_pivot, on=['user_id', 'month'], how='outer')
###Output
_____no_output_____
###Markdown
**Table messages**
###Code
messages['month'] = pd.DatetimeIndex(messages['message_date']).month #Нахождение месяца, когда было отправлено сообщение
messages.sample(5)
###Output
_____no_output_____
###Markdown
Create a pivot table to analyse the number of messages.
###Code
messages['user_id1'] = messages['user_id']
messages_pivot = messages.pivot_table(index=['user_id', 'month'], values='user_id1', aggfunc='count')
messages_pivot = messages_pivot.reset_index()
messages_pivot.columns = ['user_id', 'month', 'messages_count']
plt.figure(figsize=(15,5))
plt.title('График медианного количества сообщений в зависимости от месяца', size = 18)
ax = messages_pivot.groupby(messages_pivot['month'])['messages_count'].median().plot(kind='bar', rot=0)
for p in ax.patches:
ax.annotate(str(round(p.get_height())), (p.get_x() * 1, p.get_height() * 1.03))
plt.xlabel('Номер месяца')
plt.ylabel('Медианное значение количества сообщений')
plt.ylim(None, 50)
plt.show()
###Output
_____no_output_____
###Markdown
The highest median number of SMS messages sent per month is 44, while the lowest, 22 messages per month, is observed in January.
###Code
plot_subs('messages_count', messages_pivot, 'Распределение отправленных сообщений',
'Гистограмма распределения количества сообщений', 'Диаграмма размаха рапределения суммарного количества отправленных смс',
'Суммарное количество отправленных смс', 'Суммарное количество отправленных смс',
'Количество абонентов', 'Количество абонентов')
###Output
_____no_output_____
###Markdown
Most subscribers send about 25 messages per month. Typical values of sent SMS messages lie in the range from 1 to 60; everything above that range is an outlier. Merge the messages_pivot table with internet_merge.
###Code
messages_merge = internet_merge.merge(messages_pivot, on=[ 'user_id', 'month'], how='outer')
###Output
_____no_output_____
###Markdown
Next, merge the messages_merge table with users.
###Code
users_merge = messages_merge.merge(users, on='user_id', how='left')
###Output
_____no_output_____
###Markdown
Finally, merge the resulting table with tariffs.
###Code
df = users_merge.merge(tariffs, on='tariff', how='left')
###Output
_____no_output_____
###Markdown
Let us look at the resulting table.
###Code
df.sample(10)
print(f'Количество клиентов: {df.user_id.nunique()}')
#Просмотр сколько памяти занимает обработка и количество пропусков в датафрейме
df.info(memory_usage='deep')
df['city'].unique()
df.describe().T
###Output
_____no_output_____
###Markdown
Convert object-type columns to the categorical type.
###Code
#Создание категориального листа для перевода столбцов из типа данных object в тип данных category для оптимизации работы кода
cat_list = ['city', 'first_name', 'last_name', 'reg_date', 'tariff']
df[cat_list] = df[cat_list].astype('category') #Перевод столбцов из типа данных object в тип category
###Output
_____no_output_____
###Markdown
Fill the missing values with zeros, assuming that if, after merging the tables, data on messages, outgoing calls or megabytes used is missing, the subscriber simply did not use that part of the included services.
###Code
#Создание листа с наименованием столбцов, в которых необходимо заполнить пустые значения нулями
fill_na_list = ['calls_count', 'duration_calls_sum', 'count_mb', 'mb_used_round_sum', 'messages_count']
df[fill_na_list] = df[fill_na_list].fillna(0)
#Перевод данных в тип int
float_list = ['calls_count', 'duration_calls_sum', 'count_mb', 'mb_used_round_sum', 'messages_count']
df[float_list] = df[float_list].astype('int')
#Cколько таблица использует memory usage после обработки
df.info(memory_usage='deep')
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 3214 entries, 0 to 3213
Data columns (total 21 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 user_id 3214 non-null int64
1 month 3214 non-null int64
2 calls_count 3214 non-null int32
3 duration_calls_sum 3214 non-null int32
4 count_mb 3214 non-null int32
5 mb_used_round_sum 3214 non-null int32
6 messages_count 3214 non-null int32
7 age 3214 non-null int8
8 churn_date 187 non-null object
9 city 3214 non-null category
10 first_name 3214 non-null category
11 last_name 3214 non-null category
12 reg_date 3214 non-null category
13 tariff 3214 non-null category
14 messages_included 3214 non-null int64
15 mb_per_month_included 3214 non-null int64
16 minutes_included 3214 non-null int64
17 rub_monthly_fee 3214 non-null int64
18 rub_per_gb 3214 non-null int64
19 rub_per_message 3214 non-null int64
20 rub_per_minute 3214 non-null int64
dtypes: category(5), int32(5), int64(9), int8(1), object(1)
memory usage: 582.0 KB
###Markdown
Let us write a function that calculates the monthly revenue from each user for exceeding the minutes, SMS and gigabytes included in the tariff plan.
###Code
#Функция, для подсчета использованных сверх лимита смс, минут и Гб в денежном эквиваленте
def income(row):
minutes = (row['duration_calls_sum'] - row['minutes_included']) * row['rub_per_minute']
sms = (row['messages_count'] - row['messages_included']) * row['rub_per_message']
gb = np.ceil(((row['mb_used_round_sum'] - row['mb_per_month_included']) / 1024))* row['rub_per_gb']
over_limit = 0
for i in [minutes, sms, gb]:
if i>0:
over_limit += i
return over_limit
df['over_limit'] = df.apply(income, axis=1)
df.sample(5)
###Output
_____no_output_____
###Markdown
Add a column with the monthly revenue from each subscriber, including the monthly fee.
###Code
df['income_sum'] = df['over_limit'] + df['rub_monthly_fee']
df.head()
###Output
_____no_output_____
###Markdown
Let us plot the monthly revenue over time for each tariff.
###Code
plot_data = df.groupby(['tariff', 'month'])['income_sum'].sum().reset_index()
plt.figure(figsize=(10, 5))
plt.suptitle('Грифик линейной зависимости помесячной выручки в зависимости от тарифа', size=18)
sns.lineplot(data = plot_data, x='month', y='income_sum', hue='tariff', palette='deep', legend='brief')
plt.xlabel('Номер месяца', size=12)
plt.ylabel('Суммарная выручка по тарифу', size=12)
plt.show()
###Output
_____no_output_____
###Markdown
The chart shows that total revenue for each tariff grows roughly linearly. For the Smart tariff it grows more steeply, so in 2018 that tariff was more profitable for the business, mainly due to gigabytes and minutes used over the limit. Step 3. Analyse the data **How many minutes of calls, how many messages and how much internet traffic do the users of each tariff need per month** First, let us build a pivot table with the mean and median values of all indicators: total call duration, number of messages and megabytes per month.
###Code
tariff_pivot = df.pivot_table(index='month', columns='tariff',
values=['duration_calls_sum', 'messages_count', 'mb_used_round_sum'],
aggfunc=['median', 'mean'])
tariff_pivot
###Output
_____no_output_____
###Markdown
The pivot table shows: - on average, clients of both Smart and Ultra do not use up all the minutes the operator provides per month, and the mean and median minutes used do not differ much between the two tariffs. - In almost every month Smart clients exceeded the included internet traffic of 15360 MB per month. This is profitable for the business, since the company earns on the extra internet, but in the long run it may push clients towards other operators with better terms. - Messages behave like minutes: on both tariffs people on average stay within the included limit. Let us plot histograms of call and internet usage for the Smart tariff to check the assumption that people prefer to use the internet to call each other (through services such as WhatsApp or Telegram) rather than mobile voice.
###Code
fig, axs = plt.subplots(1, 2, figsize=(18,7))
plt.suptitle('Гистограммы распределения израсходованного трафика по звонкам и интернету тарифа "Смарт"', size=18)
#Построение гистограммы израсходованных минут тарифа Смарт
df[df['tariff'] == 'smart']['duration_calls_sum'].hist(bins=50, ax=axs[0], edgecolor='black')
axs[0].set_title('Гистограмма распределения израсходованных минут тарифа Смарт', size=14)
axs[0].set_xlabel('Сумма израсходованных минут', size=13)
axs[0].set_ylabel('Количество абонентов', size=13)
#Построение гистограммы использованных гигабайт тарифа Смарт
df[df['tariff'] == 'smart']['mb_used_round_sum'].hist(bins=50, ax=axs[1], edgecolor='black')
axs[1].set_title('Гистограмма распределения израсходованных Мб тарифа Смарт', size=14)
axs[1].set_xlabel('Количество использованных Мб', size=13)
axs[1].set_ylabel('Количество пользователей', size=13)
plt.show()
###Output
_____no_output_____
###Markdown
The first histogram shows that the largest groups of Smart subscribers use 350-360 or 400-530 minutes per month, which fits within the included limit (500 minutes per month). Beyond the 500 included minutes, the number of subscribers willing to pay for extra minutes drops, which is logical. Still, the histogram has a tail: some people in the sample talk a lot on mobile voice, for whom the 500-minute limit is clearly not enough and who would be better off switching to Ultra with its 3000-minute allowance. The distribution resembles a Poisson distribution, is shifted to the left and cut off at zero. The histogram of megabytes used on the Smart tariff is bell-shaped, fairly symmetric and close to a normal distribution. Most users consume about 15,100 MB, i.e. practically the whole included quota. Quite a large number of people (about 120) use around 20,000 MB, apparently buying the missing MB (almost 5 GB). After that value the frequency falls off sharply towards the tails, so fewer and fewer people buy extra gigabytes, although the sample contains users who consume 38,000 MB (about 38 GB) per month - with 15 GB included they buy an extra 23 GB, and the Ultra tariff would suit them better.
###Code
fig, axs = plt.subplots(1, 2, figsize=(18,7))
plt.suptitle('Гистограммы распределения израсходованного трафика по звонкам и интернету тарифа "Ультра"', size=18)
#Построение гистограммы израсходованных минут тарифа Ультра
df[df['tariff'] == 'ultra']['duration_calls_sum'].hist(bins=50, ax=axs[0], edgecolor='black')
axs[0].set_title('Гистограмма распределения израсходованных минут тарифа Ультра', size=14)
axs[0].set_xlabel('Сумма израсходованных минут', size=13)
axs[0].set_ylabel('Количество абонентов', size=13)
#Построение гистограммы использованных мегабайт тарифа Ультра
df[df['tariff'] == 'ultra']['mb_used_round_sum'].hist(bins=50, ax=axs[1], edgecolor='black')
axs[1].set_title('Гистограмма распределения израсходованных Мб тарифа Ультра', size=14)
axs[1].set_xlabel('Количество использованных Мб', size=13)
axs[1].set_ylabel('Количество пользователей', size=13)
plt.show()
###Output
_____no_output_____
###Markdown
The first histogram - minutes used on the Ultra tariff - is asymmetric and uneven. The biggest peak is in the 0-33 minute range; the subscribers in this group probably do not favour mobile voice and use the internet for calls instead, or perhaps they chose this tariff for its large internet quota in the first place. The largest group of Ultra users spends between 250 and 750 minutes. None of the subscribers uses up the full number of included minutes. The histogram of megabytes used on Ultra is very uneven, with peaks and dips across the whole chart with some periodicity. The users are heterogeneous: some consume a lot of MB (for example, about 48 people use 22,000 MB), while others use very little internet (up to 500 MB). The tail contains users who pay for extra gigabytes of internet, some for almost 20 extra GB. Let us plot histograms of the number of messages on the Smart and Ultra tariffs.
###Code
fig, axs = plt.subplots(1, 2, figsize=(18,7))
plt.suptitle('Гистограммы распределения израсходованного лимита сообщений на тарифе "Смарт" и "Ультра"', size=18)
#Построение гистограммы израсходованных минут тарифа Смарт
df[df['tariff'] == 'smart']['messages_count'].hist(bins=50, ax=axs[0], edgecolor='black')
axs[0].set_title('Гистограмма распределения количества сообщений тарифа Смарт', size=14)
axs[0].set_xlabel('Количество отправленных сообщений', size=13)
axs[0].set_ylabel('Количество абонентов', size=13)
#Построение гистограммы использованных гигабайт тарифа Смарт
df[df['tariff'] == 'ultra']['messages_count'].hist(bins=50, ax=axs[1], edgecolor='black')
axs[1].set_title('Гистограмма распределения количества сообщений тарифа Ультра', size=14)
axs[1].set_xlabel('Количество отправленных сообщений', size=13)
axs[1].set_ylabel('Количество пользователей', size=13)
plt.show()
###Output
_____no_output_____
###Markdown
The histograms show that the distributions on both charts are broadly similar, so the behaviour of clients on the two tariffs is similar as well. For example, in both cases there is a peak at 0-3 messages per month for Smart clients (about 350 people) and at 0-5 messages per month for Ultra users (about 210 people). Smart users tend to exhaust the message limit and pay for further messages, whereas Ultra subscribers never exceed their included number of messages. **Mean, variance and standard deviation.** We split the population into two samples, the Smart tariff and the Ultra tariff, and computed the mean, variance and standard deviation of the minutes, megabytes used and number of messages for each. **Number of minutes** ___Smart tariff___
###Code
print('Среднее по количеству минут в тарифе Смарт: {:.2f}'.format(df[df['tariff'] == 'smart']
['duration_calls_sum'].mean()))
print('Дисперсия по количеству минут в тарифе Смарт: {:.2f}'.format(np.var(df[df['tariff'] == 'smart']
['duration_calls_sum'])))
print('Стандартное отклонение по количеству минут в тарифе Смарт: {:.2f}'.format(np.std(df[df['tariff'] == 'smart']
['duration_calls_sum'], ddof=1)))
###Output
Среднее по количеству минут в тарифе Смарт: 417.93
Дисперсия по количеству минут в тарифе Смарт: 36203.07
Стандартное отклонение по количеству минут в тарифе Смарт: 190.31
###Markdown
___Ultra tariff___
###Code
print('Среднее по количеству минут в тарифе Ультра: {:.2f}'.format(df[df['tariff'] == 'ultra']
['duration_calls_sum'].mean()))
print('Дисперсия по количеству минут в тарифе Ультра: {:.2f}'.format(np.var(df[df['tariff'] == 'ultra']
['duration_calls_sum'])))
print('Стандартное отклонение по количеству минут в тарифе Ультра: {:.2f}'.format(np.std(df[df['tariff'] == 'ultra']
['duration_calls_sum'], ddof=1)))
###Output
Среднее по количеству минут в тарифе Ультра: 526.62
Дисперсия по количеству минут в тарифе Ультра: 100771.22
Стандартное отклонение по количеству минут в тарифе Ультра: 317.61
###Markdown
- The mean number of minutes is 417.93 on Smart and 526.62 on Ultra, which makes sense: Ultra includes more minutes, so people buy it to talk more. - The variance of minutes is 36203.07 on Smart and 100771.22 on Ultra. - The standard deviation of minutes is 190.31 on Smart and 317.61 on Ultra. **Megabytes used** ___Smart tariff___
###Code
print('Среднее по количеству использованных мегабайт в тарифе Смарт: {:.2f}'.format(df[df['tariff'] == 'smart']
['mb_used_round_sum'].mean()))
print('Дисперсия по количеству использованных мегабайт в тарифе Смарт: {:.2f}'.format(np.var(df
[df['tariff'] == 'smart']
['mb_used_round_sum'])))
print('Стандартное отклонение по количеству использованных мегабайт в тарифе Смарт: {:.2f}'.format(np.std(df
[df['tariff'] == 'smart']
['mb_used_round_sum'], ddof=1)))
###Output
Среднее по количеству использованных мегабайт в тарифе Смарт: 16229.19
Дисперсия по количеству использованных мегабайт в тарифе Смарт: 34530412.74
Стандартное отклонение по количеству использованных мегабайт в тарифе Смарт: 5877.58
###Markdown
___Ultra tariff___
###Code
print('Среднее по количеству использованных мегабайт в тарифе Ультра: {:.2f}'.format(df[df['tariff'] == 'ultra']
['mb_used_round_sum'].mean()))
print('Дисперсия по количеству использованных мегабайт в тарифе Ультра: {:.2f}'.format(np.var(df[df['tariff'] == 'ultra']
['mb_used_round_sum'])))
print('Стандартное отклонение по количеству использованных мегабайт в тарифе Ультра: {:.2f}'.format(np.std(df[df['tariff'] == 'ultra']
['mb_used_round_sum'], ddof=1)))
###Output
Среднее по количеству использованных мегабайт в тарифе Ультра: 19486.90
Дисперсия по количеству использованных мегабайт в тарифе Ультра: 101832133.11
Стандартное отклонение по количеству использованных мегабайт в тарифе Ультра: 10096.32
###Markdown
As a result: - the mean megabytes used on Smart and Ultra do not differ much, 16229.19 vs 19486.90. This means the included megabytes are not enough for Smart clients, who on average buy about 1 extra GB each, while Ultra clients on average have far more included traffic than they need, since their limit is 30 GB and our mean is much lower. - The variance of megabytes used is 34530412.74 on Smart and 101832133.11 on Ultra. - The standard deviation of megabytes used is 5877.58 on Smart and 10096.32 on Ultra. **Number of messages** ___Smart tariff___
###Code
print('Среднее по количеству сообщений в тарифе Смарт: {:.2f}'.format(df[df['tariff'] == 'smart']
['messages_count'].mean()))
print('Дисперсия по количеству сообщений в тарифе Смарт: {:.2f}'.format(np.var(df[df['tariff'] == 'smart']
['messages_count'])))
print('Стандартное отклонение по количеству сообщений в тарифе Смарт: {:.2f}'.format(np.std(df[df['tariff'] == 'smart']
['messages_count'], ddof=1)))
###Output
Среднее по количеству сообщений в тарифе Смарт: 33.38
Дисперсия по количеству сообщений в тарифе Смарт: 796.46
Стандартное отклонение по количеству сообщений в тарифе Смарт: 28.23
###Markdown
___Ultra tariff___
###Code
print('Среднее по количеству сообщений в тарифе Ультра: {:.2f}'.format(df[df['tariff'] == 'ultra']
['messages_count'].mean()))
print('Дисперсия по количеству сообщений в тарифе Ультра: {:.2f}'.format(np.var(df[df['tariff'] == 'ultra']
['messages_count'])))
print('Стандартное отклонение по количеству сообщений в тарифе Ультра: {:.2f}'.format(np.std(df[df['tariff'] == 'ultra']
['messages_count'], ddof=1)))
###Output
Среднее по количеству сообщений в тарифе Ультра: 49.36
Дисперсия по количеству сообщений в тарифе Ультра: 2282.95
Стандартное отклонение по количеству сообщений в тарифе Ультра: 47.80
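###Markdown
The same statistics for both tariffs can also be obtained in a single call (note: pandas `var` and `std` use ddof=1 by default, while `np.var` above uses ddof=0, so the variances will differ slightly):
###Code
df.groupby('tariff')[['duration_calls_sum', 'mb_used_round_sum', 'messages_count']].agg(['mean', 'var', 'std'])
###Output
_____no_output_____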
###Markdown
- The mean number of messages is low on both tariffs: 33.38 on Smart and 49.36 on Ultra. - The variance of the number of messages differs quite strongly between the two tariffs, almost 3-fold: 796.46 on Smart vs 2282.95 on Ultra. - The standard deviation of the number of messages is 28.23 on Smart and 47.80 on Ultra. Step 4. Test the hypotheses **Hypothesis: "the average revenue from Ultra and Smart users differs"** We run Student's t-test for the null hypothesis that the average revenue from Ultra and Smart users is equal; the alternative hypothesis is that the average revenue differs. We choose a critical significance level alpha of 5%.
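Since the sample variances computed above differ noticeably, the test below is run with `equal_var=False` (Welch's t-test); the equal-variance assumption could additionally be checked with Levene's test, for example:
###Code
smart_revenue = df[df['tariff'] == 'smart']['income_sum']
ultra_revenue = df[df['tariff'] == 'ultra']['income_sum']
print(st.levene(smart_revenue, ultra_revenue))  # a small p-value indicates unequal variances
###Output
_____no_output_____
###Markdown
Run the t-test: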
###Code
alpha = .05
results_tariffs = st.ttest_ind(df[df['tariff'] == 'smart']['income_sum'], df[df['tariff'] == 'ultra']['income_sum'], equal_var = False )
print('p-значение:', results_tariffs.pvalue)
if (results_tariffs.pvalue < alpha):
print('Отвергаем нулевую гипотезу')
else:
print('Не получилось отвергнуть нулевую гипотезу')
###Output
p-значение: 2.7240946993530856e-246
Отвергаем нулевую гипотезу
###Markdown
The null hypothesis of equal average revenue for Smart and Ultra is rejected, so we do not reject the alternative: ___the average revenue from Ultra and Smart users differs___. **Hypothesis: "the average revenue from Moscow users differs from the revenue from users in other regions".** We run Student's t-test for the null hypothesis that the average revenue from Moscow users equals the revenue from users in other regions; the alternative hypothesis is that they differ. We again choose a critical significance level alpha of 5%.
###Code
alpha = .05
results_tariffs = st.ttest_ind(df[df['city'] == 'Москва']['income_sum'], df[df['city'] != 'Москва']['income_sum'])
print('p-значение:', results_tariffs.pvalue)
if (results_tariffs.pvalue < alpha):
print('Отвергаем нулевую гипотезу')
else:
print('Не получилось отвергнуть нулевую гипотезу')
###Output
p-значение: 0.531666503831252
Не получилось отвергнуть нулевую гипотезу
|
notebooks/candidate_models.ipynb | ###Markdown
Warren
###Code
warren_tweets = pickle.load(open('./tweetsent/tweets/old/warren_tweets_old.pkl', 'rb'))
warren_senti_scores = pickle.load(open('./tweetsent/senti_scores/warren_senti_scores.pkl', 'rb'))
# Finding Thresholds
num_retweets_warren = np.array([warren_tweets[i]['retweet_count']
for i in range(len(warren_tweets))])
# Finding max character
warren_max_char=0 #316
for i in warren_tweets[0:]:
warren_max_char = max(warren_max_char, warren_senti_scores[i['full_text']]['usage']['text_characters'])
# Create a corpus
corpus = set()
for tweet in warren_senti_scores:
corpus.update({i['text'] for i in warren_senti_scores[tweet]['keywords']})
warren_sorted_corpus = sorted(corpus)
with open('FeatureData/warren_corpus.pk', 'wb') as file:
pickle.dump(warren_sorted_corpus, file)
with open('FeatureData/warren_corpus.pk', 'rb') as file:
warren_sorted_corpus = pickle.load(file)
len(warren_sorted_corpus)
# Create Feature Matrix
warren_features = []
warren_labels = []
warren_feature_names = ['sadness', 'joy', 'fear', 'disgust', 'anger',
'sentiment', 'character'] + [i for i in warren_sorted_corpus]
for i in warren_tweets:
    # Ambiguous (middle-range) tweets are discarded; keep binary labels
if i['retweet_count'] <= 1083: #1083
warren_labels.append(-1)
elif i['retweet_count'] >= 1614: #1614
warren_labels.append(1)
else:
continue
# Feature
tweet_feature = []
for j,k in warren_senti_scores[i['full_text']]['emotion']['document']['emotion'].items():
tweet_feature.append(k)
tweet_feature.append(warren_senti_scores[i['full_text']]['sentiment']['document']['score'])
warren_feature_names.append('sentiment')
tweet_feature.append(warren_senti_scores[i['full_text']]['usage']['text_characters']/warren_max_char)
warren_feature_names.append('character')
# One-hot Encoded Features
text_relevance = dict({sent['text']:sent['relevance'] for sent in warren_senti_scores[i['full_text']]['keywords']})
tweet_onehot=[]
for keys in warren_sorted_corpus:
tweet_onehot.append(0 if keys not in text_relevance.keys() else text_relevance[keys])
tweet_feature.extend(tweet_onehot)
# Add all to features matrix
warren_features.append(tweet_feature)
with open('FeatureData/warren_features.pk', 'wb') as file:
pickle.dump([warren_features, warren_feature_names, warren_labels], file)
with open('FeatureData/warren_features.pk', 'rb') as file:
warren_features, warren_feature_names, warren_labels = pickle.load(file)
sum(warren_labels)
X_train_warren, X_test_warren, y_train_warren, y_test_warren = train_test_split(warren_features, warren_labels, test_size=1/3, random_state=42)
lr_warren = LogisticRegression(C=2.0)
lr_warren.fit(X_train_warren, y_train_warren)
lr_warren.score(X_test_warren, y_test_warren)
print(f1_score(lr_warren.predict(X_test_warren), y_test_warren))
print(f1_score(lr_warren.predict(X_train_warren), y_train_warren))
warren_train_acc = lr_warren.score(X_train_warren, y_train_warren)
warren_test_acc = lr_warren.score(X_test_warren, y_test_warren)
warren_train_f1 = f1_score(lr_warren.predict(X_test_warren), y_test_warren)
warren_test_f1 = f1_score(lr_warren.predict(X_train_warren), y_train_warren)
with open('evaluate/warren_evaluate.pk', 'wb') as file:
pickle.dump([warren_train_acc, warren_test_acc, warren_train_f1, warren_test_f1], file)
print("\t","Train Acc\t", "Test Acc\t", "Train F1 Score\t", "Test F1 Score")
print("Warren\t", '{:3.4f}'.format(warren_train_acc), "\t", '{:3.4f}'.format(warren_test_acc), "\t",
'{:3.4f}'.format(warren_train_f1), "\t", '{:3.4f}'.format(warren_test_f1))
lr_warren.coef_[0][:10]
sorted(list(zip(lr_warren.coef_[0], warren_feature_names)), key=lambda x: x[0], reverse=True)[:10]
svm_warren = SVC(C=4.0, kernel='linear') # rbf -> .50, linear -> 0.652
svm_warren.fit(X_train_warren, y_train_warren)
svm_warren.score(X_test_warren, y_test_warren)
f1_score(svm_warren.predict(X_test_warren), y_test_warren)
print(f1_score(svm_warren.predict(X_train_warren), y_train_warren))
# Dump and load to pickle file.
with open('Predictions/warren_LR.pk', 'wb') as file:
pickle.dump(lr_warren, file)
with open('Predictions/warren_SVM.pk', 'wb') as file:
pickle.dump(svm_warren, file)
with open('Predictions/warren_LR.pk', 'rb') as file:
lr_warren = pickle.load(file)
with open('Predictions/warren_SVM.pk', 'rb') as file:
svm_warren = pickle.load(file)
###Output
_____no_output_____
###Markdown
Biden
###Code
biden_tweets = pickle.load(open('./tweetsent/tweets/old/biden_tweets_old.pkl', 'rb'))
biden_senti_scores = pickle.load(open('./tweetsent/senti_scores/biden_senti_scores.pkl', 'rb'))
# Finding Thresholds
num_retweets_biden = np.array([biden_tweets[i]['retweet_count']
for i in range(len(biden_tweets))])
# Finding max character
biden_max_char=0 #315
for i in biden_tweets[0:]:
biden_max_char = max(biden_max_char, biden_senti_scores[i['full_text']]['usage']['text_characters'])
# Create a corpus
corpus = set()
for tweet in biden_senti_scores:
corpus.update({i['text'] for i in biden_senti_scores[tweet]['keywords']})
biden_sorted_corpus = sorted(corpus)
with open('FeatureData/biden_corpus.pk', 'wb') as file:
pickle.dump(biden_sorted_corpus, file)
with open('FeatureData/biden_corpus.pk', 'rb') as file:
biden_sorted_corpus = pickle.load(file)
# Create Feature Matrix
biden_features = []
biden_labels = []
biden_feature_names = ['sadness', 'joy', 'fear', 'disgust', 'anger',
'sentiment', 'character'] + [i for i in biden_sorted_corpus]
for i in biden_tweets:
    # Ambiguous (middle-range) tweets are discarded; keep binary labels
if i['retweet_count'] <= 208: #247: #302:
biden_labels.append(-1)
elif i['retweet_count'] >= 302: # 398: #784
biden_labels.append(1)
else:
continue
# Feature
tweet_feature = []
for j,k in biden_senti_scores[i['full_text']]['emotion']['document']['emotion'].items():
tweet_feature.append(k)
tweet_feature.append(biden_senti_scores[i['full_text']]['sentiment']['document']['score'])
biden_feature_names.append('sentiment')
tweet_feature.append(biden_senti_scores[i['full_text']]['usage']['text_characters']/biden_max_char)
biden_feature_names.append('character')
# One-hot Encoded Features
text_relevance = dict({sent['text']:sent['relevance'] for sent in biden_senti_scores[i['full_text']]['keywords']})
tweet_onehot=[]
for keys in biden_sorted_corpus:
tweet_onehot.append(0 if keys not in text_relevance.keys() else text_relevance[keys])
tweet_feature.extend(tweet_onehot)
# Add all to features matrix
biden_features.append(tweet_feature)
with open('FeatureData/biden_features.pk', 'wb') as file:
pickle.dump([biden_features, biden_feature_names, biden_labels], file)
with open('FeatureData/biden_features.pk', 'rb') as file:
biden_features, biden_feature_names, biden_labels = pickle.load(file)
sum(biden_labels)
X_train_biden, X_test_biden, y_train_biden, y_test_biden = train_test_split(biden_features, biden_labels, test_size=1/3, random_state=42)
lr_biden = LogisticRegression(C=2.0)
lr_biden.fit(X_train_biden, y_train_biden)
lr_biden.score(X_test_biden, y_test_biden)
f1_score(lr_biden.predict(X_test_biden), y_test_biden)
f1_score(lr_biden.predict(X_train_biden), y_train_biden)
biden_train_acc = lr_biden.score(X_train_biden, y_train_biden)
biden_test_acc = lr_biden.score(X_test_biden, y_test_biden)
biden_train_f1 = f1_score(lr_biden.predict(X_test_biden), y_test_biden)
biden_test_f1 = f1_score(lr_biden.predict(X_train_biden), y_train_biden)
with open('evaluate/biden_evaluate.pk', 'wb') as file:
pickle.dump([biden_train_acc, biden_test_acc, biden_train_f1, biden_test_f1], file)
biden_feature_names[:10]
lr_biden.coef_[0][:10]
sorted(list(zip(lr_biden.coef_[0], biden_feature_names)), key=lambda x: x[0], reverse=True)[:10]
svm_biden = SVC(C=4.0, kernel='linear') # rbf -> .86, linear -> 0.85
svm_biden.fit(X_train_biden, y_train_biden)
svm_biden.score(X_test_biden, y_test_biden)
f1_score(svm_biden.predict(X_test_biden), y_test_biden)
f1_score(svm_biden.predict(X_train_biden), y_train_biden)
# Dump and load to pickle file.
with open('Predictions/biden_LR.pk', 'wb') as file:
pickle.dump(lr_biden, file)
with open('Predictions/biden_SVM.pk', 'wb') as file:
pickle.dump(svm_biden, file)
with open('Predictions/biden_LR.pk', 'rb') as file:
lr_biden = pickle.load(file)
with open('Predictions/biden_SVM.pk', 'rb') as file:
svm_biden = pickle.load(file)
###Output
_____no_output_____
###Markdown
Bernie
###Code
bernie_tweets = pickle.load(open('./tweetsent/tweets/old/bernie_tweets_old.pkl', 'rb'))
bernie_senti_scores = pickle.load(open('./tweetsent/senti_scores/bernie_senti_scores.pkl', 'rb'))
# Finding Thresholds
num_retweets_bernie = np.array([bernie_tweets[i]['retweet_count']
for i in range(len(bernie_tweets))])
# Finding max character
bernie_max_char=0 # 304
for i in bernie_tweets:
bernie_max_char = max(bernie_max_char, bernie_senti_scores[i['full_text']]['usage']['text_characters'])
# Create a corpus
corpus = set()
for tweet in bernie_senti_scores:
corpus.update({i['text'] for i in bernie_senti_scores[tweet]['keywords']})
bernie_sorted_corpus = sorted(corpus)
with open('FeatureData/bernie_corpus.pk', 'wb') as file:
pickle.dump(bernie_sorted_corpus, file)
with open('FeatureData/bernie_corpus.pk', 'rb') as file:
bernie_sorted_corpus = pickle.load(file)
# Create Feature Matrix
bernie_features = []
bernie_labels = []
bernie_feature_names = ['sadness', 'joy', 'fear', 'disgust', 'anger',
'sentiment', 'character'] + [i for i in bernie_sorted_corpus]
for i in bernie_tweets:
    # Ambiguous (middle-range) tweets are discarded; keep binary labels
if i['retweet_count'] <= 1080:
bernie_labels.append(-1)
elif i['retweet_count'] >= 1612:
bernie_labels.append(1)
else:
continue
# Feature
tweet_feature = []
for j,k in bernie_senti_scores[i['full_text']]['emotion']['document']['emotion'].items():
tweet_feature.append(k)
tweet_feature.append(bernie_senti_scores[i['full_text']]['sentiment']['document']['score'])
bernie_feature_names.append('sentiment')
tweet_feature.append(bernie_senti_scores[i['full_text']]['usage']['text_characters']/bernie_max_char)
bernie_feature_names.append('character')
# One-hot Encoded Features
text_relevance = dict({sent['text']:sent['relevance'] for sent in bernie_senti_scores[i['full_text']]['keywords']})
tweet_onehot=[]
for keys in bernie_sorted_corpus:
tweet_onehot.append(0 if keys not in text_relevance.keys() else text_relevance[keys])
tweet_feature.extend(tweet_onehot)
# Add all to features matrix
bernie_features.append(tweet_feature)
with open('FeatureData/bernie_features.pk', 'wb') as file:
pickle.dump([bernie_features, bernie_feature_names, bernie_labels], file)
with open('FeatureData/bernie_features.pk', 'rb') as file:
bernie_features, bernie_feature_names, bernie_labels = pickle.load(file)
sum(bernie_labels)
X_train_bernie, X_test_bernie, y_train_bernie, y_test_bernie = train_test_split(bernie_features, bernie_labels, test_size=1/3, random_state=42)
lr_bernie = LogisticRegression(C=2.0)
lr_bernie.fit(X_train_bernie, y_train_bernie)
lr_bernie.score(X_test_bernie, y_test_bernie)
f1_score(lr_bernie.predict(X_test_bernie), y_test_bernie)
f1_score(lr_bernie.predict(X_train_bernie), y_train_bernie)
bernie_train_acc = lr_bernie.score(X_train_bernie, y_train_bernie)
bernie_test_acc = lr_bernie.score(X_test_bernie, y_test_bernie)
bernie_train_f1 = f1_score(lr_bernie.predict(X_test_bernie), y_test_bernie)
bernie_test_f1 = f1_score(lr_bernie.predict(X_train_bernie), y_train_bernie)
with open('evaluate/bernie_evaluate.pk', 'wb') as file:
pickle.dump([bernie_train_acc, bernie_test_acc, bernie_train_f1, bernie_test_f1], file)
lr_bernie.coef_[0][:10]
sorted(list(zip(lr_bernie.coef_[0], bernie_feature_names)), key=lambda x: x[0], reverse=True)[:10]
svm_bernie = SVC(C=4.0, kernel='linear') # rbf -> .86, linear -> 0.85
svm_bernie.fit(X_train_bernie, y_train_bernie)
svm_bernie.score(X_test_bernie, y_test_bernie)
f1_score(svm_bernie.predict(X_test_bernie), y_test_bernie)
f1_score(svm_bernie.predict(X_train_bernie), y_train_bernie)
# Dump and load to pickle file.
with open('Predictions/bernie_LR.pk', 'wb') as file:
pickle.dump(lr_bernie, file)
with open('Predictions/bernie_SVM.pk', 'wb') as file:
pickle.dump(svm_bernie, file)
with open('Predictions/bernie_LR.pk', 'rb') as file:
lr_bernie = pickle.load(file)
with open('Predictions/bernie_SVM.pk', 'rb') as file:
svm_bernie = pickle.load(file)
###Output
_____no_output_____
###Markdown
Yang
###Code
yang_tweets = pickle.load(open('./tweetsent/tweets/old/yang_tweets_old.pkl', 'rb'))
yang_senti_scores = pickle.load(open('./tweetsent/senti_scores/yang_senti_scores.pkl', 'rb'))
# Finding Thresholds
num_retweets_yang = np.array([yang_tweets[i]['retweet_count']
for i in range(len(yang_tweets))])
# Finding max character
yang_max_char= 0 #329
for i in yang_tweets:
yang_max_char = max(yang_max_char, yang_senti_scores[i['full_text']]['usage']['text_characters'])
# Create a corpus
corpus = set()
for tweet in yang_senti_scores:
corpus.update({i['text'] for i in yang_senti_scores[tweet]['keywords']})
yang_sorted_corpus = sorted(corpus)
with open('FeatureData/yang_corpus.pk', 'wb') as file:
pickle.dump(yang_sorted_corpus, file)
with open('FeatureData/yang_corpus.pk', 'rb') as file:
yang_sorted_corpus = pickle.load(file)
# Create Feature Matrix
yang_features = []
yang_labels = []
yang_feature_names = ['sadness', 'joy', 'fear', 'disgust', 'anger',
'sentiment', 'character'] + [i for i in yang_sorted_corpus]
for i in yang_tweets:
    # Ambiguous (middle-range) tweets are discarded; keep binary labels
if i['retweet_count'] <= 335: #880:
yang_labels.append(-1)
elif i['retweet_count'] >= 524: #1612:
yang_labels.append(1)
else:
continue
# Feature
tweet_feature = []
for j,k in yang_senti_scores[i['full_text']]['emotion']['document']['emotion'].items():
tweet_feature.append(k)
tweet_feature.append(yang_senti_scores[i['full_text']]['sentiment']['document']['score'])
yang_feature_names.append('sentiment')
tweet_feature.append(yang_senti_scores[i['full_text']]['usage']['text_characters']/yang_max_char)
yang_feature_names.append('character')
# One-hot Encoded Features
text_relevance = dict({sent['text']:sent['relevance'] for sent in yang_senti_scores[i['full_text']]['keywords']})
tweet_onehot=[]
for keys in yang_sorted_corpus:
tweet_onehot.append(0 if keys not in text_relevance.keys() else text_relevance[keys])
tweet_feature.extend(tweet_onehot)
# Add all to features matrix
yang_features.append(tweet_feature)
with open('FeatureData/yang_features.pk', 'wb') as file:
pickle.dump([yang_features, yang_feature_names, yang_labels], file)
with open('FeatureData/yang_features.pk', 'rb') as file:
yang_features, yang_feature_names, yang_labels = pickle.load(file)
sum(yang_labels)
X_train_yang, X_test_yang, y_train_yang, y_test_yang = train_test_split(yang_features, yang_labels, test_size=1/3, random_state=42)
lr_yang = LogisticRegression(C=2.0)
lr_yang.fit(X_train_yang, y_train_yang)
lr_yang.score(X_test_yang, y_test_yang)
f1_score(lr_yang.predict(X_test_yang), y_test_yang)
f1_score(lr_yang.predict(X_train_yang), y_train_yang)
yang_train_acc = lr_yang.score(X_train_yang, y_train_yang)
yang_test_acc = lr_yang.score(X_test_yang, y_test_yang)
yang_train_f1 = f1_score(lr_yang.predict(X_test_yang), y_test_yang)
yang_test_f1 = f1_score(lr_yang.predict(X_train_yang), y_train_yang)
with open('evaluate/yang_evaluate.pk', 'wb') as file:
pickle.dump([yang_train_acc, yang_test_acc, yang_train_f1, yang_test_f1], file)
lr_yang.coef_[0][:10]
sorted(list(zip(lr_yang.coef_[0], yang_feature_names)), key=lambda x: x[0], reverse=True)[:10]
svm_yang = SVC(C=4.0, kernel='linear') # rbf -> .86, linear -> 0.85
svm_yang.fit(X_train_yang, y_train_yang)
svm_yang.score(X_test_yang, y_test_yang)
f1_score(svm_yang.predict(X_test_yang), y_test_yang)
f1_score(svm_yang.predict(X_train_yang), y_train_yang)
# Dump and load to pickle file.
with open('Predictions/yang_LR.pk', 'wb') as file:
pickle.dump(lr_yang, file)
with open('Predictions/yang_SVM.pk', 'wb') as file:
pickle.dump(svm_yang, file)
with open('Predictions/yang_LR.pk', 'rb') as file:
lr_yang = pickle.load(file)
with open('Predictions/yang_SVM.pk', 'rb') as file:
svm_yang = pickle.load(file)
###Output
_____no_output_____
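###Markdown
The four per-candidate blocks above repeat the same feature-building steps with different retweet thresholds. A possible consolidation is sketched below; this is only a sketch that assumes the `*_tweets` / `*_senti_scores` structures used above, and the helper name `build_candidate_features` is not part of the original code. Usage would look like `warren_features, warren_feature_names, warren_labels = build_candidate_features(warren_tweets, warren_senti_scores, 1083, 1614)`.
###Code
def build_candidate_features(tweets, senti_scores, low, high):
    """Build (features, feature_names, labels) for one candidate.

    Tweets with retweet_count strictly between `low` and `high` are treated
    as ambiguous and skipped, mirroring the per-candidate blocks above.
    """
    max_char = max(senti_scores[t['full_text']]['usage']['text_characters'] for t in tweets)
    corpus = sorted({kw['text'] for text in senti_scores for kw in senti_scores[text]['keywords']})
    feature_names = ['sadness', 'joy', 'fear', 'disgust', 'anger',
                     'sentiment', 'character'] + corpus
    features, labels = [], []
    for t in tweets:
        if t['retweet_count'] <= low:
            labels.append(-1)
        elif t['retweet_count'] >= high:
            labels.append(1)
        else:
            continue
        scores = senti_scores[t['full_text']]
        row = list(scores['emotion']['document']['emotion'].values())
        row.append(scores['sentiment']['document']['score'])
        row.append(scores['usage']['text_characters'] / max_char)
        relevance = {kw['text']: kw['relevance'] for kw in scores['keywords']}
        row.extend(relevance.get(key, 0) for key in corpus)
        features.append(row)
    return features, feature_names, labels
###Output
_____no_output_____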
lab_4.ipynb | ###Markdown
###Code
import pandas as pd
import numpy as np
#g = pd.read_csv(filename, sep=';').groupby('')
#d = {g:x for g,x in pd.DataFrame.groupby('The_evil_column')}
wine=pd.read_csv('winequality-red.csv',delimiter=';')
wine.tail()
data = wine.replace('?',np.NaN)
print('Number of instances = %d' % (data.shape[0]))
print('Number of attributes = %d' % (data.shape[1]))
print('Number of missing values:')
for col in data.columns:
print('\t%s: %d' % (col,data[col].isna().sum()))
%matplotlib inline
data2 = data.drop(['citric acid'],axis=1)
data2['alcohol'] = pd.to_numeric(data2['alcohol'])
data2.boxplot(figsize=(20,3))
Z = (data2-data2.mean())/data2.std()
Z[20:40]
print('Number of rows before discarding outliers = %d' % (Z.shape[0]))
Z2 = Z.loc[((Z >-.1).sum(axis=1)==9) & ((Z <=1).sum(axis=1)==9),:]
print('Number of rows after discarding outliers = %d' % (Z2.shape[0]))
#data3.index = pd.to_numeric(data2['chlorides'])
data3 = data2['pH']
ax = data3.plot(kind='line',figsize=(15,3))
ax.set_title('Aggregation (variance = %.4f)' % (data3.var()))
sample = data.sample(frac=0.01, replace=True, random_state=1)
sample
###Output
_____no_output_____
###Markdown
Note for the future: split this into two parts and add the diagnostics from lecture 5.

Metropolis Hastings algorithm. The knapsack problem.

Given a set of $m$ items, each described by its mass $w_j$ and its value $v_j$, and a knapsack whose capacity limit is $C$. Assuming that the volume and shape of the items do not matter, find the subset of items of greatest value that can be carried in the knapsack. This is an NP-complete combinatorial optimization problem.

We can define the indicator variable $$x = (z_1, z_2, \ldots, z_m)$$ where each $z_i \in \{0, 1\}$ equals 1 if item $i$ is in the knapsack and 0 otherwise. This defines a space of possibilities $$\Omega = \left \{x \in \{0, 1\}^m : \sum_{j=1}^m w_j z_j \leq C \right \}$$ from which we want to find the element that maximizes the utility $$U(x) = \sum_{j=1}^m v_j z_j$$

Solution with Monte Carlo. To solve this with Monte Carlo we could: given $x_t$, choose $j \in [1, ..., m]$ uniformly at random; build $y=(z_1, z_2, ..., 1-z_j, ..., z_m)$ and, if it is the best solution so far, store it; if $y$ is feasible then $x_{t+1} = y$, otherwise $x_{t+1} = x_t$. But this could take a very long time for large $m$.

Activity: Simulated Annealing. Solve this problem using the Simulated Annealing algorithm, which is a version of the Metropolis algorithm where the distribution of interest is built from the utility function as $$p(x) = \frac{1}{Z} \exp \left(\frac{U(x)}{T} \right)$$ where $T$ is the temperature and $Z = \sum_{x\in \Omega} \exp \left(\frac{U(x)}{T} \right)$ is the evidence, which does not depend on $x$.

- Write down the acceptance rate $\alpha$ and the value of $r$
- Implement the Monte Carlo solution
- Implement the Simulated Annealing solution by replacing the naive acceptance criterion with the Metropolis criterion
- For the dataset $X_1$: compare SA with the plain Monte Carlo method. How long does each take to reach the optimal solution? Explore the influence of the parameter $T$; show and compare results for a large, an adequate and a small $T$ chosen by you, and try an adaptive $T$ given by $$T_i = \frac{1}{\log(i)}$$
- For the dataset $X_2$: find an adequate value of $T$ and show the best solution obtained using SA

References:
- Slides 17 to 21: https://cindy.informatik.uni-bremen.de/cosy/teaching/CM_2011/fitting/mcmc.pdf
- Slides 4 to 8: http://sites.science.oregonstate.edu/~kovchegy/web/papers/MCMC.pdf
###Code
X1 = {"m": 10, "C": 2586,
"v": [81, 141, 211, 321, 408, 549, 596, 750, 953, 1173],
"w": [36, 78, 110, 214, 259, 356, 377, 487, 689, 862]
}
X2 = {"m": 25, "C": 10356,
"v": [39, 93, 159, 240, 274, 493, 588, 752, 1025, 1324, 1588, 1826, 1936, 2045,
2287, 2486, 2818, 2850, 3072, 3219, 3499, 3596, 3620, 4067, 4432],
"w": [5, 42, 84, 126, 133, 309, 368, 502, 761, 1020, 1283, 1517, 1584, 1656,
1865, 2031, 2320, 2349, 2553, 2667, 2929, 3024, 3047, 3452, 3790]
}
###Output
_____no_output_____
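###Markdown
For the acceptance step asked for above: with a symmetric bit-flip proposal the ratio is $r = \frac{p(y)}{p(x_t)} = \exp\left(\frac{U(y)-U(x_t)}{T}\right)$ and the acceptance probability is $\alpha = \min(1, r)$. A minimal sketch of the SA loop for this problem follows; it is only an illustrative sketch (the function name and default parameters are not part of the assignment statement).
###Code
import numpy as np

def simulated_annealing_knapsack(data, T=100.0, n_iter=20000, seed=0):
    """Sketch of SA for the knapsack dictionaries X1/X2 defined above."""
    rng = np.random.default_rng(seed)
    w, v = np.array(data["w"]), np.array(data["v"])
    C, m = data["C"], data["m"]
    x = np.zeros(m, dtype=int)               # start from the empty (feasible) knapsack
    best_x, best_u = x.copy(), 0
    for i in range(1, n_iter + 1):
        j = rng.integers(m)                   # flip one randomly chosen item
        y = x.copy()
        y[j] = 1 - y[j]
        if w @ y > C:                         # infeasible proposal: keep x
            continue
        r = np.exp((v @ y - v @ x) / T)       # ratio for p(x) proportional to exp(U(x)/T)
        if rng.random() < min(1.0, r):        # Metropolis acceptance, alpha = min(1, r)
            x = y
        if v @ x > best_u:
            best_x, best_u = x.copy(), v @ x
    return best_x, best_u

# Example: simulated_annealing_knapsack(X1, T=50.0)
###Output
_____no_output_____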
###Markdown
Course "Computational Practicum". Assignment No. 4. Approximate computation of an integral using composite quadrature formulas. Ковальчуков Александр, group 223. Variant No. 4.

Problem statement. We need to write a program that approximately computes integrals using composite quadrature formulas. The procedure is the following: split the integration interval $[a, b]$ into $m$ equal parts, where $h = \frac{b - a}{m}$ is the length of one subinterval, and denote $x_k = a + kh, \; f_k = f(x_k), \; k = 0, 1, \dots, m$. The integrals are computed by the following formulas.

Composite rectangle rule: $$\int_a^b f(x)dx \approx h \sum_{k=0}^{m-1}f(\alpha + kh)$$ With $\alpha = a$ we obtain the left-rectangle rule, with $\alpha = a + h/2$ the midpoint rule, and with $\alpha = a + h$ the right-rectangle rule. The algebraic degree of precision of the left and right rectangle rules is 0, and of the midpoint rule it is 1. The theoretical error of the left and right rectangle rules is $\frac{1}{2} (b-a) h \max_{[a, b]} | f'(x)|$, and of the midpoint rule it is $\frac{1}{24} (b - a) h^2 \max_{[a, b]} |f''(x)|$. Exactly $m$ function values are used.

Composite trapezoidal rule: $$\int_a^b f(x)dx \approx \frac{h}{2} \left(f_0 + f_m + 2 \sum_{k=1}^{m-1}f_k\right)$$ Its algebraic degree of precision is 1, its theoretical error is $\frac{1}{12} (b - a) h^2 \max_{[a, b]} |f''(x)|$, and exactly $m + 1$ function values are used.

Composite Simpson rule: $$\int_a^b f(x)dx \approx \frac{h}{6} \left(f_0 + f_m + 4 \sum_{k=0}^{m-1}f\!\left(a + \tfrac{h}{2} + k h\right) + 2 \sum_{k=1}^{m-1} f(a + kh)\right)$$ Its algebraic degree of precision is 3, its theoretical error is $\frac{1}{2880} (b - a) h^4 \max_{[a, b]} |f^{(4)}(x)|$, and $2m + 1$ function values are used.

Problem parameters: $a, b$ are the endpoints of the integration interval and $m$ is the number of subintervals of the composite quadrature formula. The user enters $a$, $b$ and $m$ from the keyboard and chooses one of the predefined functions to integrate:

$p_0(x) = 8$, $p_1(x) = 1.32 x - 1$, $p_2(x) = -4 x^2 + 1.23 x - 1$, $p_3(x) = 0.232 x^3 + 32$, $p_5(x) = 1.27 x^5 + 2.04x$, $\cos(x)$, $\exp(x)$.

The program is written in Python using the interactive Jupyter notebook environment.
###Code
import math
import pandas as pd
def left(a, b, func, w, m):
h = (b - a) / m
alpha = a
return h * sum(w(alpha + k * h) * func(alpha + k * h) for k in range(m))
def right(a, b, func, w, m):
h = (b - a) / m
alpha = a + h
return h * sum(w(alpha + k * h) * func(alpha + k * h) for k in range(m))
def middle(a, b, func, w, m):
h = (b - a) / m
alpha = a + h/2
return h * sum(w(alpha + k * h) * func(alpha + k * h) for k in range(m))
def trapeze(a, b, func, w, m):
h = (b - a) / m
alpha = a
return h / 2 * (w(a) * func(a) + w(b) * func(b) + 2 * sum(w(alpha + k * h) * func(alpha + k * h) for k in range(1, m)))
def simpson(a, b, func, w, m):
h = (b - a) / m
alpha = a + h / 2
beta = a + h
print(b)
return h / 6 * (w(a) * func(a) + 4 * sum(w(alpha + k * h) * func(alpha + k * h) for k in range(m)) +
w(b) * func(b) + 2 * sum(w(alpha + k * h) * func(beta + k * h) for k in range(m - 1)))
###Output
_____no_output_____
###Markdown
Definition of some functions, their antiderivatives, and their 1st, 2nd and 4th derivatives for computing the theoretical error bounds:
###Code
w = lambda x: 1
exp = lambda x: math.e ** x
I_exp = exp
d_exp = d2_exp = d4_exp = exp
cos = lambda x: math.cos(x)
I_cos = lambda x: math.sin(x)
d_cos = lambda x: -math.sin(x)
d2_cos = lambda x: -math.cos(x)
d4_cos = lambda x: math.cos(x)
p_0 = lambda x: 8
I_p_0 = lambda x: 8 * x
d_p_0 = d2_p_0 = d4_p_0 = lambda x: 0
p_1 = lambda x: 1.32 * x - 1
I_p_1 = lambda x: 1.32 / 2 * x**2 - x
d_p_1 = lambda x: 1.32
d2_p_1 = d4_p_1 = lambda x: 0
p_2 = lambda x: -4 * x**2 + 1.23 * x - 1
I_p_2 = lambda x: -4 / 3 * x**3 + 1.23 / 2 * x**2 - x
d_p_2 = lambda x: -4 * 2 * x + 1.23
d2_p_2 = lambda x: - 4 * 2
d4_p_2 = lambda x: 0
p_3 = lambda x: 0.232 * x**3 + 32
I_p_3 = lambda x: 0.232 / 4 * x**4 + 32 * x
d_p_3 = lambda x: 0.232 * 3 * x**2
d2_p_3 = lambda x: 0.232 * 3 * 2 * x
d4_p_3 = lambda x: 0
p_5 = lambda x: 1.27 * x**5 + 2.04 * x
I_p_5 = lambda x: 1.27 / 6 * x**6 + 2.04 / 2 * x**2
d_p_5 = lambda x: 1.27 * 5 * x**4 + 2.04
d2_p_5 = lambda x: 1.27 * 5 * 4 * x**3
d4_p_5 = lambda x: 1.27 * 5 * 4 * 3 * 2 * x
functions = {'exp': (exp, I_exp, d_exp, d2_exp, d4_exp),
'cos': (cos, I_cos, d_cos, d2_cos, d4_cos),
'p_0': (p_0, I_p_0, d_p_0, d2_p_0, d4_p_0),
'p_1': (p_1, I_p_1, d_p_1, d2_p_1, d4_p_1),
'p_2': (p_2, I_p_2, d_p_2, d2_p_2, d4_p_2),
'p_3': (p_3, I_p_3, d_p_3, d2_p_3, d4_p_3),
'p_5': (p_5, I_p_5, d_p_5, d2_p_5, d4_p_5)
}
###Output
_____no_output_____
###Markdown
Main program code
###Code
def integrate():
    # read the input data
print('Введите a - начало промежутка интегрирования')
A = float(input())
print('Введите b - конец промежутка интегрирования')
B = float(input())
print('Введите m - количество промежутков в разбиении')
M = int(input())
print('Выберите функцию', functions.keys())
func_str = input()
if func_str in functions.keys():
func, I_func, d_func, d2_func, d4_func = functions[func_str]
else:
print('Функция не найдена')
return 0
    # compute the integrals
results = {'left': {}, 'right': {}, 'middle': {}, 'trapeze': {}, 'simpson': {}}
results['left']['J(h)'] = left(A, B, func, w, M)
results['right']['J(h)'] = right(A, B, func, w, M)
results['middle']['J(h)'] = middle(A, B, func, w, M)
results['trapeze']['J(h)'] = trapeze(A, B, func, w, M)
results['simpson']['J(h)'] = simpson(A, B, func, w, M)
    # compute the theoretical error bounds
h = (B - A) / M
results['left']['Теор погр'] = 1/2 * (B - A) * h * max(map(abs, [d_func(A + k * h) for k in range(M + 1)]))
results['right']['Теор погр'] = 1/2 * (B - A) * h * max(map(abs, [d_func(A + k * h) for k in range(M + 1)]))
results['middle']['Теор погр'] = 1/24 * (B - A) * h**2 * max(map(abs, [d2_func(A + k * h) for k in range(M + 1)]))
results['trapeze']['Теор погр'] = 1/12 * (B - A) * h**2 * max(map(abs, [d2_func(A + k * h) for k in range(M + 1)]))
    results['simpson']['Теор погр'] = 1/2880 * (B - A) * h**4 * max(map(abs, [d4_func(A + k * h) for k in range(M + 1)]))
    # compute the actual error
exact = I_func(B) - I_func(A)
for i in results.keys():
results[i]['|J(h) - J|'] = abs(results[i]['J(h)'] - exact)
print(f'a = {A}, b= {B}, m = {M}, h = {h}')
print(f'Точное значение интеграла равно J = {exact}')
print('\nРезультаты численного интегрирования\n')
    # print the results
df = pd.DataFrame(results)
print(df)
###Output
_____no_output_____
###Markdown
Testing the program
###Code
while True:
integrate()
print("\nВведите q, чтобы завершить программу, или любую другую"
" клавишу, чтобы продолжить и ввести новые параметры:", end=' ')
k = input()
if k == 'q':
break
else:
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n')
###Output
Введите a - начало промежутка интегрирования
0
Введите b - конец промежутка интегрирования
1
Введите m - количество промежутков в разбиении
100
Выберите функцию dict_keys(['exp', 'cos', 'p_0', 'p_1', 'p_2', 'p_3', 'p_5'])
p_0
1.0
a = 0.0, b= 1.0, m = 100, h = 0.01
Точное значение интеграла равно J = 8.0
Результаты численного интегрирования
left right middle trapeze simpson
J(h) 8.0 8.0 8.0 8.0 8.0
Теор погр 0.0 0.0 0.0 0.0 0.0
|J(h) - J| 0.0 0.0 0.0 0.0 0.0
Введите q, чтобы завершить программу, или любую другую клавишу, чтобы продолжить и ввести новые параметры: c
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Введите a - начало промежутка интегрирования
-3
Введите b - конец промежутка интегрирования
1.5
Введите m - количество промежутков в разбиении
10
Выберите функцию dict_keys(['exp', 'cos', 'p_0', 'p_1', 'p_2', 'p_3', 'p_5'])
p_1
1.5
a = -3.0, b= 1.5, m = 10, h = 0.45
Точное значение интеграла равно J = -8.955000000000002
Результаты численного интегрирования
left right middle trapeze simpson
J(h) -10.2915 -7.6185 -8.955 -8.955000e+00 -8.955000e+00
Теор погр 1.3365 1.3365 0.000 0.000000e+00 0.000000e+00
|J(h) - J| 1.3365 1.3365 0.000 3.552714e-15 3.552714e-15
Введите q, чтобы завершить программу, или любую другую клавишу, чтобы продолжить и ввести новые параметры: c
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Введите a - начало промежутка интегрирования
-2
Введите b - конец промежутка интегрирования
4
Введите m - количество промежутков в разбиении
1000
Выберите функцию dict_keys(['exp', 'cos', 'p_0', 'p_1', 'p_2', 'p_3', 'p_5'])
p_2
4.0
a = -2.0, b= 4.0, m = 1000, h = 0.006
Точное значение интеграла равно J = -94.61999999999999
Результаты численного интегрирования
left right middle trapeze simpson
J(h) -94.498284 -94.742004 -94.619928 -94.620144 -9.462000e+01
Теор погр 0.553860 0.553860 0.000072 0.000144 0.000000e+00
|J(h) - J| 0.121716 0.122004 0.000072 0.000144 1.421085e-14
Введите q, чтобы завершить программу, или любую другую клавишу, чтобы продолжить и ввести новые параметры: 0
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Введите a - начало промежутка интегрирования
0
Введите b - конец промежутка интегрирования
1
Введите m - количество промежутков в разбиении
10000
Выберите функцию dict_keys(['exp', 'cos', 'p_0', 'p_1', 'p_2', 'p_3', 'p_5'])
p_3
1.0
a = 0.0, b= 1.0, m = 10000, h = 0.0001
Точное значение интеграла равно J = 32.058
Результаты численного интегрирования
left right middle trapeze simpson
J(h) 32.057988 32.058012 3.205800e+01 3.205800e+01 3.205800e+01
Теор погр 0.000035 0.000035 5.800000e-10 1.160000e-09 0.000000e+00
|J(h) - J| 0.000012 0.000012 2.899867e-10 5.799876e-10 7.105427e-15
Введите q, чтобы завершить программу, или любую другую клавишу, чтобы продолжить и ввести новые параметры: c
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Введите a - начало промежутка интегрирования
-5
Введите b - конец промежутка интегрирования
5
Введите m - количество промежутков в разбиении
100
Выберите функцию dict_keys(['exp', 'cos', 'p_0', 'p_1', 'p_2', 'p_3', 'p_5'])
p_5
5.0
a = -5.0, b= 5.0, m = 100, h = 0.1
Точное значение интеграла равно J = 0.0
Результаты численного интегрирования
left right middle trapeze simpson
J(h) -397.895 397.895 1.409717e-12 1.909939e-12 6.669628e-13
Теор погр 1985.395 1985.395 1.322917e+01 2.645833e+01 2.642164e-04
|J(h) - J| 397.895 397.895 1.409717e-12 1.909939e-12 6.669628e-13
Введите q, чтобы завершить программу, или любую другую клавишу, чтобы продолжить и ввести новые параметры: c
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Введите a - начало промежутка интегрирования
20
Введите b - конец промежутка интегрирования
25
Введите m - количество промежутков в разбиении
1000
Выберите функцию dict_keys(['exp', 'cos', 'p_0', 'p_1', 'p_2', 'p_3', 'p_5'])
exp
25.0
a = 20.0, b= 25.0, m = 1000, h = 0.005
Точное значение интеграла равно J = 71519734141.97598
Результаты численного интегрирования
left right middle trapeze \
J(h) 7.134108e+10 7.169868e+10 7.151966e+10 7.151988e+10
Теор погр 9.000612e+08 9.000612e+08 3.750255e+05 7.500510e+05
|J(h) - J| 1.786503e+08 1.789483e+08 7.449967e+04 1.489994e+05
simpson
J(h) 7.151973e+10
Теор погр 7.802195e-02
|J(h) - J| 1.556396e-02
Введите q, чтобы завершить программу, или любую другую клавишу, чтобы продолжить и ввести новые параметры: c
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Введите a - начало промежутка интегрирования
20
Введите b - конец промежутка интегрирования
25
Введите m - количество промежутков в разбиении
10000
Выберите функцию dict_keys(['exp', 'cos', 'p_0', 'p_1', 'p_2', 'p_3', 'p_5'])
exp
25.0
a = 20.0, b= 25.0, m = 10000, h = 0.0005
Точное значение интеграла равно J = 71519734141.97598
Результаты численного интегрирования
left right middle trapeze \
J(h) 7.150186e+10 7.153762e+10 7.151973e+10 7.151974e+10
Теор погр 9.000612e+07 9.000612e+07 3.750255e+03 7.500510e+03
|J(h) - J| 1.787844e+07 1.788142e+07 7.449972e+02 1.489995e+03
simpson
J(h) 7.151973e+10
Теор погр 7.802195e-06
|J(h) - J| 0.000000e+00
Введите q, чтобы завершить программу, или любую другую клавишу, чтобы продолжить и ввести новые параметры: c
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Введите a - начало промежутка интегрирования
20
Введите b - конец промежутка интегрирования
25
Введите m - количество промежутков в разбиении
100000
Выберите функцию dict_keys(['exp', 'cos', 'p_0', 'p_1', 'p_2', 'p_3', 'p_5'])
exp
25.0
a = 20.0, b= 25.0, m = 100000, h = 5e-05
Точное значение интеграла равно J = 71519734141.97598
Результаты численного интегрирования
left right middle trapeze \
J(h) 7.151795e+10 7.152152e+10 7.151973e+10 7.151973e+10
Теор погр 9.000612e+06 9.000612e+06 3.750255e+01 7.500510e+01
|J(h) - J| 1.787978e+06 1.788008e+06 7.449417e+00 1.489980e+01
simpson
J(h) 7.151973e+10
Теор погр 7.802195e-10
|J(h) - J| 3.662109e-04
Введите q, чтобы завершить программу, или любую другую клавишу, чтобы продолжить и ввести новые параметры: c
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Введите a - начало промежутка интегрирования
20
Введите b - конец промежутка интегрирования
25
Введите m - количество промежутков в разбиении
1000000
Выберите функцию dict_keys(['exp', 'cos', 'p_0', 'p_1', 'p_2', 'p_3', 'p_5'])
exp
25.0
a = 20.0, b= 25.0, m = 1000000, h = 5e-06
Точное значение интеграла равно J = 71519734141.97598
Результаты численного интегрирования
left right middle trapeze \
J(h) 7.151956e+10 7.151991e+10 7.151973e+10 7.151973e+10
Теор погр 9.000612e+05 9.000612e+05 3.750255e-01 7.500510e-01
|J(h) - J| 1.787992e+05 1.787995e+05 7.571411e-02 1.491089e-01
simpson
J(h) 7.151973e+10
Теор погр 7.802195e-14
|J(h) - J| 7.324219e-04
Введите q, чтобы завершить программу, или любую другую клавишу, чтобы продолжить и ввести новые параметры: q
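###Markdown
As a quick sanity check of the theoretical orders above (a sketch that simply reuses the rules and the `functions` dictionary defined earlier), the observed order $p$ can be estimated from the error ratio at steps $h$ and $h/2$, since $\mathrm{err}(h)/\mathrm{err}(h/2) \approx 2^p$.
###Code
# Sketch: estimate the empirical convergence order for f = exp on [0, 1]
# (note: simpson() also prints b because of the debug print inside it)
A, B = 0.0, 1.0
func, I_func, *_ = functions['exp']
exact = I_func(B) - I_func(A)
for name, rule in [('left', left), ('middle', middle), ('trapeze', trapeze), ('simpson', simpson)]:
    e1 = abs(rule(A, B, func, w, 100) - exact)
    e2 = abs(rule(A, B, func, w, 200) - exact)
    print(name, 'estimated order:', math.log2(e1 / e2))
###Output
_____no_output_____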
###Markdown
Lecture 4: Use SQL; Python review
###Code
demo_str = 'this is my string'
for word_item in demo_str.split():
print(word_item)
print('{} + {} is {} ' .format(1,2,1+2))
###Output
1 + 2 is 3
###Markdown
install or import libs
###Code
!pip install psycopg2
import pandas
import configparser
import psycopg2
###Output
/home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use "pip install psycopg2-binary" instead. For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>.
""")
###Markdown
establish connection
###Code
config = configparser.ConfigParser()
config.read('config.ini')
host = config['myaws']['host']
db = config['myaws']['db']
user = config['myaws']['user']
pwd = config['myaws']['pwd']
print(user)
conn = psycopg2.connect(
host = host,
user = user,
password = pwd,
dbname = db
)
cur = conn.cursor()
###Output
_____no_output_____
###Markdown
query the data
###Code
sql_statement = """
select bathroom,bedroom
from public.house_price
where bathroom >2
"""
cur.execute(sql_statement)
cur.fetchone()
for bathroom,bedroom in cur.fetchall()[:10]:
print (bathroom,bedroom)
###Output
3 3
3 4
5 4
3 3
6 5
6 5
3 3
3 3
4 5
4 5
###Markdown
pandas
###Code
import pandas
df = pandas.read_sql_query(sql_statement,conn)
df[:10]
sql_avg_price_year = """
select built_in,
avg(price) as avg_price
from public.house_price
group by built_in
order by built_in
"""
df = pandas.read_sql_query(sql_avg_price_year,conn)
df.plot(x = 'built_in', y = 'avg_price')
sql_price_area = """
select price, area
from public.house_price
"""
df = pandas.read_sql_query(sql_price_area,conn)
df['price'].hist()
df.plot.scatter(x='area', y= 'price')
sql_avg_price_house_type = """
select house_type,
avg(price) as avg_price
from public.house_price
group by house_type
order by avg_price desc
"""
df = pandas.read_sql_query(sql_avg_price_house_type,conn)
df.plot.bar(y= 'avg_price', x= 'house_type')
###Output
_____no_output_____
###Markdown
modify tables
###Code
sql_insert = """
insert into gp12.student(s_email,s_name,major)
values('{}','{}','{}')
""".format('[email protected]', 's5', 'GS')
print(sql_insert)
cur.execute(sql_insert)
conn.commit()
df=pandas.read_sql_query('select * from gp12.student',conn)
df[-1:]
cur.execute("ROLLBACK")
conn.commit()
###Output
_____no_output_____
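###Markdown
Note: building the INSERT statement with `str.format`, as above, is vulnerable to SQL injection if the values ever come from user input. A safer variant is sketched below, reusing the same `conn`/`cur` objects; the inserted values are illustrative only.
###Code
sql_insert = """
insert into gp12.student(s_email, s_name, major)
values (%s, %s, %s)
"""
# psycopg2 performs the parameter substitution safely
cur.execute(sql_insert, ('[email protected]', 's6', 'GS'))
conn.commit()
###Output
_____no_output_____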
###Markdown
close connections
###Code
cur.close()
conn.close()
###Output
_____no_output_____
###Markdown
Computational Practicum. Assignment No. 4. The Cauchy problem for a first-order ordinary differential equation. Ковальчуков Александр, group 321. Variant No. 6.

$y' = 1 + (0.5 - x) \sin y - (1 + x) y$, $\quad y(0) = 0$
###Code
from math import sin
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
%matplotlib inline
plt.rcParams['figure.dpi'] = 150
def dy(y, x):
return 1 + (0.5 - x) * sin(y) - (1 + x) * y
def ddy(x):
return dy(x[1], x[0])
y0 = 0
###Output
_____no_output_____
###Markdown
Solution using built-in tools (`scipy.integrate.odeint`)
###Code
a, b = 0, 1
h = 0.1
n = int((b - a) / h) + 1
x = np.arange(a, b + h, h)
y = odeint(dy, y0, x)
y = np.array(y).reshape(n, )
d_y = list(map(ddy, zip(x, y)))
y_math = pd.DataFrame({"x" : x, "y" : y, "y'" : d_y})
y_math
plt.plot(x, y, label="y")
plt.plot(x, d_y, label="y'")
plt.legend()
plt.xlabel('x')
plt.ylabel('y')
plt.title("Графики y и y', построенные встроенными методами")
plt.show()
###Output
_____no_output_____
###Markdown
Solution by Euler's method with step $h$
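Euler's explicit scheme advances the solution as $y_{k+1} = y_k + h\,f(x_k, y_k)$ with $x_k = a + kh$.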
###Code
def euler(h):
    x_h = [a]
    y_h = [y0]
    y = y0
    n = int((b - a) / h) + 1
    for i in range(1, n):
        # explicit Euler step: y_{k+1} = y_k + h * f(x_k, y_k), evaluated at the previous node
        y = y + h * dy(y, a + h * (i - 1))
        x_h.append(a + h * i)
        y_h.append(y)
    return pd.DataFrame({'x': x_h, 'y': y_h})
y_h = euler(h)
y_h
###Output
_____no_output_____
###Markdown
Solution by Euler's method with step $\frac{h}{2}$
###Code
y_h2 = euler(h/2)
y_h2
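# Runge (Richardson) error estimate for a first-order method: R ≈ (y_{h/2} - y_h) / (2^1 - 1)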
R_m = (np.array(y_h2['y'][::2]) - np.array(y_h['y'])) / (2**1 - 1)
y_rev = np.array(y_h2['y'][::2]) + R_m
y_rev = pd.DataFrame({'x': y_h['x'], 'y' : y_rev})
y_rev
###Output
_____no_output_____
###Markdown
Comparison table
###Code
pd.DataFrame({'x': y_math['x'],
'y_math': y_math['y'],
'y_h': y_h['y'],
'y_h2': np.array(y_h2['y'][::2]),
'y_rev': y_rev['y'],
'y_rev - y_math': y_rev['y'] - y_math['y']})
x = y_math['x']
plt.plot(x, y_math['y'], label="y_math")
plt.plot(x, y_h['y'], label="y_h")
plt.plot(x, np.array(y_h2['y'][::2]), label="y_h2")
plt.plot(x, y_rev['y'], label="y_rev")
plt.legend()
plt.xlabel('x')
plt.ylabel('y')
plt.title("Сравнительный график")
plt.show()
###Output
_____no_output_____
###Markdown
Fourth-order Runge-Kutta method
###Code
x_h = [0]
y_h = [y0]
y = y0
x = a
h = 0.1
n = int((b - a) / h) + 1
for i in range(1, n):
k1 = h * dy(y, x)
k2 = h * dy(y + k1/2, x + h/2)
k3 = h * dy(y + k2/2, x + h/2)
k4 = h * dy(y + k3, x + h)
x += h
y += 1/6 * (k1 + 2 * k2 + 2 * k3 + k4)
x_h.append(x)
y_h.append(y)
y_RK = pd.DataFrame({'x': x_h, 'y': y_h})
y_RK
###Output
_____no_output_____
###Markdown
4th-order Adams extrapolation method
###Code
# take the starting values from the Runge-Kutta table
y_Ad_ex = y_RK.copy()
y = y_Ad_ex['y'][4]
# iterate only up to n-2 so that the assignment to index i+1 stays inside the table
for i in range(4, n - 1):
y = y + 1 / 720 * h * ( 1901 * dy(y_Ad_ex['y'][i], y_Ad_ex['x'][i])
- 2774 * dy(y_Ad_ex['y'][i-1], y_Ad_ex['x'][i-1])
+ 2616 * dy(y_Ad_ex['y'][i-2], y_Ad_ex['x'][i-2])
- 1274 * dy(y_Ad_ex['y'][i-3], y_Ad_ex['x'][i-3])
+ 251 * dy(y_Ad_ex['y'][i-4], y_Ad_ex['x'][i-4]))
y_Ad_ex['y'][i + 1] = y
y_Ad_ex
###Output
_____no_output_____
###Markdown
4th-order Adams interpolation method
###Code
# take the starting values from the Adams extrapolation (predictor) table
y_Ad_in = y_Ad_ex.copy()
y = y_Ad_ex['y'][3]
for i in range(3, n-1):
y = y + 1 / 720 * h * ( 251 * dy(y_Ad_in['y'][i + 1], y_Ad_in['x'][i + 1])
+ 646 * dy(y_Ad_in['y'][i], y_Ad_in['x'][i])
- 264 * dy(y_Ad_in['y'][i-1], y_Ad_in['x'][i-1])
+ 106 * dy(y_Ad_in['y'][i-2], y_Ad_in['x'][i-2])
- 19 * dy(y_Ad_in['y'][i-3], y_Ad_in['x'][i-3]))
y_Ad_in['y'][i + 1] = y
y_Ad_in
###Output
_____no_output_____
###Markdown
Comparison table
###Code
pd.DataFrame({'x': y_math['x'],
'y_math': y_math['y'],
'y_math - y_RK': y_math['y'] - y_RK['y'],
'y_math - y_Ad_ex': y_math['y'] - y_Ad_ex['y'],
'y_math - y_Ad_in': y_math['y'] - y_Ad_in['y'],
})
###Output
_____no_output_____
docs/source/notebooks/getting_started.ipynb | ###Markdown
Getting started with PyMC3Authors: John Salvatier, Thomas V. Wiecki, Christopher FonnesbeckNote: This text is based on the [PeerJ CS publication on PyMC3](https://peerj.com/articles/cs-55/). AbstractProbabilistic Programming allows for automatic Bayesian inference on user-defined probabilistic models. Recent advances in Markov chain Monte Carlo (MCMC) sampling allow inference on increasingly complex models. This class of MCMC, known as Hamiltonian Monte Carlo, requires gradient information which is often not readily available. PyMC3 is a new open source Probabilistic Programming framework written in Python that uses Theano to compute gradients via automatic differentiation as well as compile probabilistic programs on-the-fly to C for increased speed. Contrary to other Probabilistic Programming languages, PyMC3 allows model specification directly in Python code. The lack of a domain specific language allows for great flexibility and direct interaction with the model. This paper is a tutorial-style introduction to this software package. IntroductionProbabilistic programming (PP) allows flexible specification of Bayesian statistical models in code. PyMC3 is a new, open-source PP framework with an intuitive and readable, yet powerful, syntax that is close to the natural syntax statisticians use to describe models. It features next-generation Markov chain Monte Carlo (MCMC) sampling algorithms such as the No-U-Turn Sampler (NUTS; Hoffman, 2014), a self-tuning variant of Hamiltonian Monte Carlo (HMC; Duane, 1987). This class of samplers works well on high dimensional and complex posterior distributions and allows many complex models to be fit without specialized knowledge about fitting algorithms. HMC and NUTS take advantage of gradient information from the likelihood to achieve much faster convergence than traditional sampling methods, especially for larger models. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo, which means you usually don't need to have specialized knowledge about how the algorithms work. PyMC3, Stan (Stan Development Team, 2014), and the LaplacesDemon package for R are currently the only PP packages to offer HMC.Probabilistic programming in Python confers a number of advantages including multi-platform compatibility, an expressive yet clean and readable syntax, easy integration with other scientific libraries, and extensibility via C, C++, Fortran or Cython. These features make it relatively straightforward to write and use custom statistical distributions, samplers and transformation functions, as required by Bayesian analysis.While most of PyMC3's user-facing features are written in pure Python, it leverages Theano (Bergstra et al., 2010) to transparently transcode models to C and compile them to machine code, thereby boosting performance. Theano is a library that allows expressions to be defined using generalized vector data structures called *tensors*, which are tightly integrated with the popular NumPy `ndarray` data structure, and similarly allow for broadcasting and advanced indexing, just as NumPy arrays do. Theano also automatically optimizes the likelihood's computational graph for speed and provides simple GPU integration.Here, we present a primer on the use of PyMC3 for solving general Bayesian statistical inference and prediction problems. 
We will first see the basics of how to use PyMC3, motivated by a simple example: installation, data creation, model definition, model fitting and posterior analysis. Then we will cover two case studies and use them to show how to define and fit more sophisticated models. Finally we will show how to extend PyMC3 and discuss other useful features: the Generalized Linear Models subpackage, custom distributions, custom transformations and alternative storage backends. InstallationRunning PyMC3 requires a working Python interpreter, either version 2.7 (or more recent) or 3.5 (or more recent); we recommend that new users install version 3.5. A complete Python installation for Mac OSX, Linux and Windows can most easily be obtained by downloading and installing the free [`Anaconda Python Distribution`](https://store.continuum.io/cshop/anaconda/) by ContinuumIO. `PyMC3` can be installed using `pip` (https://pip.pypa.io/en/latest/installing.html):```pip install pymc3```Or via conda-forge:```conda install -c conda-forge pymc3```The current development branch of PyMC3 can be installed from GitHub, also using pip:```pip install git+https://github.com/pymc-devs/pymc3```The source code for PyMC3 is hosted on GitHub at https://github.com/pymc-devs/pymc3 and is distributed under the liberal [Apache License 2.0](https://github.com/pymc-devs/pymc3/blob/master/LICENSE). On the GitHub site, users may also report bugs and other issues, as well as contribute documentation or code to the project, which we actively encourage. A Motivating Example: Linear RegressionTo introduce model definition, fitting and posterior analysis, we first consider a simple Bayesian linear regression model with normal priors for the parameters. We are interested in predicting outcomes $Y$ as normally-distributed observations with an expected value $\mu$ that is a linear function of two predictor variables, $X_1$ and $X_2$.$$\begin{aligned} Y &\sim \mathcal{N}(\mu, \sigma^2) \\\mu &= \alpha + \beta_1 X_1 + \beta_2 X_2\end{aligned}$$where $\alpha$ is the intercept, and $\beta_i$ is the coefficient for covariate $X_i$, while $\sigma$ represents the observation error. Since we are constructing a Bayesian model, we must assign a prior distribution to the unknown variables in the model. We choose zero-mean normal priors with variance of 100 for both regression coefficients, which corresponds to *weak* information regarding the true parameter values. We choose a half-normal distribution (normal distribution bounded at zero) as the prior for $\sigma$.$$\begin{aligned} \alpha &\sim \mathcal{N}(0, 100) \\\beta_i &\sim \mathcal{N}(0, 100) \\\sigma &\sim \lvert\mathcal{N}(0, 1){\rvert}\end{aligned}$$ Generating dataWe can simulate some artificial data from this model using only NumPy's `random` module, and then use PyMC3 to try to recover the corresponding parameters. We are intentionally generating the data to closely correspond the PyMC3 model structure.
###Code
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
# Initialize random number generator
np.random.seed(123)
# True parameter values
alpha, sigma = 1, 1
beta = [1, 2.5]
# Size of dataset
size = 100
# Predictor variable
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
# Simulate outcome variable
Y = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma
###Output
_____no_output_____
###Markdown
Here is what the simulated data look like. We use matplotlib's `pyplot` module for plotting.
###Code
%matplotlib inline
fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,4))
axes[0].scatter(X1, Y)
axes[1].scatter(X2, Y)
axes[0].set_ylabel('Y'); axes[0].set_xlabel('X1'); axes[1].set_xlabel('X2');
###Output
_____no_output_____
###Markdown
Model Specification. Specifying this model in PyMC3 is straightforward because the syntax is close to the statistical notation. For the most part, each line of Python code corresponds to a line in the model notation above. First, we import the PyMC3 package. We use the convention of importing it as `pm`.
###Code
import pymc3 as pm
print('Running on PyMC3 v{}'.format(pm.__version__))
###Output
Running on PyMC3 v3.4.1
###Markdown
Now we build our model, which we will present in full first, then explain each part line-by-line.
###Code
basic_model = pm.Model()
with basic_model:
# Priors for unknown model parameters
alpha = pm.Normal('alpha', mu=0, sd=10)
beta = pm.Normal('beta', mu=0, sd=10, shape=2)
sigma = pm.HalfNormal('sigma', sd=1)
# Expected value of outcome
mu = alpha + beta[0]*X1 + beta[1]*X2
# Likelihood (sampling distribution) of observations
Y_obs = pm.Normal('Y_obs', mu=mu, sd=sigma, observed=Y)
###Output
_____no_output_____
###Markdown
The first line,```pythonbasic_model = Model()```creates a new `Model` object which is a container for the model random variables.Following instantiation of the model, the subsequent specification of the model components is performed inside a `with` statement:```pythonwith basic_model:```This creates a *context manager*, with our `basic_model` as the context, that includes all statements until the indented block ends. This means all PyMC3 objects introduced in the indented code block below the `with` statement are added to the model behind the scenes. Absent this context manager idiom, we would be forced to manually associate each of the variables with `basic_model` right after we create them. If you try to create a new random variable without a `with model:` statement, it will raise an error since there is no obvious model for the variable to be added to.The first three statements in the context manager:```pythonalpha = Normal('alpha', mu=0, sd=10)beta = Normal('beta', mu=0, sd=10, shape=2)sigma = HalfNormal('sigma', sd=1)```create **stochastic** random variables with Normal prior distributions for the regression coefficients with a mean of 0 and standard deviation of 10, and a half-normal distribution for the standard deviation of the observations, $\sigma$. These are stochastic because their values are partly determined by its parents in the dependency graph of random variables, which for priors are simple constants, and partly random (or stochastic). We call the `Normal` constructor to create a random variable to use as a normal prior. The first argument is always the *name* of the random variable, which should almost always match the name of the Python variable being assigned to, since it is sometimes used to retrieve the variable from the model for summarizing output. The remaining required arguments for a stochastic object are the parameters, in this case `mu`, the mean, and `sd`, the standard deviation, which we assign hyperparameter values for the model. In general, a distribution's parameters are values that determine the location, shape or scale of the random variable, depending on the parameterization of the distribution. Most commonly used distributions, such as `Beta`, `Exponential`, `Categorical`, `Gamma`, `Binomial` and many others, are available in PyMC3.The `beta` variable has an additional `shape` argument to denote it as a vector-valued parameter of size 2. The `shape` argument is available for all distributions and specifies the length or shape of the random variable, but is optional for scalar variables, since it defaults to a value of one. It can be an integer, to specify an array, or a tuple, to specify a multidimensional array (*e.g.* `shape=(5,7)` makes random variable that takes on 5 by 7 matrix values). Detailed notes about distributions, sampling methods and other PyMC3 functions are available via the `help` function.
###Code
help(pm.Normal) #try help(Model), help(Uniform) or help(basic_model)
###Output
Help on class Normal in module pymc3.distributions.continuous:
class Normal(pymc3.distributions.distribution.Continuous)
| Univariate normal log-likelihood.
|
| The pdf of this distribution is
|
| .. math::
|
| f(x \mid \mu, \tau) =
| \sqrt{\frac{\tau}{2\pi}}
| \exp\left\{ -\frac{\tau}{2} (x-\mu)^2 \right\}
|
| Normal distribution can be parameterized either in terms of precision
| or standard deviation. The link between the two parametrizations is
| given by
|
| .. math::
|
| \tau = \dfrac{1}{\sigma^2}
|
| .. plot::
|
| import matplotlib.pyplot as plt
| import numpy as np
| import scipy.stats as st
| plt.style.use('seaborn-darkgrid')
| x = np.linspace(-5, 5, 1000)
| mus = [0., 0., 0., -2.]
| sds = [0.4, 1., 2., 0.4]
| for mu, sd in zip(mus, sds):
| pdf = st.norm.pdf(x, mu, sd)
| plt.plot(x, pdf, label=r'$\mu$ = {}, $\sigma$ = {}'.format(mu, sd))
| plt.xlabel('x', fontsize=12)
| plt.ylabel('f(x)', fontsize=12)
| plt.legend(loc=1)
| plt.show()
|
| ======== ==========================================
| Support :math:`x \in \mathbb{R}`
| Mean :math:`\mu`
| Variance :math:`\dfrac{1}{\tau}` or :math:`\sigma^2`
| ======== ==========================================
|
| Parameters
| ----------
| mu : float
| Mean.
| sd : float
| Standard deviation (sd > 0) (only required if tau is not specified).
| tau : float
| Precision (tau > 0) (only required if sd is not specified).
|
| Examples
| --------
| .. code-block:: python
|
| with pm.Model():
| x = pm.Normal('x', mu=0, sd=10)
|
| with pm.Model():
| x = pm.Normal('x', mu=0, tau=1/23)
|
| Method resolution order:
| Normal
| pymc3.distributions.distribution.Continuous
| pymc3.distributions.distribution.Distribution
| builtins.object
|
| Methods defined here:
|
| __init__(self, mu=0, sd=None, tau=None, **kwargs)
| Initialize self. See help(type(self)) for accurate signature.
|
| logp(self, value)
|
| random(self, point=None, size=None)
|
| ----------------------------------------------------------------------
| Methods inherited from pymc3.distributions.distribution.Distribution:
|
| __getnewargs__(self)
|
| __latex__ = _repr_latex_(self, name=None, dist=None)
| Magic method name for IPython to use for LaTeX formatting.
|
| default(self)
|
| get_test_val(self, val, defaults)
|
| getattr_value(self, val)
|
| logp_nojac(self, *args, **kwargs)
| Return the logp, but do not include a jacobian term for transforms.
|
| If we use different parametrizations for the same distribution, we
| need to add the determinant of the jacobian of the transformation
| to make sure the densities still describe the same distribution.
| However, MAP estimates are not invariant with respect to the
| parametrization, we need to exclude the jacobian terms in this case.
|
| This function should be overwritten in base classes for transformed
| distributions.
|
| logp_sum(self, *args, **kwargs)
| Return the sum of the logp values for the given observations.
|
| Subclasses can use this to improve the speed of logp evaluations
| if only the sum of the logp values is needed.
|
| ----------------------------------------------------------------------
| Class methods inherited from pymc3.distributions.distribution.Distribution:
|
| dist(*args, **kwargs) from builtins.type
|
| ----------------------------------------------------------------------
| Static methods inherited from pymc3.distributions.distribution.Distribution:
|
| __new__(cls, name, *args, **kwargs)
| Create and return a new object. See help(type) for accurate signature.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from pymc3.distributions.distribution.Distribution:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
Having defined the priors, the next statement creates the expected value `mu` of the outcomes, specifying the linear relationship:```pythonmu = alpha + beta[0]*X1 + beta[1]*X2```This creates a **deterministic** random variable, which implies that its value is *completely* determined by its parents' values. That is, there is no uncertainty beyond that which is inherent in the parents' values. Here, `mu` is just the sum of the intercept `alpha` and the two products of the coefficients in `beta` and the predictor variables, whatever their values may be. PyMC3 random variables and data can be arbitrarily added, subtracted, divided, multiplied together and indexed-into to create new random variables. This allows for great model expressivity. Many common mathematical functions like `sum`, `sin`, `exp` and linear algebra functions like `dot` (for inner product) and `inv` (for inverse) are also provided. The final line of the model, defines `Y_obs`, the sampling distribution of the outcomes in the dataset.```pythonY_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)```This is a special case of a stochastic variable that we call an **observed stochastic**, and represents the data likelihood of the model. It is identical to a standard stochastic, except that its `observed` argument, which passes the data to the variable, indicates that the values for this variable were observed, and should not be changed by any fitting algorithm applied to the model. The data can be passed in the form of either a `numpy.ndarray` or `pandas.DataFrame` object.Notice that, unlike for the priors of the model, the parameters for the normal distribution of `Y_obs` are not fixed values, but rather are the deterministic object `mu` and the stochastic `sigma`. This creates parent-child relationships between the likelihood and these two variables. Model fittingHaving completely specified our model, the next step is to obtain posterior estimates for the unknown variables in the model. Ideally, we could calculate the posterior estimates analytically, but for most non-trivial models, this is not feasible. We will consider two approaches, whose appropriateness depends on the structure of the model and the goals of the analysis: finding the *maximum a posteriori* (MAP) point using optimization methods, and computing summaries based on samples drawn from the posterior distribution using Markov Chain Monte Carlo (MCMC) sampling methods. Maximum a posteriori methodsThe **maximum a posteriori (MAP)** estimate for a model, is the mode of the posterior distribution and is generally found using numerical optimization methods. This is often fast and easy to do, but only gives a point estimate for the parameters and can be biased if the mode isn't representative of the distribution. PyMC3 provides this functionality with the `find_MAP` function.Below we find the MAP for our original model. The MAP is returned as a parameter **point**, which is always represented by a Python dictionary of variable names to NumPy arrays of parameter values.
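As a brief aside before computing the MAP: a plain arithmetic expression like `mu` above is not recorded in the sampling output. If we wanted to keep track of it, one option (a sketch of an alternative model definition, assuming the `X1`, `X2` and `Y` arrays generated earlier) is to give it a name with `pm.Deterministic`:

```python
import pymc3 as pm

with pm.Model() as tracked_model:
    alpha = pm.Normal('alpha', mu=0, sd=10)
    beta = pm.Normal('beta', mu=0, sd=10, shape=2)
    sigma = pm.HalfNormal('sigma', sd=1)

    # Wrapping the expression in Deterministic gives it a name and stores it in the trace
    mu = pm.Deterministic('mu', alpha + beta[0] * X1 + beta[1] * X2)

    Y_obs = pm.Normal('Y_obs', mu=mu, sd=sigma, observed=Y)
```

We now return to `basic_model` and find its MAP estimate in the next cell.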
###Code
map_estimate = pm.find_MAP(model=basic_model)
map_estimate
###Output
logp = -149.58, ||grad|| = 12.242: 100%|██████████| 19/19 [00:00<00:00, 2230.33it/s]
###Markdown
By default, `find_MAP` uses the Broyden–Fletcher–Goldfarb–Shanno (BFGS) optimization algorithm to find the maximum of the log-posterior but also allows selection of other optimization algorithms from the `scipy.optimize` module. For example, below we use Powell's method to find the MAP.
###Code
map_estimate = pm.find_MAP(model=basic_model, method='powell')
map_estimate
###Output
0%| | 0/5000 [00:00<?, ?it/s]/home/osvaldo/anaconda3/lib/python3.6/site-packages/scipy/optimize/_minimize.py:502: RuntimeWarning: Method powell does not use gradient information (jac).
RuntimeWarning)
logp = -149.47, ||grad|| = 13.248: 100%|██████████| 177/177 [00:00<00:00, 2676.93it/s]
###Markdown
It is important to note that the MAP estimate is not always reasonable, especially if the mode is at an extreme. This can be a subtle issue; with high dimensional posteriors, one can have areas of extremely high density but low total probability because the volume is very small. This will often occur in hierarchical models with the variance parameter for the random effect. If the individual group means are all the same, the posterior will have near infinite density if the scale parameter for the group means is almost zero, even though the probability of such a small scale parameter will be small since the group means must be extremely close together. Most techniques for finding the MAP estimate also only find a *local* optimum (which is often good enough), but can fail badly for multimodal posteriors if the different modes are meaningfully different.In summary, while PyMC3 provides the function `find_MAP()`, at this point mostly for historical reasons, this function is of little use in most scenarios. If you want a point estimate you should get it from the posterior. In the next section we will see how to get a posterior using sampling methods. Sampling methodsThough finding the MAP is a fast and easy way of obtaining estimates of the unknown model parameters, it is limited because there is no associated estimate of uncertainty produced with the MAP estimates. Instead, a simulation-based approach such as Markov chain Monte Carlo (MCMC) can be used to obtain a Markov chain of values that, given the satisfaction of certain conditions, are indistinguishable from samples from the _true_ posterior distribution. To conduct MCMC sampling to generate posterior samples in PyMC3, we specify a **step method** object that corresponds to a particular MCMC algorithm, such as Metropolis, Slice sampling, or the No-U-Turn Sampler (NUTS). PyMC3's `step_methods` submodule contains the following samplers: `NUTS`, `Metropolis`, `Slice`, `HamiltonianMC`, and `BinaryMetropolis`. These step methods can be assigned manually, or assigned automatically by PyMC3. Auto-assignment is based on the attributes of each variable in the model. In general:* Binary variables will be assigned to `BinaryMetropolis`* Discrete variables will be assigned to `Metropolis`* Continuous variables will be assigned to `NUTS`Auto-assignment can be overriden for any subset of variables by specifying them manually prior to sampling. Gradient-based sampling methodsPyMC3 has the standard sampling algorithms like adaptive Metropolis-Hastings and adaptive slice sampling, but PyMC3's most capable step method is the No-U-Turn Sampler. NUTS is especially useful on models that have many continuous parameters, a situation where other MCMC algorithms work very slowly. It takes advantage of information about where regions of higher probability are, based on the gradient of the log posterior-density. This helps it achieve dramatically faster convergence on large problems than traditional sampling methods achieve. PyMC3 relies on Theano to analytically compute model gradients via automatic differentiation of the posterior density. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo. For random variables that are undifferentiable (namely, discrete variables) NUTS cannot be used, but it may still be used on the differentiable variables in a model that contains undifferentiable variables. 
NUTS requires a scaling matrix parameter, which is analogous to the variance parameter for the jump proposal distribution in Metropolis-Hastings, although NUTS uses it somewhat differently. The matrix gives the rough shape of the distribution so that NUTS does not make jumps that are too large in some directions and too small in other directions. It is important to set this scaling parameter to a reasonable value to facilitate efficient sampling. This is especially true for models that have many unobserved stochastic random variables or models with highly non-normal posterior distributions. Poor scaling parameters will slow down NUTS significantly, sometimes almost stopping it completely. A reasonable starting point for sampling can also be important for efficient sampling, but not as often.`PyMC3` automatically initializes NUTS to reasonable values based on the variance of the samples obtained during a tuning phase. A little bit of noise is added to ensure different, parallel, chains start from different points. Also, `PyMC3` will automatically assign an appropriate sampler if we don't supply it via the `step` keyword argument (see below for an example of how to explicitly assign step methods).
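For example, the tuning phase and the step method can also be controlled explicitly (a sketch; the `target_accept` and `tune` keyword arguments are assumed to be available in the PyMC3 version used here):

```python
with basic_model:
    # Request NUTS explicitly with a higher target acceptance rate,
    # and use a longer tuning phase before keeping 500 draws
    step = pm.NUTS(target_accept=0.9)
    trace_tuned = pm.sample(500, step=step, tune=1000)
```

In the simplest case, however, we just call `pm.sample` inside the model context with the number of draws we want, as in the next cell.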
###Code
with basic_model:
# draw 500 posterior samples
trace = pm.sample(500)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [sigma, beta, alpha]
100%|██████████| 1000/1000 [00:00<00:00, 1299.35it/s]
###Markdown
The `sample` function runs the step method(s) assigned (or passed) to it for the given number of iterations and returns a `Trace` object containing the samples collected, in the order they were collected. The `trace` object can be queried in a similar way to a `dict` containing a map from variable names to `numpy.array`s. The first dimension of the array is the sampling index and the later dimensions match the shape of the variable. We can see the last 5 values for the `alpha` variable as follows:
###Code
trace['alpha'][-5:]
###Output
_____no_output_____
###Markdown
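Because the samples come back as ordinary NumPy arrays, we can compute posterior summaries from them directly (a small sketch, assuming NumPy is available as `np` from earlier in the notebook):

```python
# Posterior mean and a central 94% interval for alpha, computed by hand
alpha_samples = trace['alpha']
print(alpha_samples.mean())
print(np.percentile(alpha_samples, [3, 97]))

# beta is vector-valued, so its samples form an array of shape (n_samples, 2)
print(trace['beta'].shape)
```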
If we wanted to use the slice sampling algorithm to sample `sigma` instead of NUTS (which was assigned automatically), we could have specified this as the `step` argument for `sample`.
###Code
with basic_model:
# instantiate sampler
step = pm.Slice()
# draw 5000 posterior samples
trace = pm.sample(5000, step=step)
###Output
Multiprocess sampling (2 chains in 2 jobs)
CompoundStep
>Slice: [sigma]
>Slice: [beta]
>Slice: [alpha]
100%|██████████| 5500/5500 [00:06<00:00, 811.17it/s]
###Markdown
Posterior analysis`PyMC3` provides plotting and summarization functions for inspecting the sampling output. A simple posterior plot can be created using `traceplot`.
###Code
pm.traceplot(trace);
###Output
_____no_output_____
###Markdown
The left column consists of a smoothed histogram (using kernel density estimation) of the marginal posteriors of each stochastic random variable while the right column contains the samples of the Markov chain plotted in sequential order. The `beta` variable, being vector-valued, produces two histograms and two sample traces, corresponding to both predictor coefficients.In addition, the `summary` function provides a text-based output of common posterior statistics:
###Code
pm.summary(trace).round(2)
###Output
_____no_output_____
###Markdown
Case study 1: Stochastic volatilityWe present a case study of stochastic volatility, time varying stock market volatility, to illustrate PyMC3's use in addressing a more realistic problem. The distribution of market returns is highly non-normal, which makes sampling the volatilities significantly more difficult. This example has 400+ parameters so using common sampling algorithms like Metropolis-Hastings would get bogged down, generating highly autocorrelated samples. Instead, we use NUTS, which is dramatically more efficient. The ModelAsset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others they are very stable. Stochastic volatility models address this with a latent volatility variable, which changes over time. The following model is similar to the one described in the NUTS paper (Hoffman 2014, p. 21).$$\begin{aligned} \nu &\sim exp(0.1) \\ \sigma &\sim exp(50) \\ s_i &\sim \mathcal{N}(s_{i-1}, \sigma^2) \\ log(r_i) &\sim t(\nu, 0, exp(-2 s_i))\end{aligned}$$Here, $r$ is the daily return series which is modeled with a Student-t distribution with an unknown degrees of freedom parameter, and a scale parameter determined by a latent process $s$. The individual $s_i$ are the individual daily log volatilities in the latent log volatility process. The DataOur data consist of daily returns of the S&P 500 during the 2008 financial crisis. Here, we use `pandas-datareader` to obtain the price data from Yahoo Finance; it can be installed with `pip install pandas-datareader`.
###Code
from pandas_datareader import data
import pandas as pd
returns = data.get_data_yahoo('SPY', start='2008-5-1', end='2009-12-1')['Close'].pct_change()
len(returns)
returns.plot(figsize=(10, 6))
plt.ylabel('daily returns in %');
###Output
_____no_output_____
###Markdown
Model SpecificationAs with the linear regression example, specifying the model in PyMC3 mirrors its statistical specification. This model employs several new distributions: the `Exponential` distribution for the $\nu$ and $\sigma$ priors, the Student-T (`StudentT`) distribution for distribution of returns, and the `GaussianRandomWalk` for the prior for the latent volatilities. In PyMC3, variables with purely positive priors like `Exponential` are transformed with a log transform. This makes sampling more robust. Behind the scenes, a variable in the unconstrained space (named "variableName_log") is added to the model for sampling. In this model this happens behind the scenes for both the degrees of freedom, `nu`, and the scale parameter for the volatility process, `sigma`, since they both have exponential priors. Variables with priors that constrain them on two sides, like `Beta` or `Uniform`, are also transformed to be unconstrained but with a log odds transform. Although, unlike model specification in PyMC2, we do not typically provide starting points for variables at the model specification stage, we can also provide an initial value for any distribution (called a "test value") using the `testval` argument. This overrides the default test value for the distribution (usually the mean, median or mode of the distribution), and is most often useful if some values are illegal and we want to ensure we select a legal one. The test values for the distributions are also used as a starting point for sampling and optimization by default, though this is easily overriden. The vector of latent volatilities `s` is given a prior distribution by `GaussianRandomWalk`. As its name suggests GaussianRandomWalk is a vector valued distribution where the values of the vector form a random normal walk of length n, as specified by the `shape` argument. The scale of the innovations of the random walk, `sigma`, is specified in terms of the standard deviation of the normally distributed innovations and can be a scalar or vector.
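For instance, `testval` can be used like this (a tiny standalone sketch, unrelated to the volatility model that is specified in the next cell):

```python
with pm.Model() as testval_demo:
    # The default test value for this Uniform would be the midpoint (5);
    # testval moves the starting point close to the lower bound instead
    x = pm.Uniform('x', lower=0, upper=10, testval=1e-3)

print(testval_demo.test_point)  # starting values, reported on the transformed (unconstrained) scale
```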
###Code
with pm.Model() as sp500_model:
nu = pm.Exponential('nu', 1/10., testval=5.)
sigma = pm.Exponential('sigma', 1/0.02, testval=.1)
s = pm.GaussianRandomWalk('s', sd=sigma, shape=len(returns))
volatility_process = pm.Deterministic('volatility_process', pm.math.exp(-2*s)**0.5)
r = pm.StudentT('r', nu=nu, sd=volatility_process, observed=returns)
###Output
_____no_output_____
###Markdown
Notice that we transform the log volatility process `s` into the volatility process by `exp(-2*s)`. Here, `exp` is a Theano function, rather than the corresponding function in NumPy; Theano provides a large subset of the mathematical functions that NumPy does.Also note that we have declared the `Model` name `sp500_model` in the first occurrence of the context manager, rather than splitting it into two lines, as we did for the first example. Fitting
###Code
with sp500_model:
trace = pm.sample(2000)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [r_missing, s, sigma, nu]
100%|██████████| 2500/2500 [01:53<00:00, 21.94it/s]
The estimated number of effective samples is smaller than 200 for some parameters.
###Markdown
We can check our samples by looking at the traceplot for `nu` and `sigma`.
###Code
pm.traceplot(trace, varnames=['nu', 'sigma']);
###Output
_____no_output_____
###Markdown
Finally we plot the distribution of volatility paths by plotting many of our sampled volatility paths on the same graph. Each is rendered partially transparent (via the `alpha` argument in Matplotlib's `plot` function) so the regions where many paths overlap are shaded more darkly.
###Code
fig, ax = plt.subplots(figsize=(15, 8))
returns.plot(ax=ax)
ax.plot(returns.index, 1/np.exp(trace['s',::5].T), 'C3', alpha=.03);
ax.set(title='volatility_process', xlabel='time', ylabel='volatility');
ax.legend(['S&P500', 'stochastic volatility process']);
###Output
_____no_output_____
###Markdown
As you can see, the model correctly infers the increase in volatility during the 2008 financial crash. Moreover, note that this model is quite complex because of its high dimensionality and dependency-structure in the random walk distribution. NUTS as implemented in PyMC3, however, correctly infers the posterior distribution with ease. Case study 2: Coal mining disastersConsider the following time series of recorded coal mining disasters in the UK from 1851 to 1962 (Jarrett, 1979). The number of disasters is thought to have been affected by changes in safety regulations during this period. Unfortunately, we also have a pair of years with missing data, identified as missing by a NumPy MaskedArray using -999 as the marker value. Next we will build a model for this series and attempt to estimate when the change occurred. At the same time, we will see how to handle missing data, use multiple samplers and sample from discrete random variables.
###Code
disaster_data = np.ma.masked_values([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, -999, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, -999, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1], value=-999)
years = np.arange(1851, 1962)
plt.plot(years, disaster_data, 'o', markersize=8);
plt.ylabel("Disaster count")
plt.xlabel("Year");
###Output
_____no_output_____
###Markdown
Occurrences of disasters in the time series is thought to follow a Poisson process with a large rate parameter in the early part of the time series, and from one with a smaller rate in the later part. We are interested in locating the change point in the series, which perhaps is related to changes in mining safety regulations.In our model, $$ \begin{aligned} D_t &\sim \text{Pois}(r_t), r_t= \begin{cases} e, & \text{if } t \le s \\ l, & \text{if } t \gt s \end{cases} \\ s &\sim \text{Unif}(t_l, t_h)\\ e &\sim \text{exp}(1)\\ l &\sim \text{exp}(1) \end{aligned}$$the parameters are defined as follows: * $D_t$: The number of disasters in year $t$ * $r_t$: The rate parameter of the Poisson distribution of disasters in year $t$. * $s$: The year in which the rate parameter changes (the switchpoint). * $e$: The rate parameter before the switchpoint $s$. * $l$: The rate parameter after the switchpoint $s$. * $t_l$, $t_h$: The lower and upper boundaries of year $t$. This model is built much like our previous models. The major differences are the introduction of discrete variables with the Poisson and discrete-uniform priors and the novel form of the deterministic random variable `rate`.
###Code
with pm.Model() as disaster_model:
switchpoint = pm.DiscreteUniform('switchpoint', lower=years.min(), upper=years.max(), testval=1900)
# Priors for pre- and post-switch rates number of disasters
early_rate = pm.Exponential('early_rate', 1)
late_rate = pm.Exponential('late_rate', 1)
# Allocate appropriate Poisson rates to years before and after current
rate = pm.math.switch(switchpoint >= years, early_rate, late_rate)
disasters = pm.Poisson('disasters', rate, observed=disaster_data)
###Output
_____no_output_____
###Markdown
The logic for the rate random variable,```pythonrate = pm.math.switch(switchpoint >= years, early_rate, late_rate)```is implemented using `switch` (exposed here as `pm.math.switch`), a Theano function that works like an if statement. It uses the first argument to switch between the next two arguments.Missing values are handled transparently by passing a `MaskedArray` or a `pandas.DataFrame` with NaN values to the `observed` argument when creating an observed stochastic random variable. Behind the scenes, another random variable, `disasters.missing_values`, is created to model the missing values. All we need to do to handle the missing values is ensure we sample this random variable as well. Unfortunately, because they are discrete variables and thus have no meaningful gradient, we cannot use NUTS for sampling `switchpoint` or the missing disaster observations. Instead, we will sample using a `Metropolis` step method, which implements adaptive Metropolis-Hastings, because it is designed to handle discrete values. `PyMC3` automatically assigns the correct sampling algorithms.
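As an aside, the same mechanism works with pandas objects: instead of a masked array, the observations could be supplied with `NaN` marking the missing entries. A sketch with toy data, just to show the idea (the real model above keeps the masked array):

```python
import pandas as pd

# A toy model with two missing observations encoded as NaN
toy_data = pd.Series([2.1, np.nan, 1.8, 2.5, np.nan, 2.2])

with pm.Model() as toy_missing_model:
    mu = pm.Normal('mu', mu=0, sd=10)
    y = pm.Normal('y', mu=mu, sd=1, observed=toy_data)

print(toy_missing_model.vars)  # includes an automatically created y_missing variable
```

The next cell draws samples from the masked-array model defined above.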
###Code
with disaster_model:
trace = pm.sample(10000)
###Output
Multiprocess sampling (2 chains in 2 jobs)
CompoundStep
>CompoundStep
>>Metropolis: [disasters_missing]
>>Metropolis: [switchpoint]
>NUTS: [late_rate, early_rate]
100%|██████████| 10500/10500 [00:11<00:00, 945.76it/s]
The number of effective samples is smaller than 10% for some parameters.
###Markdown
In the trace plot below we can see that there's about a 10 year span that's plausible for a significant change in safety, but a 5 year span that contains most of the probability mass. The distribution is jagged because of the jumpy relationship between the year switchpoint and the likelihood and not due to sampling error.
###Code
pm.traceplot(trace);
###Output
_____no_output_____
###Markdown
The following plot shows the switch point as an orange vertical line, together with its HPD as a semitransparent band. The dashed black line shows the accident rate.
###Code
plt.figure(figsize=(10, 8))
plt.plot(years, disaster_data, '.')
plt.ylabel("Number of accidents", fontsize=16)
plt.xlabel("Year", fontsize=16)
plt.vlines(trace['switchpoint'].mean(), disaster_data.min(), disaster_data.max(), color='C1')
average_disasters = np.zeros_like(disaster_data, dtype='float')
for i, year in enumerate(years):
idx = year < trace['switchpoint']
average_disasters[i] = (trace['early_rate'][idx].sum() + trace['late_rate'][~idx].sum()) / (len(trace) * trace.nchains)
sp_hpd = pm.hpd(trace['switchpoint'])
plt.fill_betweenx(y=[disaster_data.min(), disaster_data.max()],
x1=sp_hpd[0], x2=sp_hpd[1], alpha=0.5, color='C1');
plt.plot(years, average_disasters, 'k--', lw=2);
###Output
_____no_output_____
###Markdown
Arbitrary deterministicsDue to its reliance on Theano, PyMC3 provides many mathematical functions and operators for transforming random variables into new random variables. However, the library of functions in Theano is not exhaustive, therefore Theano and PyMC3 provide functionality for creating arbitrary Theano functions in pure Python, and including these functions in PyMC models. This is supported with the `as_op` function decorator.Theano needs to know the types of the inputs and outputs of a function, which are specified for `as_op` by `itypes` for inputs and `otypes` for outputs. The Theano documentation includes [an overview of the available types](http://deeplearning.net/software/theano/library/tensor/basic.htmlall-fully-typed-constructors).
###Code
import theano.tensor as tt
from theano.compile.ops import as_op
@as_op(itypes=[tt.lscalar], otypes=[tt.lscalar])
def crazy_modulo3(value):
if value > 0:
return value % 3
else :
return (-value + 1) % 3
with pm.Model() as model_deterministic:
a = pm.Poisson('a', 1)
b = crazy_modulo3(a)
###Output
_____no_output_____
###Markdown
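Sampling from `model_deterministic` would then require a gradient-free step method (a sketch; see the note below on why the Hamiltonian-based samplers cannot be used with `as_op`):

```python
with model_deterministic:
    # Metropolis does not need gradients, so it works with the as_op function above
    trace_det = pm.sample(1000, step=pm.Metropolis())
```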
An important drawback of this approach is that it is not possible for `theano` to inspect these functions in order to compute the gradient required for the Hamiltonian-based samplers. Therefore, it is not possible to use the HMC or NUTS samplers for a model that uses such an operator. However, it is possible to add a gradient if we inherit from `theano.Op` instead of using `as_op`. The PyMC example set includes [a more elaborate example of the usage of as_op](https://github.com/pymc-devs/pymc3/blob/master/pymc3/examples/disaster_model_theano_op.py). Arbitrary distributionsSimilarly, the library of statistical distributions in PyMC3 is not exhaustive, but PyMC3 allows for the creation of user-defined functions for an arbitrary probability distribution. For simple statistical distributions, the `DensityDist` function takes as an argument any function that calculates a log-probability $log(p(x))$. This function may employ other random variables in its calculation. Here is an example inspired by a blog post by Jake Vanderplas on which priors to use for a linear regression (Vanderplas, 2014). ```pythonimport theano.tensor as ttwith pm.Model() as model: alpha = pm.Uniform('intercept', -100, 100) Create custom densities beta = pm.DensityDist('beta', lambda value: -1.5 * tt.log(1 + value**2), testval=0) eps = pm.DensityDist('eps', lambda value: -tt.log(tt.abs_(value)), testval=1) Create likelihood like = pm.Normal('y_est', mu=alpha + beta * X, sd=eps, observed=Y)``` For more complex distributions, one can create a subclass of `Continuous` or `Discrete` and provide the custom `logp` function, as required. This is how the built-in distributions in PyMC are specified. As an example, fields like psychology and astrophysics have complex likelihood functions for a particular process that may require numerical approximation. In these cases, it is impossible to write the function in terms of predefined theano operators and we must use a custom theano operator using `as_op` or inheriting from `theano.Op`. Implementing the `beta` variable above as a `Continuous` subclass is shown below, along with a sub-function.
###Code
class Beta(pm.Continuous):
def __init__(self, mu, *args, **kwargs):
super(Beta, self).__init__(*args, **kwargs)
self.mu = mu
self.mode = mu
def logp(self, value):
mu = self.mu
return beta_logp(value - mu)
def beta_logp(value):
return -1.5 * np.log(1 + (value)**2)
with pm.Model() as model:
beta = Beta('slope', mu=0, testval=0)
###Output
_____no_output_____
###Markdown
If your logp can not be expressed in Theano, you can decorate the function with `as_op` as follows: `@as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])`. Note, that this will create a blackbox Python function that will be much slower and not provide the gradients necessary for e.g. NUTS. Generalized Linear ModelsGeneralized Linear Models (GLMs) are a class of flexible models that are widely used to estimate regression relationships between a single outcome variable and one or multiple predictors. Because these models are so common, `PyMC3` offers a `glm` submodule that allows flexible creation of various GLMs with an intuitive `R`-like syntax that is implemented via the `patsy` module.The `glm` submodule requires data to be included as a `pandas` `DataFrame`. Hence, for our linear regression example:
###Code
# Convert X and Y to a pandas DataFrame
import pandas
df = pandas.DataFrame({'x1': X1, 'x2': X2, 'y': Y})
###Output
_____no_output_____
###Markdown
The model can then be very concisely specified in one line of code.
###Code
from pymc3.glm import GLM
with pm.Model() as model_glm:
GLM.from_formula('y ~ x1 + x2', df)
trace = pm.sample()
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [sd, x2, x1, Intercept]
100%|██████████| 1000/1000 [00:01<00:00, 976.25it/s]
###Markdown
The error distribution, if not specified via the `family` argument, is assumed to be normal. In the case of logistic regression, this can be modified by passing in a `Binomial` family object.
###Code
from pymc3.glm.families import Binomial
df_logistic = pandas.DataFrame({'x1': X1, 'y': Y > np.median(Y)})
with pm.Model() as model_glm_logistic:
GLM.from_formula('y ~ x1', df_logistic, family=Binomial())
###Output
_____no_output_____
###Markdown
Getting started with PyMC3Authors: John Salvatier, Thomas V. Wiecki, Christopher FonnesbeckNote: This text is taken from the [PeerJ CS publication on PyMC3](https://peerj.com/articles/cs-55/). AbstractProbabilistic Programming allows for automatic Bayesian inference on user-defined probabilistic models. Recent advances in Markov chain Monte Carlo (MCMC) sampling allow inference on increasingly complex models. This class of MCMC, known as Hamiltonian Monte Carlo, requires gradient information which is often not readily available. PyMC3 is a new open source Probabilistic Programming framework written in Python that uses Theano to compute gradients via automatic differentiation as well as compile probabilistic programs on-the-fly to C for increased speed. Contrary to other Probabilistic Programming languages, PyMC3 allows model specification directly in Python code. The lack of a domain specific language allows for great flexibility and direct interaction with the model. This paper is a tutorial-style introduction to this software package. IntroductionProbabilistic programming (PP) allows flexible specification of Bayesian statistical models in code. PyMC3 is a new, open-source PP framework with an intuitive and readable, yet powerful, syntax that is close to the natural syntax statisticians use to describe models. It features next-generation Markov chain Monte Carlo (MCMC) sampling algorithms such as the No-U-Turn Sampler (NUTS; Hoffman, 2014), a self-tuning variant of Hamiltonian Monte Carlo (HMC; Duane, 1987). This class of samplers works well on high dimensional and complex posterior distributions and allows many complex models to be fit without specialized knowledge about fitting algorithms. HMC and NUTS take advantage of gradient information from the likelihood to achieve much faster convergence than traditional sampling methods, especially for larger models. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo, which means you usually don't need to have specialized knowledge about how the algorithms work. PyMC3, Stan (Stan Development Team, 2014), and the LaplacesDemon package for R are currently the only PP packages to offer HMC.Probabilistic programming in Python confers a number of advantages including multi-platform compatibility, an expressive yet clean and readable syntax, easy integration with other scientific libraries, and extensibility via C, C++, Fortran or Cython. These features make it relatively straightforward to write and use custom statistical distributions, samplers and transformation functions, as required by Bayesian analysis.While most of PyMC3's user-facing features are written in pure Python, it leverages Theano (Bergstra et al., 2010) to transparently transcode models to C and compile them to machine code, thereby boosting performance. Theano is a library that allows expressions to be defined using generalized vector data structures called *tensors*, which are tightly integrated with the popular NumPy `ndarray` data structure, and similarly allow for broadcasting and advanced indexing, just as NumPy arrays do. Theano also automatically optimizes the likelihood's computational graph for speed and provides simple GPU integration.Here, we present a primer on the use of PyMC3 for solving general Bayesian statistical inference and prediction problems.
We will first see the basics of how to use PyMC3, motivated by a simple example: installation, data creation, model definition, model fitting and posterior analysis. Then we will cover two case studies and use them to show how to define and fit more sophisticated models. Finally we will show how to extend PyMC3 and discuss other useful features: the Generalized Linear Models subpackage, custom distributions, custom transformations and alternative storage backends. InstallationRunning PyMC3 requires a working Python interpreter, either version 2.7 (or more recent) or 3.4 (or more recent); we recommend that new users install version 3.4. A complete Python installation for Mac OSX, Linux and Windows can most easily be obtained by downloading and installing the free [`Anaconda Python Distribution`](https://store.continuum.io/cshop/anaconda/) by ContinuumIO. `PyMC3` can be installed using `pip` (https://pip.pypa.io/en/latest/installing.html):```pip install git+https://github.com/pymc-devs/pymc3```PyMC3 depends on several third-party Python packages which will be automatically installed when installing via pip. The four required dependencies are: `Theano`, `NumPy`, `SciPy`, and `Matplotlib`. To take full advantage of PyMC3, the optional dependencies `Pandas` and `Patsy` should also be installed. These are *not* automatically installed, but can be installed by:```pip install patsy pandas```The source code for PyMC3 is hosted on GitHub at https://github.com/pymc-devs/pymc3 and is distributed under the liberal [Apache License 2.0](https://github.com/pymc-devs/pymc3/blob/master/LICENSE). On the GitHub site, users may also report bugs and other issues, as well as contribute code to the project, which we actively encourage. A Motivating Example: Linear RegressionTo introduce model definition, fitting and posterior analysis, we first consider a simple Bayesian linear regression model with normal priors for the parameters. We are interested in predicting outcomes $Y$ as normally-distributed observations with an expected value $\mu$ that is a linear function of two predictor variables, $X_1$ and $X_2$.$$\begin{aligned} Y &\sim \mathcal{N}(\mu, \sigma^2) \\\mu &= \alpha + \beta_1 X_1 + \beta_2 X_2\end{aligned}$$where $\alpha$ is the intercept, and $\beta_i$ is the coefficient for covariate $X_i$, while $\sigma$ represents the observation error. Since we are constructing a Bayesian model, the unknown variables in the model must be assigned a prior distribution. We choose zero-mean normal priors with variance of 100 for both regression coefficients, which corresponds to *weak* information regarding the true parameter values. We choose a half-normal distribution (normal distribution bounded at zero) as the prior for $\sigma$.$$\begin{aligned} \alpha &\sim \mathcal{N}(0, 100) \\\beta_i &\sim \mathcal{N}(0, 100) \\\sigma &\sim \lvert\mathcal{N}(0, 1){\rvert}\end{aligned}$$ Generating dataWe can simulate some artificial data from this model using only NumPy's `random` module, and then use PyMC3 to try to recover the corresponding parameters. We are intentionally generating the data to closely correspond the PyMC3 model structure.
###Code
import numpy as np
import matplotlib.pyplot as plt
# Initialize random number generator
np.random.seed(123)
# True parameter values
alpha, sigma = 1, 1
beta = [1, 2.5]
# Size of dataset
size = 100
# Predictor variable
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
# Simulate outcome variable
Y = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma
###Output
_____no_output_____
###Markdown
Here is what the simulated data look like. We use the `pylab` module from the plotting library matplotlib.
###Code
%matplotlib inline
fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,4))
axes[0].scatter(X1, Y)
axes[1].scatter(X2, Y)
axes[0].set_ylabel('Y'); axes[0].set_xlabel('X1'); axes[1].set_xlabel('X2');
###Output
_____no_output_____
###Markdown
Model SpecificationSpecifying this model in PyMC3 is straightforward because the syntax is as close to the statistical notation. For the most part, each line of Python code corresponds to a line in the model notation above. First, we import the components we will need from PyMC.
###Code
from pymc3 import Model, Normal, HalfNormal
###Output
_____no_output_____
###Markdown
Now we build our model, which we will present in full first, then explain each part line-by-line.
###Code
basic_model = Model()
with basic_model:
# Priors for unknown model parameters
alpha = Normal('alpha', mu=0, sd=10)
beta = Normal('beta', mu=0, sd=10, shape=2)
sigma = HalfNormal('sigma', sd=1)
# Expected value of outcome
mu = alpha + beta[0]*X1 + beta[1]*X2
# Likelihood (sampling distribution) of observations
Y_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)
###Output
_____no_output_____
###Markdown
The first line,```pythonbasic_model = Model()```creates a new `Model` object which is a container for the model random variables.Following instantiation of the model, the subsequent specification of the model components is performed inside a `with` statement:```pythonwith basic_model:```This creates a *context manager*, with our `basic_model` as the context, that includes all statements until the indented block ends. This means all PyMC3 objects introduced in the indented code block below the `with` statement are added to the model behind the scenes. Absent this context manager idiom, we would be forced to manually associate each of the variables with `basic_model` right after we create them. If you try to create a new random variable without a `with model:` statement, it will raise an error since there is no obvious model for the variable to be added to.The first three statements in the context manager:```pythonalpha = Normal('alpha', mu=0, sd=10)beta = Normal('beta', mu=0, sd=10, shape=2)sigma = HalfNormal('sigma', sd=1)```create **stochastic** random variables with Normal prior distributions for the regression coefficients, with a mean of 0 and standard deviation of 10, and a half-normal distribution for the standard deviation of the observations, $\sigma$. These are stochastic because their values are partly determined by their parents in the dependency graph of random variables, which for priors are simple constants, and partly random (or stochastic). We call the `Normal` constructor to create a random variable to use as a normal prior. The first argument is always the *name* of the random variable, which should almost always match the name of the Python variable being assigned to, since it is sometimes used to retrieve the variable from the model for summarizing output. The remaining required arguments for a stochastic object are the parameters, in this case `mu`, the mean, and `sd`, the standard deviation, to which we assign hyperparameter values for the model. In general, a distribution's parameters are values that determine the location, shape or scale of the random variable, depending on the parameterization of the distribution. Most commonly used distributions, such as `Beta`, `Exponential`, `Categorical`, `Gamma`, `Binomial` and many others, are available in PyMC3.The `beta` variable has an additional `shape` argument to denote it as a vector-valued parameter of size 2. The `shape` argument is available for all distributions and specifies the length or shape of the random variable, but is optional for scalar variables, since it defaults to a value of one. It can be an integer, to specify an array, or a tuple, to specify a multidimensional array (*e.g.* `shape=(5,7)` makes a random variable that takes on 5 by 7 matrix values). Detailed notes about distributions, sampling methods and other PyMC3 functions are available via the `help` function.
###Code
help(Normal) #try help(Model), help(Uniform) or help(basic_model)
###Output
Help on class Normal in module pymc3.distributions.continuous:
class Normal(pymc3.distributions.distribution.Continuous)
| Univariate normal log-likelihood.
|
| .. math::
|
| f(x \mid \mu, \tau) =
| \sqrt{\frac{\tau}{2\pi}}
| \exp\left\{ -\frac{\tau}{2} (x-\mu)^2 \right\}
|
| ======== ==========================================
| Support :math:`x \in \mathbb{R}`
| Mean :math:`\mu`
| Variance :math:`\dfrac{1}{\tau}` or :math:`\sigma^2`
| ======== ==========================================
|
| Normal distribution can be parameterized either in terms of precision
| or standard deviation. The link between the two parametrizations is
| given by
|
| .. math::
|
| \tau = \dfrac{1}{\sigma^2}
|
| Parameters
| ----------
| mu : float
| Mean.
| sd : float
| Standard deviation (sd > 0).
| tau : float
| Precision (tau > 0).
|
| Method resolution order:
| Normal
| pymc3.distributions.distribution.Continuous
| pymc3.distributions.distribution.Distribution
| builtins.object
|
| Methods defined here:
|
| __init__(self, *args, **kwargs)
| Initialize self. See help(type(self)) for accurate signature.
|
| logp(self, value)
|
| random(self, point=None, size=None, repeat=None)
|
| ----------------------------------------------------------------------
| Methods inherited from pymc3.distributions.distribution.Distribution:
|
| __getnewargs__(self)
|
| default(self)
|
| get_test_val(self, val, defaults)
|
| getattr_value(self, val)
|
| ----------------------------------------------------------------------
| Class methods inherited from pymc3.distributions.distribution.Distribution:
|
| dist(*args, **kwargs) from builtins.type
|
| ----------------------------------------------------------------------
| Static methods inherited from pymc3.distributions.distribution.Distribution:
|
| __new__(cls, name, *args, **kwargs)
| Create and return a new object. See help(type) for accurate signature.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from pymc3.distributions.distribution.Distribution:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
Having defined the priors, the next statement creates the expected value `mu` of the outcomes, specifying the linear relationship:```pythonmu = alpha + beta[0]*X1 + beta[1]*X2```This creates a **deterministic** random variable, which implies that its value is *completely* determined by its parents' values. That is, there is no uncertainty beyond that which is inherent in the parents' values. Here, `mu` is just the sum of the intercept `alpha` and the two products of the coefficients in `beta` and the predictor variables, whatever their values may be. PyMC3 random variables and data can be arbitrarily added, subtracted, divided, multiplied together and indexed-into to create new random variables. This allows for great model expressivity. Many common mathematical functions like `sum`, `sin`, `exp` and linear algebra functions like `dot` (for inner product) and `inv` (for inverse) are also provided. The final line of the model, defines `Y_obs`, the sampling distribution of the outcomes in the dataset.```pythonY_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)```This is a special case of a stochastic variable that we call an **observed stochastic**, and represents the data likelihood of the model. It is identical to a standard stochastic, except that its `observed` argument, which passes the data to the variable, indicates that the values for this variable were observed, and should not be changed by any fitting algorithm applied to the model. The data can be passed in the form of either a `numpy.ndarray` or `pandas.DataFrame` object.Notice that, unlike for the priors of the model, the parameters for the normal distribution of `Y_obs` are not fixed values, but rather are the deterministic object `mu` and the stochastic `sigma`. This creates parent-child relationships between the likelihood and these two variables. Model fittingHaving completely specified our model, the next step is to obtain posterior estimates for the unknown variables in the model. Ideally, we could calculate the posterior estimates analytically, but for most non-trivial models, this is not feasible. We will consider two approaches, whose appropriateness depends on the structure of the model and the goals of the analysis: finding the *maximum a posteriori* (MAP) point using optimization methods, and computing summaries based on samples drawn from the posterior distribution using Markov Chain Monte Carlo (MCMC) sampling methods. Maximum a posteriori methodsThe **maximum a posteriori (MAP)** estimate for a model, is the mode of the posterior distribution and is generally found using numerical optimization methods. This is often fast and easy to do, but only gives a point estimate for the parameters and can be biased if the mode isn't representative of the distribution. PyMC3 provides this functionality with the `find_MAP` function.Below we find the MAP for our original model. The MAP is returned as a parameter **point**, which is always represented by a Python dictionary of variable names to NumPy arrays of parameter values.
###Code
from pymc3 import find_MAP
map_estimate = find_MAP(model=basic_model)
print(map_estimate)
###Output
Optimization terminated successfully.
Current function value: 149.017982
Iterations: 16
Function evaluations: 21
Gradient evaluations: 21
{'alpha': array(0.9065985497559482), 'beta': array([ 0.94848602, 2.60705514]), 'sigma_log_': array(-0.03278147017403066)}
###Markdown
By default, `find_MAP` uses the Broyden–Fletcher–Goldfarb–Shanno (BFGS) optimization algorithm to find the maximum of the log-posterior but also allows selection of other optimization algorithms from the `scipy.optimize` module. For example, below we use Powell's method to find the MAP.
###Code
from scipy import optimize
map_estimate = find_MAP(model=basic_model, fmin=optimize.fmin_powell)
print(map_estimate)
###Output
Optimization terminated successfully.
Current function value: 149.019762
Iterations: 4
Function evaluations: 176
{'alpha': array(0.9090521898977764), 'beta': array([ 0.95140146, 2.61437458]), 'sigma_log_': array(-0.030009775203258385)}
###Markdown
It is important to note that the MAP estimate is not always reasonable, especially if the mode is at an extreme. This can be a subtle issue; with high dimensional posteriors, one can have areas of extremely high density but low total probability because the volume is very small. This will often occur in hierarchical models with the variance parameter for the random effect. If the individual group means are all the same, the posterior will have near infinite density if the scale parameter for the group means is almost zero, even though the probability of such a small scale parameter will be small since the group means must be extremely close together. Most techniques for finding the MAP estimate also only find a *local* optimum (which is often good enough), but can fail badly for multimodal posteriors if the different modes are meaningfully different. Sampling methodsThough finding the MAP is a fast and easy way of obtaining estimates of the unknown model parameters, it is limited because there is no associated estimate of uncertainty produced with the MAP estimates. Instead, a simulation-based approach such as Markov chain Monte Carlo (MCMC) can be used to obtain a Markov chain of values that, given the satisfaction of certain conditions, are indistinguishable from samples from the posterior distribution. To conduct MCMC sampling to generate posterior samples in PyMC3, we specify a **step method** object that corresponds to a particular MCMC algorithm, such as Metropolis, Slice sampling, or the No-U-Turn Sampler (NUTS). PyMC3's `step_methods` submodule contains the following samplers: `NUTS`, `Metropolis`, `Slice`, `HamiltonianMC`, and `BinaryMetropolis`. These step methods can be assigned manually, or assigned automatically by PyMC3. Auto-assignment is based on the attributes of each variable in the model. In general:* Binary variables will be assigned to `BinaryMetropolis`* Discrete variables will be assigned to `Metropolis`* Continuous variables will be assigned to `NUTS`Auto-assignment can be overriden for any subset of variables by specifying them manually prior to sampling. Gradient-based sampling methodsPyMC3 has the standard sampling algorithms like adaptive Metropolis-Hastings and adaptive slice sampling, but PyMC3's most capable step method is the No-U-Turn Sampler. NUTS is especially useful on models that have many continuous parameters, a situation where other MCMC algorithms work very slowly. It takes advantage of information about where regions of higher probability are, based on the gradient of the log posterior-density. This helps it achieve dramatically faster convergence on large problems than traditional sampling methods achieve. PyMC3 relies on Theano to analytically compute model gradients via automatic differentiation of the posterior density. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo. For random variables that are undifferentiable (namely, discrete variables) NUTS cannot be used, but it may still be used on the differentiable variables in a model that contains undifferentiable variables. NUTS requires a scaling matrix parameter, which is analogous to the variance parameter for the jump proposal distribution in Metropolis-Hastings, although NUTS uses it somewhat differently. The matrix gives the rough shape of the distribution so that NUTS does not make jumps that are too large in some directions and too small in other directions. 
It is important to set this scaling parameter to a reasonable value to facilitate efficient sampling. This is especially true for models that have many unobserved stochastic random variables or models with highly non-normal posterior distributions. Poor scaling parameters will slow down NUTS significantly, sometimes almost stopping it completely. A reasonable starting point for sampling can also be important for efficient sampling, but not as often.Fortunately `PyMC3` automatically initializes NUTS using another inference algorithm called ADVI (auto-diff variational inference). Moreover, `PyMC3` will automatically assign an appropriate sampler if we don't supply it via the `step` keyword argument (see below for an example of how to explicitly assign step methods).
###Code
from pymc3 import NUTS, sample
from scipy import optimize
with basic_model:
# draw 2000 posterior samples
trace = sample(2000)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using advi...
Average ELBO = -153.48: 100%|██████████| 200000/200000 [00:17<00:00, 11639.06it/s]
Finished [100%]: Average ELBO = -153.51
100%|██████████| 2000/2000 [00:03<00:00, 522.55it/s]
###Markdown
The `sample` function runs the step method(s) assigned (or passed) to it for the given number of iterations and returns a `Trace` object containing the samples collected, in the order they were collected. The `trace` object can be queried in a similar way to a `dict` containing a map from variable names to `numpy.array`s. The first dimension of the array is the sampling index and the later dimensions match the shape of the variable. We can see the last 5 values for the `alpha` variable as follows:
###Code
trace['alpha'][-5:]
###Output
_____no_output_____
###Markdown
If we wanted to use the slice sampling algorithm to sample `sigma` instead of NUTS (which was assigned automatically), we could have specified this as the `step` argument for `sample`.
###Code
from pymc3 import Slice
with basic_model:
# obtain starting values via MAP
start = find_MAP(fmin=optimize.fmin_powell)
# instantiate sampler
step = Slice(vars=[sigma])
# draw 5000 posterior samples
trace = sample(5000, step=step, start=start)
###Output
Assigned NUTS to alpha
Assigned NUTS to beta
###Markdown
Posterior analysis`PyMC3` provides plotting and summarization functions for inspecting the sampling output. A simple posterior plot can be created using `traceplot`.
###Code
from pymc3 import traceplot
traceplot(trace);
###Output
_____no_output_____
###Markdown
The left column consists of a smoothed histogram (using kernel density estimation) of the marginal posteriors of each stochastic random variable while the right column contains the samples of the Markov chain plotted in sequential order. The `beta` variable, being vector-valued, produces two histograms and two sample traces, corresponding to both predictor coefficients.In addition, the `summary` function provides a text-based output of common posterior statistics:
###Code
from pymc3 import summary
summary(trace)
###Output
alpha:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.905 0.099 0.002 [0.727, 1.120]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.709 0.839 0.904 0.971 1.106
beta:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.948 0.087 0.001 [0.767, 1.112]
2.591 0.510 0.018 [1.653, 3.601]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.777 0.890 0.949 1.006 1.123
1.602 2.249 2.591 2.960 3.555
sigma:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.990 0.072 0.001 [0.853, 1.129]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.863 0.940 0.986 1.036 1.142
###Markdown
Case study 1: Stochastic volatilityWe present a case study of stochastic volatility, time varying stock market volatility, to illustrate PyMC3's use in addressing a more realistic problem. The distribution of market returns is highly non-normal, which makes sampling the volatilities significantly more difficult. This example has 400+ parameters so using common sampling algorithms like Metropolis-Hastings would get bogged down, generating highly autocorrelated samples. Instead, we use NUTS, which is dramatically more efficient. The ModelAsset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others they are very stable. Stochastic volatility models address this with a latent volatility variable, which changes over time. The following model is similar to the one described in the NUTS paper (Hoffman 2014, p. 21).$$\begin{aligned} \sigma &\sim exp(50) \\ \nu &\sim exp(.1) \\ s_i &\sim \mathcal{N}(s_{i-1}, \sigma^{-2}) \\ log(y_i) &\sim t(\nu, 0, exp(-2 s_i))\end{aligned}$$Here, $y$ is the daily return series which is modeled with a Student-t distribution with an unknown degrees of freedom parameter, and a scale parameter determined by a latent process $s$. The individual $s_i$ are the individual daily log volatilities in the latent log volatility process. The DataOur data consist of daily returns of the S&P 500 during the 2008 financial crisis. Here, we use `pandas-datareader` to obtain the price data from Yahoo!-Finance; it can be installed with `pip install pandas-datareader`.
###Code
try:
from pandas_datareader import data
except ImportError:
!pip install pandas-datareader
from pandas_datareader import data
import pandas as pd
returns = data.get_data_yahoo('SPY', start='2008-5-1', end='2009-12-1')['Adj Close'].pct_change()
print(len(returns))
returns.plot(figsize=(10, 6))
plt.ylabel('daily returns in %');
###Output
_____no_output_____
###Markdown
Model SpecificationAs with the linear regression example, specifying the model in PyMC3 mirrors its statistical specification. This model employs several new distributions: the `Exponential` distribution for the $ \nu $ and $\sigma$ priors, the Student-T (`StudentT`) distribution for distribution of returns, and the `GaussianRandomWalk` for the prior for the latent volatilities. In PyMC3, variables with purely positive priors like `Exponential` are transformed with a log transform. This makes sampling more robust. Behind the scenes, a variable in the unconstrained space (named "variableName_log") is added to the model for sampling. In this model this happens behind the scenes for both the degrees of freedom, `nu`, and the scale parameter for the volatility process, `sigma`, since they both have exponential priors. Variables with priors that constrain them on two sides, like `Beta` or `Uniform`, are also transformed to be unconstrained but with a log odds transform. Although, unlike model specification in PyMC2, we do not typically provide starting points for variables at the model specification stage, we can also provide an initial value for any distribution (called a "test value") using the `testval` argument. This overrides the default test value for the distribution (usually the mean, median or mode of the distribution), and is most often useful if some values are illegal and we want to ensure we select a legal one. The test values for the distributions are also used as a starting point for sampling and optimization by default, though this is easily overriden. The vector of latent volatilities `s` is given a prior distribution by `GaussianRandomWalk`. As its name suggests GaussianRandomWalk is a vector valued distribution where the values of the vector form a random normal walk of length n, as specified by the `shape` argument. The scale of the innovations of the random walk, `sigma`, is specified in terms of the precision of the normally distributed innovations and can be a scalar or vector.
###Code
from pymc3 import Exponential, StudentT, Deterministic
from pymc3.math import exp
from pymc3.distributions.timeseries import GaussianRandomWalk
with Model() as sp500_model:
nu = Exponential('nu', 1./10, testval=5.)
sigma = Exponential('sigma', 1./.02, testval=.1)
s = GaussianRandomWalk('s', sigma**-2, shape=len(returns))
volatility_process = Deterministic('volatility_process', exp(-2*s))
r = StudentT('r', nu, lam=1/volatility_process, observed=returns)
###Output
_____no_output_____
###Markdown
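Before fitting, we can inspect the model's test point, the collection of test values (on the transformed scale) that sampling and optimization start from by default. This is only an illustrative check; the exact names of the transformed variables (e.g. the `_log` suffix) depend on the PyMC3 version:

```python
# `nu` and `sigma` appear under their log-transformed names,
# while the random walk `s` is untransformed
print(sp500_model.test_point)
```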
Notice that we transform the log volatility process `s` into the volatility process by `exp(-2*s)`. Here, `exp` is a Theano function, rather than the corresponding function in NumPy; Theano provides a large subset of the mathematical functions that NumPy does.Also note that we have declared the `Model` name `sp500_model` in the first occurrence of the context manager, rather than splitting it into two lines, as we did for the first example. Fitting
###Code
from pymc3 import variational
import scipy
with sp500_model:
trace = sample(2000)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using advi...
Average ELBO = 883.19: 100%|██████████| 200000/200000 [00:45<00:00, 4380.24it/s]
Finished [100%]: Average ELBO = 883.43
100%|██████████| 2000/2000 [03:20<00:00, 15.80it/s]
###Markdown
We can check our samples by looking at the traceplot for `nu` and `sigma`.
###Code
traceplot(trace[200:], [nu, sigma]);
###Output
_____no_output_____
###Markdown
Finally we plot the distribution of volatility paths by plotting many of our sampled volatility paths on the same graph. Each is rendered partially transparent (via the `alpha` argument in Matplotlib's `plot` function) so the regions where many paths overlap are shaded more darkly.
###Code
fig, ax = plt.subplots(figsize=(15, 8))
returns.plot(ax=ax)
ax.plot(returns.index, 1/np.exp(trace['s',::5].T), 'r', alpha=.03);
ax.set(title='volatility_process', xlabel='time', ylabel='volatility');
ax.legend(['S&P500', 'stochastic volatility process'])
###Output
_____no_output_____
###Markdown
As you can see, the model correctly infers the increase in volatility during the 2008 financial crash. Moreover, note that this model is quite complex because of its high dimensionality and dependency-structure in the random walk distribution. NUTS as implemented in PyMC3, however, correctly infers the posterior distribution with ease. Case study 2: Coal mining disastersConsider the following time series of recorded coal mining disasters in the UK from 1851 to 1962 (Jarrett, 1979). The number of disasters is thought to have been affected by changes in safety regulations during this period. Unfortunately, we also have a pair of years with missing data, identified as missing by a NumPy MaskedArray using -999 as the marker value. Next we will build a model for this series and attempt to estimate when the change occurred. At the same time, we will see how to handle missing data, use multiple samplers and sample from discrete random variables.
###Code
disaster_data = np.ma.masked_values([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, -999, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, -999, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1], value=-999)
year = np.arange(1851, 1962)
plt.plot(year, disaster_data, 'o', markersize=8);
plt.ylabel("Disaster count")
plt.xlabel("Year")
###Output
_____no_output_____
###Markdown
Occurrences of disasters in the time series are thought to follow a Poisson process with a large rate parameter in the early part of the time series, and one with a smaller rate in the later part. We are interested in locating the change point in the series, which perhaps is related to changes in mining safety regulations.In our model, $$ \begin{aligned} D_t &\sim \text{Pois}(r_t), r_t= \begin{cases} e, & \text{if } t \le s \\ l, & \text{if } t \gt s \end{cases} \\ s &\sim \text{Unif}(t_l, t_h)\\ e &\sim \text{exp}(1)\\ l &\sim \text{exp}(1) \end{aligned}$$ the parameters are defined as follows: * $D_t$: The number of disasters in year $t$ * $r_t$: The rate parameter of the Poisson distribution of disasters in year $t$. * $s$: The year in which the rate parameter changes (the switchpoint). * $e$: The rate parameter before the switchpoint $s$. * $l$: The rate parameter after the switchpoint $s$. * $t_l$, $t_h$: The lower and upper boundaries of year $t$. This model is built much like our previous models. The major differences are the introduction of discrete variables with the Poisson and discrete-uniform priors and the novel form of the deterministic random variable `rate`.
###Code
from pymc3 import DiscreteUniform, Poisson
from pymc3.math import switch
with Model() as disaster_model:
switchpoint = DiscreteUniform('switchpoint', lower=year.min(), upper=year.max(), testval=1900)
# Priors for pre- and post-switch rates number of disasters
early_rate = Exponential('early_rate', 1)
late_rate = Exponential('late_rate', 1)
# Allocate appropriate Poisson rates to years before and after current
rate = switch(switchpoint >= year, early_rate, late_rate)
disasters = Poisson('disasters', rate, observed=disaster_data)
###Output
_____no_output_____
###Markdown
The logic for the rate random variable,```pythonrate = switch(switchpoint >= year, early_rate, late_rate)```is implemented using `switch`, a Theano function that works like an if statement. It uses the first argument to switch between the next two arguments.Missing values are handled transparently by passing a `MaskedArray` or a `pandas.DataFrame` with NaN values to the `observed` argument when creating an observed stochastic random variable. Behind the scenes, another random variable, `disasters.missing_values` is created to model the missing values. All we need to do to handle the missing values is ensure we sample this random variable as well. Unfortunately because they are discrete variables and thus have no meaningful gradient, we cannot use NUTS for sampling `switchpoint` or the missing disaster observations. Instead, we will sample using a `Metropolis` step method, which implements adaptive Metropolis-Hastings, because it is designed to handle discrete values. `PyMC3` automatically assigns the correct sampling algorithms.
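For illustration, the step method for the discrete `switchpoint` could also be requested explicitly, with the remaining variables still assigned automatically. This is only a sketch; the cell below relies entirely on the automatic assignment:

```python
from pymc3 import Metropolis, sample

with disaster_model:
    # explicitly use Metropolis for the discrete switchpoint; PyMC3 assigns
    # samplers for the remaining variables (including the missing values)
    step = Metropolis([switchpoint])
    trace = sample(10000, step=step)
```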
###Code
from pymc3 import Metropolis
with disaster_model:
trace = sample(10000)
###Output
Assigned Metropolis to switchpoint
Assigned NUTS to early_rate_log_
Assigned NUTS to late_rate_log_
Assigned Metropolis to disasters_missing
100%|██████████| 10000/10000 [00:13<00:00, 758.15it/s]
###Markdown
In the trace plot below we can see that there's about a 10 year span that's plausible for a significant change in safety, but a 5 year span that contains most of the probability mass. The distribution is jagged because of the jumpy relationship between the year switchpoint and the likelihood and not due to sampling error.
###Code
traceplot(trace);
###Output
_____no_output_____
###Markdown
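To put numbers on that span, we can summarize the switchpoint posterior directly; a minimal sketch using the `hpd` helper (assuming it is exposed at the top level of this PyMC3 version):

```python
from pymc3 import hpd
print(trace['switchpoint'].mean())  # posterior mean of the change year
print(hpd(trace['switchpoint']))    # 95% highest posterior density interval
```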
Arbitrary deterministicsDue to its reliance on Theano, PyMC3 provides many mathematical functions and operators for transforming random variables into new random variables. However, the library of functions in Theano is not exhaustive, therefore Theano and PyMC3 provide functionality for creating arbitrary Theano functions in pure Python, and including these functions in PyMC models. This is supported with the `as_op` function decorator.Theano needs to know the types of the inputs and outputs of a function, which are specified for `as_op` by `itypes` for inputs and `otypes` for outputs. The Theano documentation includes [an overview of the available types](http://deeplearning.net/software/theano/library/tensor/basic.html#all-fully-typed-constructors).
###Code
import theano.tensor as T
from theano.compile.ops import as_op
@as_op(itypes=[T.lscalar], otypes=[T.lscalar])
def crazy_modulo3(value):
if value > 0:
return value % 3
else :
return (-value + 1) % 3
with Model() as model_deterministic:
a = Poisson('a', 1)
b = crazy_modulo3(a)
###Output
_____no_output_____
###Markdown
An important drawback of this approach is that it is not possible for `theano` to inspect these functions in order to compute the gradient required for the Hamiltonian-based samplers. Therefore, it is not possible to use the HMC or NUTS samplers for a model that uses such an operator. However, it is possible to add a gradient if we inherit from `theano.Op` instead of using `as_op`. The PyMC example set includes [a more elaborate example of the usage of as_op](https://github.com/pymc-devs/pymc3/blob/master/pymc3/examples/disaster_model_arbitrary_deterministic.py). Arbitrary distributionsSimilarly, the library of statistical distributions in PyMC3 is not exhaustive, but PyMC allows for the creation of user-defined functions for an arbitrary probability distribution. For simple statistical distributions, the `DensityDist` function takes as an argument any function that calculates a log-probability $log(p(x))$. This function may employ other random variables in its calculation. Here is an example inspired by a blog post by Jake Vanderplas on which priors to use for a linear regression (Vanderplas, 2014). ```pythonimport theano.tensor as Tfrom pymc3 import DensityDist, Uniformwith Model() as model: alpha = Uniform('intercept', -100, 100) Create custom densities beta = DensityDist('beta', lambda value: -1.5 * T.log(1 + value**2), testval=0) eps = DensityDist('eps', lambda value: -T.log(T.abs_(value)), testval=1) Create likelihood like = Normal('y_est', mu=alpha + beta * X, sd=eps, observed=Y)``` For more complex distributions, one can create a subclass of `Continuous` or `Discrete` and provide the custom `logp` function, as required. This is how the built-in distributions in PyMC are specified. As an example, fields like psychology and astrophysics have complex likelihood functions for a particular process that may require numerical approximation. In these cases, it is impossible to write the function in terms of predefined theano operators and we must use a custom theano operator using `as_op` or inheriting from `theano.Op`. Implementing the `beta` variable above as a `Continuous` subclass is shown below, along with a sub-function using the `as_op` decorator, though this is not strictly necessary.
###Code
from pymc3.distributions import Continuous
class Beta(Continuous):
def __init__(self, mu, *args, **kwargs):
super(Beta, self).__init__(*args, **kwargs)
self.mu = mu
self.mode = mu
def logp(self, value):
mu = self.mu
return beta_logp(value - mu)
@as_op(itypes=[T.dscalar], otypes=[T.dscalar])
def beta_logp(value):
return -1.5 * np.log(1 + (value)**2)
with Model() as model:
beta = Beta('slope', mu=0, testval=0)
###Output
_____no_output_____
###Markdown
Generalized Linear ModelsGeneralized Linear Models (GLMs) are a class of flexible models that are widely used to estimate regression relationships between a single outcome variable and one or multiple predictors. Because these models are so common, `PyMC3` offers a `glm` submodule that allows flexible creation of various GLMs with an intuitive `R`-like syntax that is implemented via the `patsy` module.The `glm` submodule requires data to be included as a `pandas` `DataFrame`. Hence, for our linear regression example:
###Code
# Convert X and Y to a pandas DataFrame
import pandas
df = pandas.DataFrame({'x1': X1, 'x2': X2, 'y': Y})
###Output
_____no_output_____
###Markdown
The model can then be very concisely specified in one line of code.
###Code
from pymc3.glm import glm
with Model() as model_glm:
glm('y ~ x1 + x2', df)
trace = sample(5000)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using advi...
Average ELBO = -190.04: 100%|██████████| 200000/200000 [00:21<00:00, 9506.68it/s]
Finished [100%]: Average ELBO = -190.04
100%|██████████| 5000/5000 [00:07<00:00, 701.65it/s]
###Markdown
The error distribution, if not specified via the `family` argument, is assumed to be normal. In the case of logistic regression, this can be modified by passing in a `Binomial` family object.
###Code
from pymc3.glm.families import Binomial
df_logistic = pandas.DataFrame({'x1': X1, 'y': Y > np.median(Y)})
with Model() as model_glm_logistic:
glm('y ~ x1', df_logistic, family=Binomial())
###Output
_____no_output_____
###Markdown
Backends`PyMC3` has support for different ways to store samples during and after sampling, called backends, including in-memory (default), text file, and SQLite. These can be found in `pymc3.backends`:By default, an in-memory `ndarray` is used, but if the samples would get too large to be held in memory we could use the `sqlite` backend:
###Code
from pymc3.backends import SQLite
with Model() as model_glm_logistic:
glm('y ~ x1', df_logistic, family=Binomial())
backend = SQLite('trace.sqlite')
trace = sample(5000, trace=backend)
summary(trace, varnames=['x1'])
###Output
x1:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.000 0.129 0.001 [-0.253, 0.250]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
-0.251 -0.089 0.001 0.090 0.253
###Markdown
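The text-file backend works the same way; a minimal sketch, assuming the `Text` backend from `pymc3.backends`, which writes one CSV file per chain into the named directory:

```python
from pymc3.backends import Text

with model_glm_logistic:
    backend = Text('text_trace')  # directory that will hold the chain CSV files
    trace = sample(5000, trace=backend)
```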
The stored trace can then later be loaded using the `load` command:
###Code
from pymc3.backends.sqlite import load
with basic_model:
trace_loaded = load('trace.sqlite')
###Output
_____no_output_____
###Markdown
Getting started with PyMC3Authors: John Salvatier, Thomas V. Wiecki, Christopher FonnesbeckNote: This text is based on the [PeerJ CS publication on PyMC3](https://peerj.com/articles/cs-55/). AbstractProbabilistic Programming allows for automatic Bayesian inference on user-defined probabilistic models. Recent advances in Markov chain Monte Carlo (MCMC) sampling allow inference on increasingly complex models. This class of MCMC, known as Hamiltonian Monte Carlo, requires gradient information which is often not readily available. PyMC3 is a new open source Probabilistic Programming framework written in Python that uses Theano to compute gradients via automatic differentiation as well as compile probabilistic programs on-the-fly to C for increased speed. Contrary to other Probabilistic Programming languages, PyMC3 allows model specification directly in Python code. The lack of a domain specific language allows for great flexibility and direct interaction with the model. This paper is a tutorial-style introduction to this software package. IntroductionProbabilistic programming (PP) allows flexible specification of Bayesian statistical models in code. PyMC3 is a new, open-source PP framework with an intuitive and readable, yet powerful, syntax that is close to the natural syntax statisticians use to describe models. It features next-generation Markov chain Monte Carlo (MCMC) sampling algorithms such as the No-U-Turn Sampler (NUTS; Hoffman, 2014), a self-tuning variant of Hamiltonian Monte Carlo (HMC; Duane, 1987). This class of samplers works well on high dimensional and complex posterior distributions and allows many complex models to be fit without specialized knowledge about fitting algorithms. HMC and NUTS take advantage of gradient information from the likelihood to achieve much faster convergence than traditional sampling methods, especially for larger models. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo, which means you usually don't need to have specialized knowledge about how the algorithms work. PyMC3, Stan (Stan Development Team, 2014), and the LaplacesDemon package for R are currently the only PP packages to offer HMC.Probabilistic programming in Python confers a number of advantages including multi-platform compatibility, an expressive yet clean and readable syntax, easy integration with other scientific libraries, and extensibility via C, C++, Fortran or Cython. These features make it relatively straightforward to write and use custom statistical distributions, samplers and transformation functions, as required by Bayesian analysis.While most of PyMC3's user-facing features are written in pure Python, it leverages Theano (Bergstra et al., 2010) to transparently transcode models to C and compile them to machine code, thereby boosting performance. Theano is a library that allows expressions to be defined using generalized vector data structures called *tensors*, which are tightly integrated with the popular NumPy `ndarray` data structure, and similarly allow for broadcasting and advanced indexing, just as NumPy arrays do. Theano also automatically optimizes the likelihood's computational graph for speed and provides simple GPU integration.Here, we present a primer on the use of PyMC3 for solving general Bayesian statistical inference and prediction problems. 
We will first see the basics of how to use PyMC3, motivated by a simple example: installation, data creation, model definition, model fitting and posterior analysis. Then we will cover two case studies and use them to show how to define and fit more sophisticated models. Finally we will show how to extend PyMC3 and discuss other useful features: the Generalized Linear Models subpackage, custom distributions, custom transformations and alternative storage backends. InstallationRunning PyMC3 requires a working Python interpreter, either version 2.7 (or more recent) or 3.5 (or more recent); we recommend that new users install version 3.5. A complete Python installation for Mac OSX, Linux and Windows can most easily be obtained by downloading and installing the free [`Anaconda Python Distribution`](https://store.continuum.io/cshop/anaconda/) by ContinuumIO. `PyMC3` can be installed using `pip` (https://pip.pypa.io/en/latest/installing.html):```pip install pymc3```Or via conda:```conda install pymc3```The current development branch of PyMC3 can be installed from GitHub, also using pip:```pip install git+https://github.com/pymc-devs/pymc3```The source code for PyMC3 is hosted on GitHub at https://github.com/pymc-devs/pymc3 and is distributed under the liberal [Apache License 2.0](https://github.com/pymc-devs/pymc3/blob/master/LICENSE). On the GitHub site, users may also report bugs and other issues, as well as contribute documentation or code to the project, which we actively encourage. A Motivating Example: Linear RegressionTo introduce model definition, fitting and posterior analysis, we first consider a simple Bayesian linear regression model with normal priors for the parameters. We are interested in predicting outcomes $Y$ as normally-distributed observations with an expected value $\mu$ that is a linear function of two predictor variables, $X_1$ and $X_2$.$$\begin{aligned} Y &\sim \mathcal{N}(\mu, \sigma^2) \\\mu &= \alpha + \beta_1 X_1 + \beta_2 X_2\end{aligned}$$where $\alpha$ is the intercept, and $\beta_i$ is the coefficient for covariate $X_i$, while $\sigma$ represents the observation error. Since we are constructing a Bayesian model, we must assign a prior distribution to the unknown variables in the model. We choose zero-mean normal priors with variance of 100 for both regression coefficients, which corresponds to *weak* information regarding the true parameter values. We choose a half-normal distribution (normal distribution bounded at zero) as the prior for $\sigma$.$$\begin{aligned} \alpha &\sim \mathcal{N}(0, 100) \\\beta_i &\sim \mathcal{N}(0, 100) \\\sigma &\sim \lvert\mathcal{N}(0, 1){\rvert}\end{aligned}$$ Generating dataWe can simulate some artificial data from this model using only NumPy's `random` module, and then use PyMC3 to try to recover the corresponding parameters. We are intentionally generating the data to closely correspond the PyMC3 model structure.
###Code
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
# Initialize random number generator
np.random.seed(123)
# True parameter values
alpha, sigma = 1, 1
beta = [1, 2.5]
# Size of dataset
size = 100
# Predictor variable
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
# Simulate outcome variable
Y = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma
###Output
_____no_output_____
###Markdown
Here is what the simulated data look like. We use the `pylab` module from the plotting library matplotlib.
###Code
%matplotlib inline
fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,4))
axes[0].scatter(X1, Y)
axes[1].scatter(X2, Y)
axes[0].set_ylabel('Y'); axes[0].set_xlabel('X1'); axes[1].set_xlabel('X2');
###Output
_____no_output_____
###Markdown
Model SpecificationSpecifying this model in PyMC3 is straightforward because the syntax is as close as possible to the statistical notation. For the most part, each line of Python code corresponds to a line in the model notation above. First, we import PyMC3. We use the convention of importing it as `pm`.
###Code
import pymc3 as pm
print('Running on PyMC3 v{}'.format(pm.__version__))
###Output
Running on PyMC3 v3.6
###Markdown
Now we build our model, which we will present in full first, then explain each part line-by-line.
###Code
basic_model = pm.Model()
with basic_model:
# Priors for unknown model parameters
alpha = pm.Normal('alpha', mu=0, sigma=10)
beta = pm.Normal('beta', mu=0, sigma=10, shape=2)
sigma = pm.HalfNormal('sigma', sigma=1)
# Expected value of outcome
mu = alpha + beta[0]*X1 + beta[1]*X2
# Likelihood (sampling distribution) of observations
Y_obs = pm.Normal('Y_obs', mu=mu, sigma=sigma, observed=Y)
###Output
_____no_output_____
###Markdown
The first line,```pythonbasic_model = Model()```creates a new `Model` object which is a container for the model random variables.Following instantiation of the model, the subsequent specification of the model components is performed inside a `with` statement:```pythonwith basic_model:```This creates a *context manager*, with our `basic_model` as the context, that includes all statements until the indented block ends. This means all PyMC3 objects introduced in the indented code block below the `with` statement are added to the model behind the scenes. Absent this context manager idiom, we would be forced to manually associate each of the variables with `basic_model` right after we create them. If you try to create a new random variable without a `with model:` statement, it will raise an error since there is no obvious model for the variable to be added to.The first three statements in the context manager:```pythonalpha = Normal('alpha', mu=0, sigma=10)beta = Normal('beta', mu=0, sigma=10, shape=2)sigma = HalfNormal('sigma', sigma=1)```create **stochastic** random variables with Normal prior distributions for the regression coefficients with a mean of 0 and standard deviation of 10, and a half-normal distribution for the standard deviation of the observations, $\sigma$. These are stochastic because their values are partly determined by its parents in the dependency graph of random variables, which for priors are simple constants, and partly random (or stochastic). We call the `Normal` constructor to create a random variable to use as a normal prior. The first argument is always the *name* of the random variable, which should almost always match the name of the Python variable being assigned to, since it is sometimes used to retrieve the variable from the model for summarizing output. The remaining required arguments for a stochastic object are the parameters, in this case `mu`, the mean, and `sd`, the standard deviation, which we assign hyperparameter values for the model. In general, a distribution's parameters are values that determine the location, shape or scale of the random variable, depending on the parameterization of the distribution. Most commonly used distributions, such as `Beta`, `Exponential`, `Categorical`, `Gamma`, `Binomial` and many others, are available in PyMC3.The `beta` variable has an additional `shape` argument to denote it as a vector-valued parameter of size 2. The `shape` argument is available for all distributions and specifies the length or shape of the random variable, but is optional for scalar variables, since it defaults to a value of one. It can be an integer, to specify an array, or a tuple, to specify a multidimensional array (*e.g.* `shape=(5,7)` makes random variable that takes on 5 by 7 matrix values). Detailed notes about distributions, sampling methods and other PyMC3 functions are available in the [API documentation](https://docs.pymc.io/api.html). Having defined the priors, the next statement creates the expected value `mu` of the outcomes, specifying the linear relationship:```pythonmu = alpha + beta[0]*X1 + beta[1]*X2```This creates a **deterministic** random variable, which implies that its value is *completely* determined by its parents' values. That is, there is no uncertainty beyond that which is inherent in the parents' values. Here, `mu` is just the sum of the intercept `alpha` and the two products of the coefficients in `beta` and the predictor variables, whatever their values may be. 
PyMC3 random variables and data can be arbitrarily added, subtracted, divided, multiplied together and indexed-into to create new random variables. This allows for great model expressivity. Many common mathematical functions like `sum`, `sin`, `exp` and linear algebra functions like `dot` (for inner product) and `inv` (for inverse) are also provided. The final line of the model, defines `Y_obs`, the sampling distribution of the outcomes in the dataset.```pythonY_obs = Normal('Y_obs', mu=mu, sigma=sigma, observed=Y)```This is a special case of a stochastic variable that we call an **observed stochastic**, and represents the data likelihood of the model. It is identical to a standard stochastic, except that its `observed` argument, which passes the data to the variable, indicates that the values for this variable were observed, and should not be changed by any fitting algorithm applied to the model. The data can be passed in the form of either a `numpy.ndarray` or `pandas.DataFrame` object.Notice that, unlike for the priors of the model, the parameters for the normal distribution of `Y_obs` are not fixed values, but rather are the deterministic object `mu` and the stochastic `sigma`. This creates parent-child relationships between the likelihood and these two variables. Model fittingHaving completely specified our model, the next step is to obtain posterior estimates for the unknown variables in the model. Ideally, we could calculate the posterior estimates analytically, but for most non-trivial models, this is not feasible. We will consider two approaches, whose appropriateness depends on the structure of the model and the goals of the analysis: finding the *maximum a posteriori* (MAP) point using optimization methods, and computing summaries based on samples drawn from the posterior distribution using Markov Chain Monte Carlo (MCMC) sampling methods. Maximum a posteriori methodsThe **maximum a posteriori (MAP)** estimate for a model, is the mode of the posterior distribution and is generally found using numerical optimization methods. This is often fast and easy to do, but only gives a point estimate for the parameters and can be biased if the mode isn't representative of the distribution. PyMC3 provides this functionality with the `find_MAP` function.Below we find the MAP for our original model. The MAP is returned as a parameter **point**, which is always represented by a Python dictionary of variable names to NumPy arrays of parameter values.
###Code
map_estimate = pm.find_MAP(model=basic_model)
map_estimate
###Output
/Users/twiecki/working/projects/pymc/pymc3/tuning/starting.py:61: UserWarning: find_MAP should not be used to initialize the NUTS sampler, simply call pymc3.sample() and it will automatically initialize NUTS in a better way.
warnings.warn('find_MAP should not be used to initialize the NUTS sampler, simply call pymc3.sample() and it will automatically initialize NUTS in a better way.')
logp = -149.58, ||grad|| = 12.242: 100%|██████████| 19/19 [00:00<00:00, 1478.46it/s]
###Markdown
By default, `find_MAP` uses the Broyden–Fletcher–Goldfarb–Shanno (BFGS) optimization algorithm to find the maximum of the log-posterior but also allows selection of other optimization algorithms from the `scipy.optimize` module. For example, below we use Powell's method to find the MAP.
###Code
map_estimate = pm.find_MAP(model=basic_model, method='powell')
map_estimate
###Output
/Users/twiecki/working/projects/pymc/pymc3/tuning/starting.py:61: UserWarning: find_MAP should not be used to initialize the NUTS sampler, simply call pymc3.sample() and it will automatically initialize NUTS in a better way.
warnings.warn('find_MAP should not be used to initialize the NUTS sampler, simply call pymc3.sample() and it will automatically initialize NUTS in a better way.')
0%| | 0/5000 [00:00<?, ?it/s]/Users/twiecki/anaconda3/lib/python3.6/site-packages/scipy/optimize/_minimize.py:502: RuntimeWarning: Method powell does not use gradient information (jac).
RuntimeWarning)
logp = -149.47, ||grad|| = 13.248: 100%|██████████| 177/177 [00:00<00:00, 1276.05it/s]
###Markdown
It is important to note that the MAP estimate is not always reasonable, especially if the mode is at an extreme. This can be a subtle issue; with high dimensional posteriors, one can have areas of extremely high density but low total probability because the volume is very small. This will often occur in hierarchical models with the variance parameter for the random effect. If the individual group means are all the same, the posterior will have near infinite density if the scale parameter for the group means is almost zero, even though the probability of such a small scale parameter will be small since the group means must be extremely close together. Most techniques for finding the MAP estimate also only find a *local* optimum (which is often good enough), but can fail badly for multimodal posteriors if the different modes are meaningfully different.In summary, while PyMC3 provides the function `find_MAP()`, at this point mostly for historical reasons, this function is of little use in most scenarios. If you want a point estimate you should get it from the posterior. In the next section we will see how to get a posterior using sampling methods. Sampling methodsThough finding the MAP is a fast and easy way of obtaining estimates of the unknown model parameters, it is limited because there is no associated estimate of uncertainty produced with the MAP estimates. Instead, a simulation-based approach such as Markov chain Monte Carlo (MCMC) can be used to obtain a Markov chain of values that, given the satisfaction of certain conditions, are indistinguishable from samples from the _true_ posterior distribution. To conduct MCMC sampling to generate posterior samples in PyMC3, we specify a **step method** object that corresponds to a particular MCMC algorithm, such as Metropolis, Slice sampling, or the No-U-Turn Sampler (NUTS). PyMC3's `step_methods` submodule contains the following samplers: `NUTS`, `Metropolis`, `Slice`, `HamiltonianMC`, and `BinaryMetropolis`. These step methods can be assigned manually, or assigned automatically by PyMC3. Auto-assignment is based on the attributes of each variable in the model. In general:* Binary variables will be assigned to `BinaryMetropolis`* Discrete variables will be assigned to `Metropolis`* Continuous variables will be assigned to `NUTS`Auto-assignment can be overriden for any subset of variables by specifying them manually prior to sampling. Gradient-based sampling methodsPyMC3 has the standard sampling algorithms like adaptive Metropolis-Hastings and adaptive slice sampling, but PyMC3's most capable step method is the No-U-Turn Sampler. NUTS is especially useful on models that have many continuous parameters, a situation where other MCMC algorithms work very slowly. It takes advantage of information about where regions of higher probability are, based on the gradient of the log posterior-density. This helps it achieve dramatically faster convergence on large problems than traditional sampling methods achieve. PyMC3 relies on Theano to analytically compute model gradients via automatic differentiation of the posterior density. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo. For random variables that are undifferentiable (namely, discrete variables) NUTS cannot be used, but it may still be used on the differentiable variables in a model that contains undifferentiable variables. 
NUTS requires a scaling matrix parameter, which is analogous to the variance parameter for the jump proposal distribution in Metropolis-Hastings, although NUTS uses it somewhat differently. The matrix gives the rough shape of the distribution so that NUTS does not make jumps that are too large in some directions and too small in other directions. It is important to set this scaling parameter to a reasonable value to facilitate efficient sampling. This is especially true for models that have many unobserved stochastic random variables or models with highly non-normal posterior distributions. Poor scaling parameters will slow down NUTS significantly, sometimes almost stopping it completely. A reasonable starting point for sampling can also be important for efficient sampling, but not as often.`PyMC3` automatically initializes NUTS to reasonable values based on the variance of the samples obtained during a tuning phase. A little bit of noise is added to ensure different, parallel, chains start from different points. Also, `PyMC3` will automatically assign an appropriate sampler if we don't supply it via the `step` keyword argument (see below for an example of how to explicitly assign step methods).
###Code
with basic_model:
# draw 500 posterior samples
trace = pm.sample(500)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [sigma, beta, alpha]
Sampling 2 chains: 100%|██████████| 2000/2000 [00:01<00:00, 1479.97draws/s]
/Users/twiecki/anaconda3/lib/python3.6/site-packages/mkl_fft/_numpy_fft.py:1044: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
output = mkl_fft.rfftn_numpy(a, s, axes)
###Markdown
The `sample` function runs the step method(s) assigned (or passed) to it for the given number of iterations and returns a `Trace` object containing the samples collected, in the order they were collected. The `trace` object can be queried in a similar way to a `dict` containing a map from variable names to `numpy.array`s. The first dimension of the array is the sampling index and the later dimensions match the shape of the variable. We can see the last 5 values for the `alpha` variable as follows:
###Code
trace['alpha'][-5:]
###Output
_____no_output_____
###Markdown
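We can also confirm the shapes described above: `alpha` is a scalar, while `beta` was declared with `shape=2`, so its samples form a two-dimensional array.

```python
print(trace['alpha'].shape)  # (number of draws,)
print(trace['beta'].shape)   # (number of draws, 2)
```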
If we wanted to use the slice sampling algorithm for `sigma` instead of NUTS (which was assigned automatically), we could have specified this as the `step` argument for `sample`.
###Code
with basic_model:
# instantiate sampler
step = pm.Slice()
# draw 5000 posterior samples
trace = pm.sample(5000, step=step)
###Output
Multiprocess sampling (2 chains in 2 jobs)
CompoundStep
>Slice: [sigma]
>Slice: [beta]
>Slice: [alpha]
Sampling 2 chains: 100%|██████████| 11000/11000 [00:09<00:00, 1156.62draws/s]
/Users/twiecki/anaconda3/lib/python3.6/site-packages/mkl_fft/_numpy_fft.py:1044: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
output = mkl_fft.rfftn_numpy(a, s, axes)
###Markdown
Posterior analysis`PyMC3` provides plotting and summarization functions for inspecting the sampling output. A simple posterior plot can be created using `traceplot`.
###Code
pm.traceplot(trace);
###Output
_____no_output_____
###Markdown
The left column consists of a smoothed histogram (using kernel density estimation) of the marginal posteriors of each stochastic random variable while the right column contains the samples of the Markov chain plotted in sequential order. The `beta` variable, being vector-valued, produces two histograms and two sample traces, corresponding to both predictor coefficients.In addition, the `summary` function provides a text-based output of common posterior statistics:
###Code
pm.summary(trace).round(2)
###Output
/Users/twiecki/anaconda3/lib/python3.6/site-packages/mkl_fft/_numpy_fft.py:1044: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
output = mkl_fft.rfftn_numpy(a, s, axes)
###Markdown
Case study 1: Stochastic volatilityWe present a case study of stochastic volatility, time varying stock market volatility, to illustrate PyMC3's use in addressing a more realistic problem. The distribution of market returns is highly non-normal, which makes sampling the volatilities significantly more difficult. This example has 400+ parameters so using common sampling algorithms like Metropolis-Hastings would get bogged down, generating highly autocorrelated samples. Instead, we use NUTS, which is dramatically more efficient. The ModelAsset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others they are very stable. Stochastic volatility models address this with a latent volatility variable, which changes over time. The following model is similar to the one described in the NUTS paper (Hoffman 2014, p. 21).$$\begin{aligned} \nu &\sim exp(0.1) \\ \sigma &\sim exp(50) \\ s_i &\sim \mathcal{N}(s_{i-1}, \sigma^2) \\ log(r_i) &\sim t(\nu, 0, exp(-2 s_i))\end{aligned}$$Here, $r$ is the daily return series which is modeled with a Student-t distribution with an unknown degrees of freedom parameter, and a scale parameter determined by a latent process $s$. The individual $s_i$ are the individual daily log volatilities in the latent log volatility process. The DataOur data consist of 401 daily returns of the S&P 500 stock market index during the 2008 financial crisis.
###Code
import pandas as pd
returns = pd.read_csv(pm.get_data('SP500.csv'), parse_dates=True, index_col=0)
len(returns)
returns.plot(figsize=(10, 6))
plt.ylabel('daily returns in %');
###Output
_____no_output_____
###Markdown
Model SpecificationAs with the linear regression example, specifying the model in PyMC3 mirrors its statistical specification. This model employs several new distributions: the `Exponential` distribution for the $\nu$ and $\sigma$ priors, the Student-T (`StudentT`) distribution for the distribution of returns, and the `GaussianRandomWalk` for the prior for the latent volatilities. In PyMC3, variables with purely positive priors like `Exponential` are transformed with a log transform. This makes sampling more robust. Behind the scenes, a variable in the unconstrained space (named "variableName_log") is added to the model for sampling. In this model this happens behind the scenes for both the degrees of freedom, `nu`, and the scale parameter for the volatility process, `sigma`, since they both have exponential priors. Variables with priors that constrain them on two sides, like `Beta` or `Uniform`, are also transformed to be unconstrained but with a log odds transform. Although, unlike model specification in PyMC2, we do not typically provide starting points for variables at the model specification stage, we can also provide an initial value for any distribution (called a "test value") using the `testval` argument. This overrides the default test value for the distribution (usually the mean, median or mode of the distribution), and is most often useful if some values are illegal and we want to ensure we select a legal one. The test values for the distributions are also used as a starting point for sampling and optimization by default, though this is easily overridden. The vector of latent volatilities `s` is given a prior distribution by `GaussianRandomWalk`. As its name suggests, GaussianRandomWalk is a vector-valued distribution where the values of the vector form a random normal walk of length n, as specified by the `shape` argument. The scale of the innovations of the random walk, `sigma`, is specified in terms of the standard deviation of the normally distributed innovations and can be a scalar or vector.
###Code
with pm.Model() as sp500_model:
nu = pm.Exponential('nu', 1/10., testval=5.)
sigma = pm.Exponential('sigma', 1/0.02, testval=.1)
s = pm.GaussianRandomWalk('s', sigma=sigma, shape=len(returns))
volatility_process = pm.Deterministic('volatility_process', pm.math.exp(-2*s)**0.5)
r = pm.StudentT('r', nu=nu, sigma=volatility_process, observed=returns['change'])
###Output
_____no_output_____
###Markdown
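As a quick check of the automatic transformations described above, we can list the model's free (sampled) variables; a minimal sketch (the exact suffix used for the transformed names, e.g. `_log__`, depends on the PyMC3 version):

```python
# nu and sigma are expected to show up under transformed names, s is sampled directly
print(sp500_model.free_RVs)
```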
Notice that we transform the log volatility process `s` into the volatility process by `exp(-2*s)`. Here, `exp` is a Theano function, rather than the corresponding function in NumPy; Theano provides a large subset of the mathematical functions that NumPy does.Also note that we have declared the `Model` name `sp500_model` in the first occurrence of the context manager, rather than splitting it into two lines, as we did for the first example. Fitting
###Code
with sp500_model:
trace = pm.sample(2000)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [s, sigma, nu]
Sampling 2 chains: 100%|██████████| 5000/5000 [02:04<00:00, 40.14draws/s]
/Users/twiecki/anaconda3/lib/python3.6/site-packages/mkl_fft/_numpy_fft.py:1044: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
output = mkl_fft.rfftn_numpy(a, s, axes)
The estimated number of effective samples is smaller than 200 for some parameters.
###Markdown
We can check our samples by looking at the traceplot for `nu` and `sigma`.
###Code
pm.traceplot(trace, varnames=['nu', 'sigma']);
###Output
/Users/twiecki/working/projects/pymc/pymc3/plots/__init__.py:40: UserWarning: Keyword argument `varnames` renamed to `var_names`, and will be removed in pymc3 3.8
warnings.warn('Keyword argument `{old}` renamed to `{new}`, and will be removed in pymc3 3.8'.format(old=old, new=new))
###Markdown
Finally we plot the distribution of volatility paths by plotting many of our sampled volatility paths on the same graph. Each is rendered partially transparent (via the `alpha` argument in Matplotlib's `plot` function) so the regions where many paths overlap are shaded more darkly.
###Code
fig, ax = plt.subplots(figsize=(15, 8))
returns.plot(ax=ax)
ax.plot(returns.index, 1/np.exp(trace['s',::5].T), 'C3', alpha=.03);
ax.set(title='volatility_process', xlabel='time', ylabel='volatility');
ax.legend(['S&P500', 'stochastic volatility process']);
###Output
_____no_output_____
###Markdown
As you can see, the model correctly infers the increase in volatility during the 2008 financial crash. Moreover, note that this model is quite complex because of its high dimensionality and dependency-structure in the random walk distribution. NUTS as implemented in PyMC3, however, correctly infers the posterior distribution with ease. Case study 2: Coal mining disastersConsider the following time series of recorded coal mining disasters in the UK from 1851 to 1962 (Jarrett, 1979). The number of disasters is thought to have been affected by changes in safety regulations during this period. Unfortunately, we also have a pair of years with missing data, identified as missing by a `nan` in the pandas `Series`. These missing values will be automatically imputed by `PyMC3`. Next we will build a model for this series and attempt to estimate when the change occurred. At the same time, we will see how to handle missing data, use multiple samplers and sample from discrete random variables.
###Code
import pandas as pd
disaster_data = pd.Series([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, np.nan, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, np.nan, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])
years = np.arange(1851, 1962)
plt.plot(years, disaster_data, 'o', markersize=8);
plt.ylabel("Disaster count")
plt.xlabel("Year");
###Output
_____no_output_____
###Markdown
Occurrences of disasters in the time series are thought to follow a Poisson process with a large rate parameter in the early part of the time series, and one with a smaller rate in the later part. We are interested in locating the change point in the series, which perhaps is related to changes in mining safety regulations.In our model, $$ \begin{aligned} D_t &\sim \text{Pois}(r_t), r_t= \begin{cases} e, & \text{if } t \le s \\ l, & \text{if } t \gt s \end{cases} \\ s &\sim \text{Unif}(t_l, t_h)\\ e &\sim \text{exp}(1)\\ l &\sim \text{exp}(1) \end{aligned}$$the parameters are defined as follows: * $D_t$: The number of disasters in year $t$ * $r_t$: The rate parameter of the Poisson distribution of disasters in year $t$. * $s$: The year in which the rate parameter changes (the switchpoint). * $e$: The rate parameter before the switchpoint $s$. * $l$: The rate parameter after the switchpoint $s$. * $t_l$, $t_h$: The lower and upper boundaries of year $t$. This model is built much like our previous models. The major differences are the introduction of discrete variables with the Poisson and discrete-uniform priors and the novel form of the deterministic random variable `rate`.
###Code
with pm.Model() as disaster_model:
switchpoint = pm.DiscreteUniform('switchpoint', lower=years.min(), upper=years.max(), testval=1900)
# Priors for pre- and post-switch rates number of disasters
early_rate = pm.Exponential('early_rate', 1)
late_rate = pm.Exponential('late_rate', 1)
# Allocate appropriate Poisson rates to years before and after current
rate = pm.math.switch(switchpoint >= years, early_rate, late_rate)
disasters = pm.Poisson('disasters', rate, observed=disaster_data)
###Output
/Users/twiecki/working/projects/pymc/pymc3/model.py:1277: UserWarning: Data in disasters contains missing values and will be automatically imputed from the sampling distribution.
warnings.warn(impute_message, UserWarning)
###Markdown
The logic for the rate random variable,```pythonrate = switch(switchpoint >= years, early_rate, late_rate)```is implemented using `switch`, a Theano function that works like an if statement. It uses the first argument to switch between the next two arguments.Missing values are handled transparently by passing a `MaskedArray` or a `pandas.DataFrame` with NaN values to the `observed` argument when creating an observed stochastic random variable. Behind the scenes, another random variable, `disasters.missing_values` is created to model the missing values. Unfortunately because they are discrete variables and thus have no meaningful gradient, we cannot use NUTS for sampling `switchpoint` or the missing disaster observations. Instead, we will sample using a `Metropolis` step method, which implements adaptive Metropolis-Hastings, because it is designed to handle discrete values. `PyMC3` automatically assigns the correct sampling algorithms.
###Code
with disaster_model:
trace = pm.sample(10000)
###Output
Multiprocess sampling (2 chains in 2 jobs)
CompoundStep
>CompoundStep
>>Metropolis: [disasters_missing]
>>Metropolis: [switchpoint]
>NUTS: [late_rate, early_rate]
Sampling 2 chains: 100%|██████████| 21000/21000 [00:14<00:00, 1400.63draws/s]
/Users/twiecki/anaconda3/lib/python3.6/site-packages/mkl_fft/_numpy_fft.py:1044: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
output = mkl_fft.rfftn_numpy(a, s, axes)
The number of effective samples is smaller than 10% for some parameters.
###Markdown
In the trace plot below we can see that there's about a 10 year span that's plausible for a significant change in safety, but a 5 year span that contains most of the probability mass. The distribution is jagged because of the jumpy relationship between the year switchpoint and the likelihood and not due to sampling error.
###Code
pm.traceplot(trace);
###Output
_____no_output_____
###Markdown
The following plot shows the switch point as an orange vertical line, together with its HPD as a semitransparent band. The dashed black line shows the accident rate.
###Code
plt.figure(figsize=(10, 8))
plt.plot(years, disaster_data, '.')
plt.ylabel("Number of accidents", fontsize=16)
plt.xlabel("Year", fontsize=16)
plt.vlines(trace['switchpoint'].mean(), disaster_data.min(), disaster_data.max(), color='C1')
average_disasters = np.zeros_like(disaster_data, dtype='float')
for i, year in enumerate(years):
idx = year < trace['switchpoint']
average_disasters[i] = (trace['early_rate'][idx].sum() + trace['late_rate'][~idx].sum()) / (len(trace) * trace.nchains)
sp_hpd = pm.hpd(trace['switchpoint'])
plt.fill_betweenx(y=[disaster_data.min(), disaster_data.max()],
x1=sp_hpd[0], x2=sp_hpd[1], alpha=0.5, color='C1');
plt.plot(years, average_disasters, 'k--', lw=2);
###Output
_____no_output_____
###Markdown
Arbitrary deterministicsDue to its reliance on Theano, PyMC3 provides many mathematical functions and operators for transforming random variables into new random variables. However, the library of functions in Theano is not exhaustive, therefore Theano and PyMC3 provide functionality for creating arbitrary Theano functions in pure Python, and including these functions in PyMC models. This is supported with the `as_op` function decorator.Theano needs to know the types of the inputs and outputs of a function, which are specified for `as_op` by `itypes` for inputs and `otypes` for outputs. The Theano documentation includes [an overview of the available types](http://deeplearning.net/software/theano/library/tensor/basic.html#all-fully-typed-constructors).
###Code
import theano.tensor as tt
from theano.compile.ops import as_op
@as_op(itypes=[tt.lscalar], otypes=[tt.lscalar])
def crazy_modulo3(value):
if value > 0:
return value % 3
else :
return (-value + 1) % 3
with pm.Model() as model_deterministic:
a = pm.Poisson('a', 1)
b = crazy_modulo3(a)
###Output
_____no_output_____
###Markdown
An important drawback of this approach is that it is not possible for `theano` to inspect these functions in order to compute the gradient required for the Hamiltonian-based samplers. Therefore, it is not possible to use the HMC or NUTS samplers for a model that uses such an operator. However, it is possible to add a gradient if we inherit from `theano.Op` instead of using `as_op`. The PyMC example set includes [a more elaborate example of the usage of as_op](https://github.com/pymc-devs/pymc3/blob/master/pymc3/examples/disaster_model_theano_op.py). Arbitrary distributionsSimilarly, the library of statistical distributions in PyMC3 is not exhaustive, but PyMC3 allows for the creation of user-defined functions for an arbitrary probability distribution. For simple statistical distributions, the `DensityDist` function takes as an argument any function that calculates a log-probability $log(p(x))$. This function may employ other random variables in its calculation. Here is an example inspired by a blog post by Jake Vanderplas on which priors to use for a linear regression (Vanderplas, 2014). ```pythonimport theano.tensor as ttwith pm.Model() as model: alpha = pm.Uniform('intercept', -100, 100) Create custom densities beta = pm.DensityDist('beta', lambda value: -1.5 * tt.log(1 + value**2), testval=0) eps = pm.DensityDist('eps', lambda value: -tt.log(tt.abs_(value)), testval=1) Create likelihood like = pm.Normal('y_est', mu=alpha + beta * X, sigma=eps, observed=Y)``` For more complex distributions, one can create a subclass of `Continuous` or `Discrete` and provide the custom `logp` function, as required. This is how the built-in distributions in PyMC are specified. As an example, fields like psychology and astrophysics have complex likelihood functions for a particular process that may require numerical approximation. In these cases, it is impossible to write the function in terms of predefined theano operators and we must use a custom theano operator using `as_op` or inheriting from `theano.Op`. Implementing the `beta` variable above as a `Continuous` subclass is shown below, along with a sub-function.
###Code
class Beta(pm.Continuous):
def __init__(self, mu, *args, **kwargs):
super(Beta, self).__init__(*args, **kwargs)
self.mu = mu
self.mode = mu
def logp(self, value):
mu = self.mu
return beta_logp(value - mu)
def beta_logp(value):
return -1.5 * np.log(1 + (value)**2)
with pm.Model() as model:
beta = Beta('slope', mu=0, testval=0)
###Output
_____no_output_____
###Markdown
If your logp cannot be expressed in Theano, you can decorate the function with `as_op` as follows: `@as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])`. Note that this will create a blackbox Python function that will be much slower and not provide the gradients necessary for e.g. NUTS. Generalized Linear ModelsGeneralized Linear Models (GLMs) are a class of flexible models that are widely used to estimate regression relationships between a single outcome variable and one or multiple predictors. Because these models are so common, `PyMC3` offers a `glm` submodule that allows flexible creation of various GLMs with an intuitive `R`-like syntax that is implemented via the `patsy` module.The `glm` submodule requires data to be included as a `pandas` `DataFrame`. Hence, for our linear regression example:
###Code
# Convert X and Y to a pandas DataFrame
df = pd.DataFrame({'x1': X1, 'x2': X2, 'y': Y})
###Output
_____no_output_____
###Markdown
The model can then be very concisely specified in one line of code.
###Code
from pymc3.glm import GLM
with pm.Model() as model_glm:
GLM.from_formula('y ~ x1 + x2', df)
trace = pm.sample()
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [sd, x2, x1, Intercept]
Sampling 2 chains: 100%|██████████| 2000/2000 [00:01<00:00, 1402.82draws/s]
/Users/twiecki/anaconda3/lib/python3.6/site-packages/mkl_fft/_numpy_fft.py:1044: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
output = mkl_fft.rfftn_numpy(a, s, axes)
###Markdown
The error distribution, if not specified via the `family` argument, is assumed to be normal. In the case of logistic regression, this can be modified by passing in a `Binomial` family object.
###Code
from pymc3.glm.families import Binomial
df_logistic = pd.DataFrame({'x1': X1, 'y': Y > np.median(Y)})
with pm.Model() as model_glm_logistic:
GLM.from_formula('y ~ x1', df_logistic, family=Binomial())
###Output
_____no_output_____
###Markdown
Getting started with PyMC3Authors: John Salvatier, Thomas V. Wiecki, Christopher FonnesbeckNote: This text is based on the [PeerJ CS publication on PyMC3](https://peerj.com/articles/cs-55/). AbstractProbabilistic Programming allows for automatic Bayesian inference on user-defined probabilistic models. Recent advances in Markov chain Monte Carlo (MCMC) sampling allow inference on increasingly complex models. This class of MCMC, known as Hamiltonian Monte Carlo, requires gradient information which is often not readily available. PyMC3 is a new open source Probabilistic Programming framework written in Python that uses Theano to compute gradients via automatic differentiation as well as compile probabilistic programs on-the-fly to C for increased speed. Contrary to other Probabilistic Programming languages, PyMC3 allows model specification directly in Python code. The lack of a domain specific language allows for great flexibility and direct interaction with the model. This paper is a tutorial-style introduction to this software package. IntroductionProbabilistic programming (PP) allows flexible specification of Bayesian statistical models in code. PyMC3 is a new, open-source PP framework with an intuitive and readable, yet powerful, syntax that is close to the natural syntax statisticians use to describe models. It features next-generation Markov chain Monte Carlo (MCMC) sampling algorithms such as the No-U-Turn Sampler (NUTS; Hoffman, 2014), a self-tuning variant of Hamiltonian Monte Carlo (HMC; Duane, 1987). This class of samplers works well on high dimensional and complex posterior distributions and allows many complex models to be fit without specialized knowledge about fitting algorithms. HMC and NUTS take advantage of gradient information from the likelihood to achieve much faster convergence than traditional sampling methods, especially for larger models. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo, which means you usually don't need to have specialized knowledge about how the algorithms work. PyMC3, Stan (Stan Development Team, 2014), and the LaplacesDemon package for R are currently the only PP packages to offer HMC.Probabilistic programming in Python confers a number of advantages including multi-platform compatibility, an expressive yet clean and readable syntax, easy integration with other scientific libraries, and extensibility via C, C++, Fortran or Cython. These features make it relatively straightforward to write and use custom statistical distributions, samplers and transformation functions, as required by Bayesian analysis.While most of PyMC3's user-facing features are written in pure Python, it leverages Theano (Bergstra et al., 2010) to transparently transcode models to C and compile them to machine code, thereby boosting performance. Theano is a library that allows expressions to be defined using generalized vector data structures called *tensors*, which are tightly integrated with the popular NumPy `ndarray` data structure, and similarly allow for broadcasting and advanced indexing, just as NumPy arrays do. Theano also automatically optimizes the likelihood's computational graph for speed and provides simple GPU integration.Here, we present a primer on the use of PyMC3 for solving general Bayesian statistical inference and prediction problems. 
We will first see the basics of how to use PyMC3, motivated by a simple example: installation, data creation, model definition, model fitting and posterior analysis. Then we will cover two case studies and use them to show how to define and fit more sophisticated models. Finally we will show how to extend PyMC3 and discuss other useful features: the Generalized Linear Models subpackage, custom distributions, custom transformations and alternative storage backends. InstallationRunning PyMC3 requires a working Python interpreter, either version 2.7 (or more recent) or 3.5 (or more recent); we recommend that new users install version 3.5. A complete Python installation for Mac OSX, Linux and Windows can most easily be obtained by downloading and installing the free [`Anaconda Python Distribution`](https://store.continuum.io/cshop/anaconda/) by ContinuumIO. `PyMC3` can be installed using `pip` (https://pip.pypa.io/en/latest/installing.html):```pip install pymc3```Or via conda-forge:```conda install -c conda-forge pymc3```The current development branch of PyMC3 can be installed from GitHub, also using pip:```pip install git+https://github.com/pymc-devs/pymc3```The source code for PyMC3 is hosted on GitHub at https://github.com/pymc-devs/pymc3 and is distributed under the liberal [Apache License 2.0](https://github.com/pymc-devs/pymc3/blob/master/LICENSE). On the GitHub site, users may also report bugs and other issues, as well as contribute documentation or code to the project, which we actively encourage. A Motivating Example: Linear RegressionTo introduce model definition, fitting and posterior analysis, we first consider a simple Bayesian linear regression model with normal priors for the parameters. We are interested in predicting outcomes $Y$ as normally-distributed observations with an expected value $\mu$ that is a linear function of two predictor variables, $X_1$ and $X_2$.$$\begin{aligned} Y &\sim \mathcal{N}(\mu, \sigma^2) \\\mu &= \alpha + \beta_1 X_1 + \beta_2 X_2\end{aligned}$$where $\alpha$ is the intercept, and $\beta_i$ is the coefficient for covariate $X_i$, while $\sigma$ represents the observation error. Since we are constructing a Bayesian model, we must assign a prior distribution to the unknown variables in the model. We choose zero-mean normal priors with variance of 100 for both regression coefficients, which corresponds to *weak* information regarding the true parameter values. We choose a half-normal distribution (normal distribution bounded at zero) as the prior for $\sigma$.$$\begin{aligned} \alpha &\sim \mathcal{N}(0, 100) \\\beta_i &\sim \mathcal{N}(0, 100) \\\sigma &\sim \lvert\mathcal{N}(0, 1){\rvert}\end{aligned}$$ Generating dataWe can simulate some artificial data from this model using only NumPy's `random` module, and then use PyMC3 to try to recover the corresponding parameters. We are intentionally generating the data to closely correspond the PyMC3 model structure.
###Code
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
# Initialize random number generator
np.random.seed(123)
# True parameter values
alpha, sigma = 1, 1
beta = [1, 2.5]
# Size of dataset
size = 100
# Predictor variable
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
# Simulate outcome variable
Y = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma
###Output
_____no_output_____
###Markdown
Here is what the simulated data look like. We use the `pyplot` module from the plotting library matplotlib.
###Code
%matplotlib inline
fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,4))
axes[0].scatter(X1, Y)
axes[1].scatter(X2, Y)
axes[0].set_ylabel('Y'); axes[0].set_xlabel('X1'); axes[1].set_xlabel('X2');
###Output
_____no_output_____
###Markdown
Model SpecificationSpecifying this model in PyMC3 is straightforward because the syntax is very close to the statistical notation. For the most part, each line of Python code corresponds to a line in the model notation above. First, we import PyMC3. We use the convention of importing it as `pm`.
###Code
import pymc3 as pm
print('Running on PyMC3 v{}'.format(pm.__version__))
###Output
Running on PyMC3 v3.4.1
###Markdown
Now we build our model, which we will present in full first, then explain each part line-by-line.
###Code
basic_model = pm.Model()
with basic_model:
# Priors for unknown model parameters
alpha = pm.Normal('alpha', mu=0, sd=10)
beta = pm.Normal('beta', mu=0, sd=10, shape=2)
sigma = pm.HalfNormal('sigma', sd=1)
# Expected value of outcome
mu = alpha + beta[0]*X1 + beta[1]*X2
# Likelihood (sampling distribution) of observations
Y_obs = pm.Normal('Y_obs', mu=mu, sd=sigma, observed=Y)
###Output
_____no_output_____
###Markdown
The first line,```pythonbasic_model = Model()```creates a new `Model` object which is a container for the model random variables.Following instantiation of the model, the subsequent specification of the model components is performed inside a `with` statement:```pythonwith basic_model:```This creates a *context manager*, with our `basic_model` as the context, that includes all statements until the indented block ends. This means all PyMC3 objects introduced in the indented code block below the `with` statement are added to the model behind the scenes. Absent this context manager idiom, we would be forced to manually associate each of the variables with `basic_model` right after we create them. If you try to create a new random variable without a `with model:` statement, it will raise an error since there is no obvious model for the variable to be added to.The first three statements in the context manager:```pythonalpha = Normal('alpha', mu=0, sd=10)beta = Normal('beta', mu=0, sd=10, shape=2)sigma = HalfNormal('sigma', sd=1)```create **stochastic** random variables with Normal prior distributions for the regression coefficients with a mean of 0 and standard deviation of 10, and a half-normal distribution for the standard deviation of the observations, $\sigma$. These are stochastic because their values are partly determined by its parents in the dependency graph of random variables, which for priors are simple constants, and partly random (or stochastic). We call the `Normal` constructor to create a random variable to use as a normal prior. The first argument is always the *name* of the random variable, which should almost always match the name of the Python variable being assigned to, since it is sometimes used to retrieve the variable from the model for summarizing output. The remaining required arguments for a stochastic object are the parameters, in this case `mu`, the mean, and `sd`, the standard deviation, which we assign hyperparameter values for the model. In general, a distribution's parameters are values that determine the location, shape or scale of the random variable, depending on the parameterization of the distribution. Most commonly used distributions, such as `Beta`, `Exponential`, `Categorical`, `Gamma`, `Binomial` and many others, are available in PyMC3.The `beta` variable has an additional `shape` argument to denote it as a vector-valued parameter of size 2. The `shape` argument is available for all distributions and specifies the length or shape of the random variable, but is optional for scalar variables, since it defaults to a value of one. It can be an integer, to specify an array, or a tuple, to specify a multidimensional array (*e.g.* `shape=(5,7)` makes random variable that takes on 5 by 7 matrix values). Detailed notes about distributions, sampling methods and other PyMC3 functions are available via the `help` function.
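As a quick, self-contained sketch of the `shape` argument described above (the model and variable names here are purely illustrative and not part of the running example):

```python
# A throwaway model just to illustrate vector- and matrix-valued variables
with pm.Model() as shape_demo:
    coefs = pm.Normal('coefs', mu=0, sd=10, shape=2)           # vector of length 2
    weights = pm.Normal('weights', mu=0, sd=1, shape=(5, 7))   # 5 x 7 matrix of values
```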
###Code
help(pm.Normal) #try help(Model), help(Uniform) or help(basic_model)
###Output
Help on class Normal in module pymc3.distributions.continuous:
class Normal(pymc3.distributions.distribution.Continuous)
| Univariate normal log-likelihood.
|
| The pdf of this distribution is
|
| .. math::
|
| f(x \mid \mu, \tau) =
| \sqrt{\frac{\tau}{2\pi}}
| \exp\left\{ -\frac{\tau}{2} (x-\mu)^2 \right\}
|
| Normal distribution can be parameterized either in terms of precision
| or standard deviation. The link between the two parametrizations is
| given by
|
| .. math::
|
| \tau = \dfrac{1}{\sigma^2}
|
| .. plot::
|
| import matplotlib.pyplot as plt
| import numpy as np
| import scipy.stats as st
| plt.style.use('seaborn-darkgrid')
| x = np.linspace(-5, 5, 1000)
| mus = [0., 0., 0., -2.]
| sds = [0.4, 1., 2., 0.4]
| for mu, sd in zip(mus, sds):
| pdf = st.norm.pdf(x, mu, sd)
| plt.plot(x, pdf, label=r'$\mu$ = {}, $\sigma$ = {}'.format(mu, sd))
| plt.xlabel('x', fontsize=12)
| plt.ylabel('f(x)', fontsize=12)
| plt.legend(loc=1)
| plt.show()
|
| ======== ==========================================
| Support :math:`x \in \mathbb{R}`
| Mean :math:`\mu`
| Variance :math:`\dfrac{1}{\tau}` or :math:`\sigma^2`
| ======== ==========================================
|
| Parameters
| ----------
| mu : float
| Mean.
| sd : float
| Standard deviation (sd > 0) (only required if tau is not specified).
| tau : float
| Precision (tau > 0) (only required if sd is not specified).
|
| Examples
| --------
| .. code-block:: python
|
| with pm.Model():
| x = pm.Normal('x', mu=0, sd=10)
|
| with pm.Model():
| x = pm.Normal('x', mu=0, tau=1/23)
|
| Method resolution order:
| Normal
| pymc3.distributions.distribution.Continuous
| pymc3.distributions.distribution.Distribution
| builtins.object
|
| Methods defined here:
|
| __init__(self, mu=0, sd=None, tau=None, **kwargs)
| Initialize self. See help(type(self)) for accurate signature.
|
| logp(self, value)
|
| random(self, point=None, size=None)
|
| ----------------------------------------------------------------------
| Methods inherited from pymc3.distributions.distribution.Distribution:
|
| __getnewargs__(self)
|
| __latex__ = _repr_latex_(self, name=None, dist=None)
| Magic method name for IPython to use for LaTeX formatting.
|
| default(self)
|
| get_test_val(self, val, defaults)
|
| getattr_value(self, val)
|
| logp_nojac(self, *args, **kwargs)
| Return the logp, but do not include a jacobian term for transforms.
|
| If we use different parametrizations for the same distribution, we
| need to add the determinant of the jacobian of the transformation
| to make sure the densities still describe the same distribution.
| However, MAP estimates are not invariant with respect to the
| parametrization, we need to exclude the jacobian terms in this case.
|
| This function should be overwritten in base classes for transformed
| distributions.
|
| logp_sum(self, *args, **kwargs)
| Return the sum of the logp values for the given observations.
|
| Subclasses can use this to improve the speed of logp evaluations
| if only the sum of the logp values is needed.
|
| ----------------------------------------------------------------------
| Class methods inherited from pymc3.distributions.distribution.Distribution:
|
| dist(*args, **kwargs) from builtins.type
|
| ----------------------------------------------------------------------
| Static methods inherited from pymc3.distributions.distribution.Distribution:
|
| __new__(cls, name, *args, **kwargs)
| Create and return a new object. See help(type) for accurate signature.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from pymc3.distributions.distribution.Distribution:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
Having defined the priors, the next statement creates the expected value `mu` of the outcomes, specifying the linear relationship:```pythonmu = alpha + beta[0]*X1 + beta[1]*X2```This creates a **deterministic** random variable, which implies that its value is *completely* determined by its parents' values. That is, there is no uncertainty beyond that which is inherent in the parents' values. Here, `mu` is just the sum of the intercept `alpha` and the two products of the coefficients in `beta` and the predictor variables, whatever their values may be. PyMC3 random variables and data can be arbitrarily added, subtracted, divided, multiplied together and indexed-into to create new random variables. This allows for great model expressivity. Many common mathematical functions like `sum`, `sin`, `exp` and linear algebra functions like `dot` (for inner product) and `inv` (for inverse) are also provided. The final line of the model, defines `Y_obs`, the sampling distribution of the outcomes in the dataset.```pythonY_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)```This is a special case of a stochastic variable that we call an **observed stochastic**, and represents the data likelihood of the model. It is identical to a standard stochastic, except that its `observed` argument, which passes the data to the variable, indicates that the values for this variable were observed, and should not be changed by any fitting algorithm applied to the model. The data can be passed in the form of either a `numpy.ndarray` or `pandas.DataFrame` object.Notice that, unlike for the priors of the model, the parameters for the normal distribution of `Y_obs` are not fixed values, but rather are the deterministic object `mu` and the stochastic `sigma`. This creates parent-child relationships between the likelihood and these two variables. Model fittingHaving completely specified our model, the next step is to obtain posterior estimates for the unknown variables in the model. Ideally, we could calculate the posterior estimates analytically, but for most non-trivial models, this is not feasible. We will consider two approaches, whose appropriateness depends on the structure of the model and the goals of the analysis: finding the *maximum a posteriori* (MAP) point using optimization methods, and computing summaries based on samples drawn from the posterior distribution using Markov Chain Monte Carlo (MCMC) sampling methods. Maximum a posteriori methodsThe **maximum a posteriori (MAP)** estimate for a model, is the mode of the posterior distribution and is generally found using numerical optimization methods. This is often fast and easy to do, but only gives a point estimate for the parameters and can be biased if the mode isn't representative of the distribution. PyMC3 provides this functionality with the `find_MAP` function.Below we find the MAP for our original model. The MAP is returned as a parameter **point**, which is always represented by a Python dictionary of variable names to NumPy arrays of parameter values.
###Code
map_estimate = pm.find_MAP(model=basic_model)
map_estimate
###Output
logp = -149.58, ||grad|| = 12.242: 100%|██████████| 19/19 [00:00<00:00, 2230.33it/s]
###Markdown
By default, `find_MAP` uses the Broyden–Fletcher–Goldfarb–Shanno (BFGS) optimization algorithm to find the maximum of the log-posterior but also allows selection of other optimization algorithms from the `scipy.optimize` module. For example, below we use Powell's method to find the MAP.
###Code
map_estimate = pm.find_MAP(model=basic_model, method='powell')
map_estimate
###Output
0%| | 0/5000 [00:00<?, ?it/s]/home/osvaldo/anaconda3/lib/python3.6/site-packages/scipy/optimize/_minimize.py:502: RuntimeWarning: Method powell does not use gradient information (jac).
RuntimeWarning)
logp = -149.47, ||grad|| = 13.248: 100%|██████████| 177/177 [00:00<00:00, 2676.93it/s]
###Markdown
It is important to note that the MAP estimate is not always reasonable, especially if the mode is at an extreme. This can be a subtle issue; with high dimensional posteriors, one can have areas of extremely high density but low total probability because the volume is very small. This will often occur in hierarchical models with the variance parameter for the random effect. If the individual group means are all the same, the posterior will have near infinite density if the scale parameter for the group means is almost zero, even though the probability of such a small scale parameter will be small since the group means must be extremely close together. Most techniques for finding the MAP estimate also only find a *local* optimum (which is often good enough), but can fail badly for multimodal posteriors if the different modes are meaningfully different.In summary, while PyMC3 provides the function `find_MAP()`, at this point mostly for historical reasons, this function is of little use in most scenarios. If you want a point estimate you should get it from the posterior. In the next section we will see how to get a posterior using sampling methods. Sampling methodsThough finding the MAP is a fast and easy way of obtaining estimates of the unknown model parameters, it is limited because there is no associated estimate of uncertainty produced with the MAP estimates. Instead, a simulation-based approach such as Markov chain Monte Carlo (MCMC) can be used to obtain a Markov chain of values that, given the satisfaction of certain conditions, are indistinguishable from samples from the _true_ posterior distribution. To conduct MCMC sampling to generate posterior samples in PyMC3, we specify a **step method** object that corresponds to a particular MCMC algorithm, such as Metropolis, Slice sampling, or the No-U-Turn Sampler (NUTS). PyMC3's `step_methods` submodule contains the following samplers: `NUTS`, `Metropolis`, `Slice`, `HamiltonianMC`, and `BinaryMetropolis`. These step methods can be assigned manually, or assigned automatically by PyMC3. Auto-assignment is based on the attributes of each variable in the model. In general:* Binary variables will be assigned to `BinaryMetropolis`* Discrete variables will be assigned to `Metropolis`* Continuous variables will be assigned to `NUTS`Auto-assignment can be overriden for any subset of variables by specifying them manually prior to sampling. Gradient-based sampling methodsPyMC3 has the standard sampling algorithms like adaptive Metropolis-Hastings and adaptive slice sampling, but PyMC3's most capable step method is the No-U-Turn Sampler. NUTS is especially useful on models that have many continuous parameters, a situation where other MCMC algorithms work very slowly. It takes advantage of information about where regions of higher probability are, based on the gradient of the log posterior-density. This helps it achieve dramatically faster convergence on large problems than traditional sampling methods achieve. PyMC3 relies on Theano to analytically compute model gradients via automatic differentiation of the posterior density. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo. For random variables that are undifferentiable (namely, discrete variables) NUTS cannot be used, but it may still be used on the differentiable variables in a model that contains undifferentiable variables. 
NUTS requires a scaling matrix parameter, which is analogous to the variance parameter for the jump proposal distribution in Metropolis-Hastings, although NUTS uses it somewhat differently. The matrix gives the rough shape of the distribution so that NUTS does not make jumps that are too large in some directions and too small in other directions. It is important to set this scaling parameter to a reasonable value to facilitate efficient sampling. This is especially true for models that have many unobserved stochastic random variables or models with highly non-normal posterior distributions. Poor scaling parameters will slow down NUTS significantly, sometimes almost stopping it completely. A reasonable starting point for sampling can also be important for efficient sampling, but not as often.`PyMC3` automatically initializes NUTS to reasonable values based on the variance of the samples obtained during a tuning phase. A little bit of noise is added to ensure different, parallel, chains start from different points. Also, `PyMC3` will automatically assign an appropriate sampler if we don't supply it via the `step` keyword argument (see below for an example of how to explicitly assign step methods).
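If you do want explicit control over tuning and initialization, `sample` exposes keyword arguments for both; a minimal sketch (the available options and defaults vary between PyMC3 versions, so treat the values below as illustrative):

```python
with basic_model:
    # 1000 tuning (warm-up) iterations, then 500 kept draws per chain,
    # initializing the mass matrix with the 'adapt_diag' strategy
    trace = pm.sample(500, tune=1000, init='adapt_diag')
```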
###Code
with basic_model:
# draw 500 posterior samples
trace = pm.sample(500)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [sigma, beta, alpha]
100%|██████████| 1000/1000 [00:00<00:00, 1299.35it/s]
###Markdown
The `sample` function runs the step method(s) assigned (or passed) to it for the given number of iterations and returns a `Trace` object containing the samples collected, in the order they were collected. The `trace` object can be queried in a similar way to a `dict` containing a map from variable names to `numpy.array`s. The first dimension of the array is the sampling index and the later dimensions match the shape of the variable. We can see the last 5 values for the `alpha` variable as follows:
###Code
trace['alpha'][-5:]
###Output
_____no_output_____
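For finer-grained access, the trace object also provides a `get_values` method that can discard burn-in draws and keep chains separate or combined (a sketch, assuming the PyMC3 3.x `MultiTrace` API):

```python
# Discard the first 100 draws of each chain and keep the chains separate
alpha_per_chain = trace.get_values('alpha', burn=100, combine=False)
len(alpha_per_chain)  # one array per chain
```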
###Markdown
If we wanted to apply the slice sampling algorithm to `sigma` instead of NUTS (which was assigned automatically), we could have specified this as the `step` argument for `sample`.
###Code
with basic_model:
# instantiate sampler
step = pm.Slice()
# draw 5000 posterior samples
trace = pm.sample(5000, step=step)
###Output
Multiprocess sampling (2 chains in 2 jobs)
CompoundStep
>Slice: [sigma]
>Slice: [beta]
>Slice: [alpha]
100%|██████████| 5500/5500 [00:06<00:00, 811.17it/s]
###Markdown
Posterior analysis`PyMC3` provides plotting and summarization functions for inspecting the sampling output. A simple posterior plot can be created using `traceplot`.
###Code
pm.traceplot(trace);
###Output
_____no_output_____
###Markdown
The left column consists of a smoothed histogram (using kernel density estimation) of the marginal posteriors of each stochastic random variable while the right column contains the samples of the Markov chain plotted in sequential order. The `beta` variable, being vector-valued, produces two histograms and two sample traces, corresponding to both predictor coefficients.In addition, the `summary` function provides a text-based output of common posterior statistics:
###Code
pm.summary(trace).round(2)
###Output
_____no_output_____
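Besides `traceplot` and `summary`, PyMC3 ships a few other convenience plots; for example (a sketch, assuming the PyMC3 3.x plotting API):

```python
# Posterior densities with point estimates and credible intervals
pm.plot_posterior(trace);

# Forest plot of credible intervals for all variables
pm.forestplot(trace);
```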
###Markdown
Case study 1: Stochastic volatilityWe present a case study of stochastic volatility, time varying stock market volatility, to illustrate PyMC3's use in addressing a more realistic problem. The distribution of market returns is highly non-normal, which makes sampling the volatilities significantly more difficult. This example has 400+ parameters so using common sampling algorithms like Metropolis-Hastings would get bogged down, generating highly autocorrelated samples. Instead, we use NUTS, which is dramatically more efficient. The ModelAsset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others they are very stable. Stochastic volatility models address this with a latent volatility variable, which changes over time. The following model is similar to the one described in the NUTS paper (Hoffman 2014, p. 21).$$\begin{aligned} \nu &\sim exp(0.1) \\ \sigma &\sim exp(50) \\ s_i &\sim \mathcal{N}(s_{i-1}, \sigma^2) \\ log(r_i) &\sim t(\nu, 0, exp(-2 s_i))\end{aligned}$$Here, $r$ is the daily return series which is modeled with a Student-t distribution with an unknown degrees of freedom parameter, and a scale parameter determined by a latent process $s$. The individual $s_i$ are the individual daily log volatilities in the latent log volatility process. The DataOur data consist of daily returns of the S&P 500 during the 2008 financial crisis. Here, we use `pandas-datareader` to obtain the price data from Yahoo Finance; it can be installed with `pip install pandas-datareader`.
###Code
from pandas_datareader import data
import pandas as pd
returns = data.get_data_yahoo('SPY', start='2008-5-1', end='2009-12-1')['Close'].pct_change()
len(returns)
returns.plot(figsize=(10, 6))
plt.ylabel('daily returns in %');
###Output
_____no_output_____
###Markdown
Model SpecificationAs with the linear regression example, specifying the model in PyMC3 mirrors its statistical specification. This model employs several new distributions: the `Exponential` distribution for the $\nu$ and $\sigma$ priors, the Student-T (`StudentT`) distribution for distribution of returns, and the `GaussianRandomWalk` for the prior for the latent volatilities. In PyMC3, variables with purely positive priors like `Exponential` are transformed with a log transform. This makes sampling more robust. Behind the scenes, a variable in the unconstrained space (named "variableName_log") is added to the model for sampling. In this model this happens behind the scenes for both the degrees of freedom, `nu`, and the scale parameter for the volatility process, `sigma`, since they both have exponential priors. Variables with priors that constrain them on two sides, like `Beta` or `Uniform`, are also transformed to be unconstrained but with a log odds transform. Although, unlike model specification in PyMC2, we do not typically provide starting points for variables at the model specification stage, we can also provide an initial value for any distribution (called a "test value") using the `testval` argument. This overrides the default test value for the distribution (usually the mean, median or mode of the distribution), and is most often useful if some values are illegal and we want to ensure we select a legal one. The test values for the distributions are also used as a starting point for sampling and optimization by default, though this is easily overriden. The vector of latent volatilities `s` is given a prior distribution by `GaussianRandomWalk`. As its name suggests GaussianRandomWalk is a vector valued distribution where the values of the vector form a random normal walk of length n, as specified by the `shape` argument. The scale of the innovations of the random walk, `sigma`, is specified in terms of the standard deviation of the normally distributed innovations and can be a scalar or vector.
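As a standalone sketch (not part of the volatility model) of the `testval` argument and the automatic log transform mentioned above; the exact name of the transformed variable differs between PyMC3 releases:

```python
with pm.Model() as transform_demo:
    # `testval` overrides the default test value and serves as the default
    # starting point; the variable is sampled on the log scale behind the scenes
    sigma_demo = pm.Exponential('sigma_demo', 1/0.02, testval=.1)

transform_demo.free_RVs  # contains the log-transformed free variable, e.g. 'sigma_demo_log__'
```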
###Code
with pm.Model() as sp500_model:
nu = pm.Exponential('nu', 1/10., testval=5.)
sigma = pm.Exponential('sigma', 1/0.02, testval=.1)
s = pm.GaussianRandomWalk('s', sd=sigma, shape=len(returns))
volatility_process = pm.Deterministic('volatility_process', pm.math.exp(-2*s)**0.5)
r = pm.StudentT('r', nu=nu, sd=volatility_process, observed=returns)
###Output
_____no_output_____
###Markdown
Notice that we transform the log volatility process `s` into the volatility process by `exp(-2*s)**0.5`, which simplifies to $e^{-s}$ (this is also why the plot further below applies `1/np.exp(...)` to the sampled `s`). Here, `exp` is a Theano function, rather than the corresponding function in NumPy; Theano provides a large subset of the mathematical functions that NumPy does.Also note that we have declared the `Model` name `sp500_model` in the first occurrence of the context manager, rather than splitting it into two lines, as we did for the first example. Fitting
###Code
with sp500_model:
trace = pm.sample(2000)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [r_missing, s, sigma, nu]
100%|██████████| 2500/2500 [01:53<00:00, 21.94it/s]
The estimated number of effective samples is smaller than 200 for some parameters.
###Markdown
We can check our samples by looking at the traceplot for `nu` and `sigma`.
###Code
pm.traceplot(trace, varnames=['nu', 'sigma']);
###Output
_____no_output_____
###Markdown
Finally we plot the distribution of volatility paths by plotting many of our sampled volatility paths on the same graph. Each is rendered partially transparent (via the `alpha` argument in Matplotlib's `plot` function) so the regions where many paths overlap are shaded more darkly.
###Code
fig, ax = plt.subplots(figsize=(15, 8))
returns.plot(ax=ax)
ax.plot(returns.index, 1/np.exp(trace['s',::5].T), 'C3', alpha=.03);
ax.set(title='volatility_process', xlabel='time', ylabel='volatility');
ax.legend(['S&P500', 'stochastic volatility process']);
###Output
_____no_output_____
###Markdown
As you can see, the model correctly infers the increase in volatility during the 2008 financial crash. Moreover, note that this model is quite complex because of its high dimensionality and dependency-structure in the random walk distribution. NUTS as implemented in PyMC3, however, correctly infers the posterior distribution with ease. Case study 2: Coal mining disastersConsider the following time series of recorded coal mining disasters in the UK from 1851 to 1962 (Jarrett, 1979). The number of disasters is thought to have been affected by changes in safety regulations during this period. Unfortunately, we also have a pair of years with missing data, identified as missing by a NumPy MaskedArray using -999 as the marker value. Next we will build a model for this series and attempt to estimate when the change occurred. At the same time, we will see how to handle missing data, use multiple samplers and sample from discrete random variables.
###Code
disaster_data = np.ma.masked_values([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, -999, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, -999, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1], value=-999)
years = np.arange(1851, 1962)
plt.plot(years, disaster_data, 'o', markersize=8);
plt.ylabel("Disaster count")
plt.xlabel("Year");
###Output
_____no_output_____
###Markdown
The occurrence of disasters in the time series is thought to follow a Poisson process with a large rate parameter in the early part of the time series, and one with a smaller rate in the later part. We are interested in locating the change point in the series, which perhaps is related to changes in mining safety regulations.In our model, $$ \begin{aligned} D_t &\sim \text{Pois}(r_t), r_t= \begin{cases} e, & \text{if } t \le s \\ l, & \text{if } t \gt s \end{cases} \\ s &\sim \text{Unif}(t_l, t_h)\\ e &\sim \text{exp}(1)\\ l &\sim \text{exp}(1) \end{aligned}$$the parameters are defined as follows: * $D_t$: The number of disasters in year $t$ * $r_t$: The rate parameter of the Poisson distribution of disasters in year $t$. * $s$: The year in which the rate parameter changes (the switchpoint). * $e$: The rate parameter before the switchpoint $s$. * $l$: The rate parameter after the switchpoint $s$. * $t_l$, $t_h$: The lower and upper boundaries of year $t$. This model is built much like our previous models. The major differences are the introduction of discrete variables with the Poisson and discrete-uniform priors and the novel form of the deterministic random variable `rate`.
###Code
with pm.Model() as disaster_model:
switchpoint = pm.DiscreteUniform('switchpoint', lower=years.min(), upper=years.max(), testval=1900)
# Priors for pre- and post-switch rates number of disasters
early_rate = pm.Exponential('early_rate', 1)
late_rate = pm.Exponential('late_rate', 1)
# Allocate appropriate Poisson rates to years before and after current
rate = pm.math.switch(switchpoint >= years, early_rate, late_rate)
disasters = pm.Poisson('disasters', rate, observed=disaster_data)
###Output
_____no_output_____
###Markdown
The logic for the rate random variable,```pythonrate = switch(switchpoint >= years, early_rate, late_rate)```is implemented using `switch`, a Theano function that works like an if statement. It uses the first argument to switch between the next two arguments.Missing values are handled transparently by passing a `MaskedArray` or a `pandas.DataFrame` with NaN values to the `observed` argument when creating an observed stochastic random variable. Behind the scenes, another random variable, `disasters.missing_values` is created to model the missing values. All we need to do to handle the missing values is ensure we sample this random variable as well. Unfortunately, because they are discrete variables and thus have no meaningful gradient, we cannot use NUTS for sampling `switchpoint` or the missing disaster observations. Instead, we will sample using a `Metropolis` step method, which implements adaptive Metropolis-Hastings, because it is designed to handle discrete values. `PyMC3` automatically assigns the correct sampling algorithms.
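To see what `switch` does on its own, here is a tiny sketch that evaluates the expression eagerly (the years and rates are illustrative only):

```python
import theano.tensor as tt

years_demo = np.array([1851, 1852, 1853, 1854])
# elementwise: pick 10.0 where the condition is True, 2.0 where it is False
rate_demo = tt.switch(1852 >= years_demo, 10.0, 2.0)
print(rate_demo.eval())   # -> [10. 10.  2.  2.]
```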
###Code
with disaster_model:
trace = pm.sample(10000)
###Output
Multiprocess sampling (2 chains in 2 jobs)
CompoundStep
>CompoundStep
>>Metropolis: [disasters_missing]
>>Metropolis: [switchpoint]
>NUTS: [late_rate, early_rate]
100%|██████████| 10500/10500 [00:11<00:00, 945.76it/s]
The number of effective samples is smaller than 10% for some parameters.
###Markdown
In the trace plot below we can see that there's about a 10 year span that's plausible for a significant change in safety, but a 5 year span that contains most of the probability mass. The distribution is jagged because of the jumpy relationship between the year switchpoint and the likelihood and not due to sampling error.
###Code
pm.traceplot(trace);
###Output
_____no_output_____
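To put a number on that span, we can measure how much posterior mass falls within a window around the posterior mean of the switchpoint (a sketch; the 5-year window width is arbitrary):

```python
sp = trace['switchpoint']
center = int(sp.mean())
within_5yr = ((sp >= center - 5) & (sp <= center + 5)).mean()
print('P(switchpoint within +/- 5 years of {}) = {:.2f}'.format(center, within_5yr))
```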
###Markdown
The following plot shows the switch point as an orange vertical line, together with its HPD as a semitransparent band. The dashed black line shows the accident rate.
###Code
plt.figure(figsize=(10, 8))
plt.plot(years, disaster_data, '.')
plt.ylabel("Number of accidents", fontsize=16)
plt.xlabel("Year", fontsize=16)
plt.vlines(trace['switchpoint'].mean(), disaster_data.min(), disaster_data.max(), color='C1')
average_disasters = np.zeros_like(disaster_data, dtype='float')
for i, year in enumerate(years):
idx = year < trace['switchpoint']
average_disasters[i] = (trace['early_rate'][idx].sum() + trace['late_rate'][~idx].sum()) / (len(trace) * trace.nchains)
sp_hpd = pm.hpd(trace['switchpoint'])
plt.fill_betweenx(y=[disaster_data.min(), disaster_data.max()],
x1=sp_hpd[0], x2=sp_hpd[1], alpha=0.5, color='C1');
plt.plot(years, average_disasters, 'k--', lw=2);
###Output
_____no_output_____
###Markdown
Arbitrary deterministicsDue to its reliance on Theano, PyMC3 provides many mathematical functions and operators for transforming random variables into new random variables. However, the library of functions in Theano is not exhaustive; therefore, Theano and PyMC3 provide functionality for creating arbitrary Theano functions in pure Python, and including these functions in PyMC models. This is supported with the `as_op` function decorator.Theano needs to know the types of the inputs and outputs of a function, which are specified for `as_op` by `itypes` for inputs and `otypes` for outputs. The Theano documentation includes [an overview of the available types](http://deeplearning.net/software/theano/library/tensor/basic.html#all-fully-typed-constructors).
###Code
import theano.tensor as tt
from theano.compile.ops import as_op
@as_op(itypes=[tt.lscalar], otypes=[tt.lscalar])
def crazy_modulo3(value):
if value > 0:
return value % 3
else :
return (-value + 1) % 3
with pm.Model() as model_deterministic:
a = pm.Poisson('a', 1)
b = crazy_modulo3(a)
###Output
_____no_output_____
###Markdown
An important drawback of this approach is that it is not possible for `theano` to inspect these functions in order to compute the gradient required for the Hamiltonian-based samplers. Therefore, it is not possible to use the HMC or NUTS samplers for a model that uses such an operator. However, it is possible to add a gradient if we inherit from `theano.Op` instead of using `as_op`. The PyMC example set includes [a more elaborate example of the usage of as_op](https://github.com/pymc-devs/pymc3/blob/master/pymc3/examples/disaster_model_theano_op.py). Arbitrary distributionsSimilarly, the library of statistical distributions in PyMC3 is not exhaustive, but PyMC3 allows for the creation of user-defined functions for an arbitrary probability distribution. For simple statistical distributions, the `DensityDist` function takes as an argument any function that calculates a log-probability $log(p(x))$. This function may employ other random variables in its calculation. Here is an example inspired by a blog post by Jake Vanderplas on which priors to use for a linear regression (Vanderplas, 2014). ```pythonimport theano.tensor as ttwith pm.Model() as model: alpha = pm.Uniform('intercept', -100, 100) Create custom densities beta = pm.DensityDist('beta', lambda value: -1.5 * tt.log(1 + value**2), testval=0) eps = pm.DensityDist('eps', lambda value: -tt.log(tt.abs_(value)), testval=1) Create likelihood like = pm.Normal('y_est', mu=alpha + beta * X, sd=eps, observed=Y)``` For more complex distributions, one can create a subclass of `Continuous` or `Discrete` and provide the custom `logp` function, as required. This is how the built-in distributions in PyMC are specified. As an example, fields like psychology and astrophysics have complex likelihood functions for a particular process that may require numerical approximation. In these cases, it is impossible to write the function in terms of predefined theano operators and we must use a custom theano operator using `as_op` or inheriting from `theano.Op`. Implementing the `beta` variable above as a `Continuous` subclass is shown below, along with a sub-function.
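For readability, here is the `DensityDist` snippet quoted above with its original line breaks and comment markers restored; `X` and `Y` stand for a generic predictor and outcome, so we reuse `X1` and `Y` from the linear-regression example, and the model is named `model_custom` here to avoid clashing with the `model` defined in the next cell:

```python
import theano.tensor as tt

X = X1  # predictor and outcome from the linear-regression example

with pm.Model() as model_custom:
    alpha = pm.Uniform('intercept', -100, 100)

    # Create custom densities
    beta = pm.DensityDist('beta', lambda value: -1.5 * tt.log(1 + value**2), testval=0)
    eps = pm.DensityDist('eps', lambda value: -tt.log(tt.abs_(value)), testval=1)

    # Create likelihood
    like = pm.Normal('y_est', mu=alpha + beta * X, sd=eps, observed=Y)
```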
###Code
class Beta(pm.Continuous):
def __init__(self, mu, *args, **kwargs):
super(Beta, self).__init__(*args, **kwargs)
self.mu = mu
self.mode = mu
def logp(self, value):
mu = self.mu
return beta_logp(value - mu)
def beta_logp(value):
return -1.5 * np.log(1 + (value)**2)
with pm.Model() as model:
beta = Beta('slope', mu=0, testval=0)
###Output
_____no_output_____
###Markdown
If your logp cannot be expressed in Theano, you can decorate the function with `as_op` as follows: `@as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])`. Note that this will create a black-box Python function that will be much slower and will not provide the gradients necessary for e.g. NUTS. Generalized Linear ModelsGeneralized Linear Models (GLMs) are a class of flexible models that are widely used to estimate regression relationships between a single outcome variable and one or multiple predictors. Because these models are so common, `PyMC3` offers a `glm` submodule that allows flexible creation of various GLMs with an intuitive `R`-like syntax that is implemented via the `patsy` module.The `glm` submodule requires data to be included as a `pandas` `DataFrame`. Hence, for our linear regression example:
###Code
# Convert X and Y to a pandas DataFrame
import pandas
df = pandas.DataFrame({'x1': X1, 'x2': X2, 'y': Y})
###Output
_____no_output_____
###Markdown
The model can then be very concisely specified in one line of code.
###Code
from pymc3.glm import GLM
with pm.Model() as model_glm:
GLM.from_formula('y ~ x1 + x2', df)
trace = pm.sample()
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [sd, x2, x1, Intercept]
100%|██████████| 1000/1000 [00:01<00:00, 976.25it/s]
###Markdown
The error distribution, if not specified via the `family` argument, is assumed to be normal. In the case of logistic regression, this can be modified by passing in a `Binomial` family object.
###Code
from pymc3.glm.families import Binomial
df_logistic = pandas.DataFrame({'x1': X1, 'y': Y > np.median(Y)})
with pm.Model() as model_glm_logistic:
GLM.from_formula('y ~ x1', df_logistic, family=Binomial())
###Output
_____no_output_____
###Markdown
Getting started with PyMC3Authors: John Salvatier, Thomas V. Wiecki, Christopher FonnesbeckNote: This text is taken from the [PeerJ CS publication on PyMC3](https://peerj.com/articles/cs-55/). AbstractProbabilistic Programming allows for automatic Bayesian inference on user-defined probabilistic models. Recent advances in Markov chain Monte Carlo (MCMC) sampling allow inference on increasingly complex models. This class of MCMC, known as Hamliltonian Monte Carlo, requires gradient information which is often not readily available. PyMC3 is a new open source Probabilistic Programming framework written in Python that uses Theano to compute gradients via automatic differentiation as well as compile probabilistic programs on-the-fly to C for increased speed. Contrary to other Probabilistic Programming languages, PyMC3 allows model specification directly in Python code. The lack of a domain specific language allows for great flexibility and direct interaction with the model. This paper is a tutorial-style introduction to this software package. IntroductionProbabilistic programming (PP) allows flexible specification of Bayesian statistical models in code. PyMC3 is a new, open-source PP framework with an intuitive and readable, yet powerful, syntax that is close to the natural syntax statisticians use to describe models. It features next-generation Markov chain Monte Carlo (MCMC) sampling algorithms such as the No-U-Turn Sampler (NUTS; Hoffman, 2014), a self-tuning variant of Hamiltonian Monte Carlo (HMC; Duane, 1987). This class of samplers works well on high dimensional and complex posterior distributions and allows many complex models to be fit without specialized knowledge about fitting algorithms. HMC and NUTS take advantage of gradient information from the likelihood to achieve much faster convergence than traditional sampling methods, especially for larger models. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo, which means you usually don't need to have specialized knowledge about how the algorithms work. PyMC3, Stan (Stan Development Team, 2014), and the LaplacesDemon package for R are currently the only PP packages to offer HMC.Probabilistic programming in Python confers a number of advantages including multi-platform compatibility, an expressive yet clean and readable syntax, easy integration with other scientific libraries, and extensibility via C, C++, Fortran or Cython. These features make it relatively straightforward to write and use custom statistical distributions, samplers and transformation functions, as required by Bayesian analysis.While most of PyMC3's user-facing features are written in pure Python, it leverages Theano (Bergstra et al., 2010) to transparently transcode models to C and compile them to machine code, thereby boosting performance. Theano is a library that allows expressions to be defined using generalized vector data structures called *tensors*, which are tightly integrated with the popular NumPy `ndarray` data structure, and similarly allow for broadcasting and advanced indexing, just as NumPy arrays do. Theano also automatically optimizes the likelihood's computational graph for speed and provides simple GPU integration.Here, we present a primer on the use of PyMC3 for solving general Bayesian statistical inference and prediction problems. 
We will first see the basics of how to use PyMC3, motivated by a simple example: installation, data creation, model definition, model fitting and posterior analysis. Then we will cover two case studies and use them to show how to define and fit more sophisticated models. Finally we will show how to extend PyMC3 and discuss other useful features: the Generalized Linear Models subpackage, custom distributions, custom transformations and alternative storage backends. InstallationRunning PyMC3 requires a working Python interpreter, either version 2.7 (or more recent) or 3.4 (or more recent); we recommend that new users install version 3.4. A complete Python installation for Mac OSX, Linux and Windows can most easily be obtained by downloading and installing the free [`Anaconda Python Distribution`](https://store.continuum.io/cshop/anaconda/) by ContinuumIO. `PyMC3` can be installed using `pip` (https://pip.pypa.io/en/latest/installing.html):```pip install git+https://github.com/pymc-devs/pymc3```PyMC3 depends on several third-party Python packages which will be automatically installed when installing via pip. The four required dependencies are: `Theano`, `NumPy`, `SciPy`, and `Matplotlib`. To take full advantage of PyMC3, the optional dependencies `Pandas` and `Patsy` should also be installed. These are *not* automatically installed, but can be installed by:```pip install patsy pandas```The source code for PyMC3 is hosted on GitHub at https://github.com/pymc-devs/pymc3 and is distributed under the liberal [Apache License 2.0](https://github.com/pymc-devs/pymc3/blob/master/LICENSE). On the GitHub site, users may also report bugs and other issues, as well as contribute code to the project, which we actively encourage. A Motivating Example: Linear RegressionTo introduce model definition, fitting and posterior analysis, we first consider a simple Bayesian linear regression model with normal priors for the parameters. We are interested in predicting outcomes $Y$ as normally-distributed observations with an expected value $\mu$ that is a linear function of two predictor variables, $X_1$ and $X_2$.$$\begin{aligned} Y &\sim \mathcal{N}(\mu, \sigma^2) \\\mu &= \alpha + \beta_1 X_1 + \beta_2 X_2\end{aligned}$$where $\alpha$ is the intercept, and $\beta_i$ is the coefficient for covariate $X_i$, while $\sigma$ represents the observation error. Since we are constructing a Bayesian model, the unknown variables in the model must be assigned a prior distribution. We choose zero-mean normal priors with variance of 100 for both regression coefficients, which corresponds to *weak* information regarding the true parameter values. We choose a half-normal distribution (normal distribution bounded at zero) as the prior for $\sigma$.$$\begin{aligned} \alpha &\sim \mathcal{N}(0, 100) \\\beta_i &\sim \mathcal{N}(0, 100) \\\sigma &\sim \lvert\mathcal{N}(0, 1){\rvert}\end{aligned}$$ Generating dataWe can simulate some artificial data from this model using only NumPy's `random` module, and then use PyMC3 to try to recover the corresponding parameters. We are intentionally generating the data to closely correspond the PyMC3 model structure.
###Code
import numpy as np
import matplotlib.pyplot as plt
# Initialize random number generator
np.random.seed(123)
# True parameter values
alpha, sigma = 1, 1
beta = [1, 2.5]
# Size of dataset
size = 100
# Predictor variable
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
# Simulate outcome variable
Y = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma
###Output
_____no_output_____
###Markdown
Here is what the simulated data look like. We use the `pyplot` module from the plotting library matplotlib.
###Code
%matplotlib inline
fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,4))
axes[0].scatter(X1, Y)
axes[1].scatter(X2, Y)
axes[0].set_ylabel('Y'); axes[0].set_xlabel('X1'); axes[1].set_xlabel('X2');
###Output
_____no_output_____
###Markdown
Model SpecificationSpecifying this model in PyMC3 is straightforward because the syntax is very close to the statistical notation. For the most part, each line of Python code corresponds to a line in the model notation above. First, we import PyMC3. We use the convention of importing it as `pm`.
###Code
import pymc3 as pm
###Output
_____no_output_____
###Markdown
Now we build our model, which we will present in full first, then explain each part line-by-line.
###Code
basic_model = pm.Model()
with basic_model:
# Priors for unknown model parameters
alpha = pm.Normal('alpha', mu=0, sd=10)
beta = pm.Normal('beta', mu=0, sd=10, shape=2)
sigma = pm.HalfNormal('sigma', sd=1)
# Expected value of outcome
mu = alpha + beta[0]*X1 + beta[1]*X2
# Likelihood (sampling distribution) of observations
Y_obs = pm.Normal('Y_obs', mu=mu, sd=sigma, observed=Y)
###Output
_____no_output_____
###Markdown
The first line,```pythonbasic_model = Model()```creates a new `Model` object which is a container for the model random variables.Following instantiation of the model, the subsequent specification of the model components is performed inside a `with` statement:```pythonwith basic_model:```This creates a *context manager*, with our `basic_model` as the context, that includes all statements until the indented block ends. This means all PyMC3 objects introduced in the indented code block below the `with` statement are added to the model behind the scenes. Absent this context manager idiom, we would be forced to manually associate each of the variables with `basic_model` right after we create them. If you try to create a new random variable without a `with model:` statement, it will raise an error since there is no obvious model for the variable to be added to.The first three statements in the context manager:```pythonalpha = Normal('alpha', mu=0, sd=10)beta = Normal('beta', mu=0, sd=10, shape=2)sigma = HalfNormal('sigma', sd=1)```create a **stochastic** random variables with a Normal prior distributions for the regression coefficients with a mean of 0 and standard deviation of 10 for the regression coefficients, and a half-normal distribution for the standard deviation of the observations, $\sigma$. These are stochastic because their values are partly determined by its parents in the dependency graph of random variables, which for priors are simple constants, and partly random (or stochastic). We call the `Normal` constructor to create a random variable to use as a normal prior. The first argument is always the *name* of the random variable, which should almost always match the name of the Python variable being assigned to, since it sometimes used to retrieve the variable from the model for summarizing output. The remaining required arguments for a stochastic object are the parameters, in this case `mu`, the mean, and `sd`, the standard deviation, which we assign hyperparameter values for the model. In general, a distribution's parameters are values that determine the location, shape or scale of the random variable, depending on the parameterization of the distribution. Most commonly used distributions, such as `Beta`, `Exponential`, `Categorical`, `Gamma`, `Binomial` and many others, are available in PyMC3.The `beta` variable has an additional `shape` argument to denote it as a vector-valued parameter of size 2. The `shape` argument is available for all distributions and specifies the length or shape of the random variable, but is optional for scalar variables, since it defaults to a value of one. It can be an integer, to specify an array, or a tuple, to specify a multidimensional array (*e.g.* `shape=(5,7)` makes random variable that takes on 5 by 7 matrix values). Detailed notes about distributions, sampling methods and other PyMC3 functions are available via the `help` function.
###Code
help(pm.Normal) #try help(Model), help(Uniform) or help(basic_model)
###Output
Help on class Normal in module pymc3.distributions.continuous:
class Normal(pymc3.distributions.distribution.Continuous)
| Univariate normal log-likelihood.
|
| .. math::
|
| f(x \mid \mu, \tau) =
| \sqrt{\frac{\tau}{2\pi}}
| \exp\left\{ -\frac{\tau}{2} (x-\mu)^2 \right\}
|
| ======== ==========================================
| Support :math:`x \in \mathbb{R}`
| Mean :math:`\mu`
| Variance :math:`\dfrac{1}{\tau}` or :math:`\sigma^2`
| ======== ==========================================
|
| Normal distribution can be parameterized either in terms of precision
| or standard deviation. The link between the two parametrizations is
| given by
|
| .. math::
|
| \tau = \dfrac{1}{\sigma^2}
|
| Parameters
| ----------
| mu : float
| Mean.
| sd : float
| Standard deviation (sd > 0).
| tau : float
| Precision (tau > 0).
|
| Method resolution order:
| Normal
| pymc3.distributions.distribution.Continuous
| pymc3.distributions.distribution.Distribution
| builtins.object
|
| Methods defined here:
|
| __init__(self, mu=0, sd=None, tau=None, **kwargs)
| Initialize self. See help(type(self)) for accurate signature.
|
| logp(self, value)
|
| random(self, point=None, size=None, repeat=None)
|
| ----------------------------------------------------------------------
| Methods inherited from pymc3.distributions.distribution.Distribution:
|
| __getnewargs__(self)
|
| default(self)
|
| get_test_val(self, val, defaults)
|
| getattr_value(self, val)
|
| ----------------------------------------------------------------------
| Class methods inherited from pymc3.distributions.distribution.Distribution:
|
| dist(*args, **kwargs) from builtins.type
|
| ----------------------------------------------------------------------
| Static methods inherited from pymc3.distributions.distribution.Distribution:
|
| __new__(cls, name, *args, **kwargs)
| Create and return a new object. See help(type) for accurate signature.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from pymc3.distributions.distribution.Distribution:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
Having defined the priors, the next statement creates the expected value `mu` of the outcomes, specifying the linear relationship:```pythonmu = alpha + beta[0]*X1 + beta[1]*X2```This creates a **deterministic** random variable, which implies that its value is *completely* determined by its parents' values. That is, there is no uncertainty beyond that which is inherent in the parents' values. Here, `mu` is just the sum of the intercept `alpha` and the two products of the coefficients in `beta` and the predictor variables, whatever their values may be. PyMC3 random variables and data can be arbitrarily added, subtracted, divided, multiplied together and indexed-into to create new random variables. This allows for great model expressivity. Many common mathematical functions like `sum`, `sin`, `exp` and linear algebra functions like `dot` (for inner product) and `inv` (for inverse) are also provided. The final line of the model, defines `Y_obs`, the sampling distribution of the outcomes in the dataset.```pythonY_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)```This is a special case of a stochastic variable that we call an **observed stochastic**, and represents the data likelihood of the model. It is identical to a standard stochastic, except that its `observed` argument, which passes the data to the variable, indicates that the values for this variable were observed, and should not be changed by any fitting algorithm applied to the model. The data can be passed in the form of either a `numpy.ndarray` or `pandas.DataFrame` object.Notice that, unlike for the priors of the model, the parameters for the normal distribution of `Y_obs` are not fixed values, but rather are the deterministic object `mu` and the stochastic `sigma`. This creates parent-child relationships between the likelihood and these two variables. Model fittingHaving completely specified our model, the next step is to obtain posterior estimates for the unknown variables in the model. Ideally, we could calculate the posterior estimates analytically, but for most non-trivial models, this is not feasible. We will consider two approaches, whose appropriateness depends on the structure of the model and the goals of the analysis: finding the *maximum a posteriori* (MAP) point using optimization methods, and computing summaries based on samples drawn from the posterior distribution using Markov Chain Monte Carlo (MCMC) sampling methods. Maximum a posteriori methodsThe **maximum a posteriori (MAP)** estimate for a model, is the mode of the posterior distribution and is generally found using numerical optimization methods. This is often fast and easy to do, but only gives a point estimate for the parameters and can be biased if the mode isn't representative of the distribution. PyMC3 provides this functionality with the `find_MAP` function.Below we find the MAP for our original model. The MAP is returned as a parameter **point**, which is always represented by a Python dictionary of variable names to NumPy arrays of parameter values.
###Code
map_estimate = pm.find_MAP(model=basic_model)
map_estimate
###Output
Optimization terminated successfully.
Current function value: 149.017982
Iterations: 16
Function evaluations: 21
Gradient evaluations: 21
###Markdown
By default, `find_MAP` uses the Broyden–Fletcher–Goldfarb–Shanno (BFGS) optimization algorithm to find the maximum of the log-posterior but also allows selection of other optimization algorithms from the `scipy.optimize` module. For example, below we use Powell's method to find the MAP.
###Code
from scipy import optimize
map_estimate = pm.find_MAP(model=basic_model, fmin=optimize.fmin_powell)
map_estimate
###Output
Optimization terminated successfully.
Current function value: 149.019762
Iterations: 4
Function evaluations: 176
###Markdown
It is important to note that the MAP estimate is not always reasonable, especially if the mode is at an extreme. This can be a subtle issue; with high dimensional posteriors, one can have areas of extremely high density but low total probability because the volume is very small. This will often occur in hierarchical models with the variance parameter for the random effect. If the individual group means are all the same, the posterior will have near infinite density if the scale parameter for the group means is almost zero, even though the probability of such a small scale parameter will be small since the group means must be extremely close together. Most techniques for finding the MAP estimate also only find a *local* optimum (which is often good enough), but can fail badly for multimodal posteriors if the different modes are meaningfully different. Sampling methodsThough finding the MAP is a fast and easy way of obtaining estimates of the unknown model parameters, it is limited because there is no associated estimate of uncertainty produced with the MAP estimates. Instead, a simulation-based approach such as Markov chain Monte Carlo (MCMC) can be used to obtain a Markov chain of values that, given the satisfaction of certain conditions, are indistinguishable from samples from the posterior distribution. To conduct MCMC sampling to generate posterior samples in PyMC3, we specify a **step method** object that corresponds to a particular MCMC algorithm, such as Metropolis, Slice sampling, or the No-U-Turn Sampler (NUTS). PyMC3's `step_methods` submodule contains the following samplers: `NUTS`, `Metropolis`, `Slice`, `HamiltonianMC`, and `BinaryMetropolis`. These step methods can be assigned manually, or assigned automatically by PyMC3. Auto-assignment is based on the attributes of each variable in the model. In general:* Binary variables will be assigned to `BinaryMetropolis`* Discrete variables will be assigned to `Metropolis`* Continuous variables will be assigned to `NUTS`Auto-assignment can be overriden for any subset of variables by specifying them manually prior to sampling. Gradient-based sampling methodsPyMC3 has the standard sampling algorithms like adaptive Metropolis-Hastings and adaptive slice sampling, but PyMC3's most capable step method is the No-U-Turn Sampler. NUTS is especially useful on models that have many continuous parameters, a situation where other MCMC algorithms work very slowly. It takes advantage of information about where regions of higher probability are, based on the gradient of the log posterior-density. This helps it achieve dramatically faster convergence on large problems than traditional sampling methods achieve. PyMC3 relies on Theano to analytically compute model gradients via automatic differentiation of the posterior density. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo. For random variables that are undifferentiable (namely, discrete variables) NUTS cannot be used, but it may still be used on the differentiable variables in a model that contains undifferentiable variables. NUTS requires a scaling matrix parameter, which is analogous to the variance parameter for the jump proposal distribution in Metropolis-Hastings, although NUTS uses it somewhat differently. The matrix gives the rough shape of the distribution so that NUTS does not make jumps that are too large in some directions and too small in other directions. 
It is important to set this scaling parameter to a reasonable value to facilitate efficient sampling. This is especially true for models that have many unobserved stochastic random variables or models with highly non-normal posterior distributions. Poor scaling parameters will slow down NUTS significantly, sometimes almost stopping it completely. A reasonable starting point for sampling can also be important for efficient sampling, but not as often.Fortunately `PyMC3` automatically initializes NUTS using another inference algorithm called ADVI (auto-diff variational inference). Moreover, `PyMC3` will automatically assign an appropriate sampler if we don't supply it via the `step` keyword argument (see below for an example of how to explicitly assign step methods).
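For instance, one way to make the assignment explicit is to construct a step method manually and pass it to `sample`; the following is only a minimal sketch, assuming the `basic_model` defined earlier:

```python
with basic_model:
    # choose a step method explicitly instead of relying on auto-assignment
    step = pm.Metropolis()               # adaptive Metropolis-Hastings for all free variables
    trace_mh = pm.sample(2000, step=step)
```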
###Code
from scipy import optimize
with basic_model:
# draw 500 posterior samples
    trace = pm.sample(500)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using ADVI...
Average Loss = 156.08: 5%|▌ | 10932/200000 [00:01<00:31, 6082.63it/s]
Convergence archived at 11100
Interrupted at 11,100 [5%]: Average Loss = 237.04
100%|██████████| 1000/1000 [00:01<00:00, 710.94it/s]
###Markdown
The `sample` function runs the step method(s) assigned (or passed) to it for the given number of iterations and returns a `Trace` object containing the samples collected, in the order they were collected. The `trace` object can be queried in a similar way to a `dict` containing a map from variable names to `numpy.array`s. The first dimension of the array is the sampling index and the later dimensions match the shape of the variable. We can see the last 5 values for the `alpha` variable as follows:
###Code
trace['alpha'][-5:]
###Output
_____no_output_____
###Markdown
If we wanted to apply the slice sampling algorithm to `sigma` instead of NUTS (which was assigned automatically), we could have specified this as the `step` argument for `sample`.
###Code
with basic_model:
# obtain starting values via MAP
start = pm.find_MAP(fmin=optimize.fmin_powell)
# instantiate sampler
step = pm.Slice()
# draw 5000 posterior samples
trace = pm.sample(5000, step=step, start=start)
###Output
1%| | 38/5500 [00:00<00:14, 376.26it/s]
###Markdown
Posterior analysis`PyMC3` provides plotting and summarization functions for inspecting the sampling output. A simple posterior plot can be created using `traceplot`.
###Code
_ = pm.traceplot(trace)
###Output
_____no_output_____
###Markdown
The left column consists of a smoothed histogram (using kernel density estimation) of the marginal posteriors of each stochastic random variable while the right column contains the samples of the Markov chain plotted in sequential order. The `beta` variable, being vector-valued, produces two histograms and two sample traces, corresponding to both predictor coefficients.In addition, the `summary` function provides a text-based output of common posterior statistics:
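Because the trace behaves like a dictionary of NumPy arrays, the same kinds of summaries can also be computed by hand; a small sketch using the variables of `basic_model`:

```python
import numpy as np

# posterior summaries computed directly from the samples
print('alpha mean: %.3f, sd: %.3f' % (trace['alpha'].mean(), trace['alpha'].std()))
print('beta means:', trace['beta'].mean(axis=0))                 # one entry per coefficient
print('sigma 95% interval:', np.percentile(trace['sigma'], [2.5, 97.5]))
```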
###Code
pm.summary(trace)
###Output
alpha:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.905 0.099 0.001 [0.715, 1.103]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.711 0.838 0.904 0.971 1.101
beta:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.949 0.086 0.002 [0.786, 1.118]
2.599 0.507 0.014 [1.590, 3.591]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.784 0.889 0.948 1.007 1.117
1.593 2.256 2.605 2.940 3.599
sigma:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.991 0.073 0.001 [0.852, 1.134]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.859 0.941 0.986 1.037 1.147
###Markdown
Case study 1: Stochastic volatilityWe present a case study of stochastic volatility, time varying stock market volatility, to illustrate PyMC3's use in addressing a more realistic problem. The distribution of market returns is highly non-normal, which makes sampling the volatilities significantly more difficult. This example has 400+ parameters so using common sampling algorithms like Metropolis-Hastings would get bogged down, generating highly autocorrelated samples. Instead, we use NUTS, which is dramatically more efficient. The ModelAsset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others they are very stable. Stochastic volatility models address this with a latent volatility variable, which changes over time. The following model is similar to the one described in the NUTS paper (Hoffman 2014, p. 21).$$\begin{aligned} \sigma &\sim exp(50) \\ \nu &\sim exp(.1) \\ s_i &\sim \mathcal{N}(s_{i-1}, \sigma^{-2}) \\ log(y_i) &\sim t(\nu, 0, exp(-2 s_i))\end{aligned}$$Here, $y$ is the daily return series which is modeled with a Student-t distribution with an unknown degrees of freedom parameter, and a scale parameter determined by a latent process $s$. The individual $s_i$ are the individual daily log volatilities in the latent log volatility process. The DataOur data consist of daily returns of the S&P 500 during the 2008 financial crisis. Here, we use `pandas-datareader` to obtain the price data from Yahoo!-Finance; it can be installed with `pip install pandas-datareader`.
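Remote data sources occasionally change or go offline; as a fallback, an equivalent return series could be built from any local file of daily closing prices. This is only a hedged sketch assuming a hypothetical CSV `sp500_close.csv` with a date index and a `Close` column:

```python
import pandas as pd

# hypothetical local file of daily closing prices (date index, 'Close' column)
prices = pd.read_csv('sp500_close.csv', index_col=0, parse_dates=True)['Close']
returns = prices.loc['2008-05-01':'2009-12-01'].pct_change().dropna()
```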
###Code
from pandas_datareader import data
import pandas as pd
returns = data.get_data_yahoo('SPY', start='2008-5-1', end='2009-12-1')['Close'].pct_change()
len(returns)
returns.plot(figsize=(10, 6))
plt.ylabel('daily returns in %');
###Output
_____no_output_____
###Markdown
Model SpecificationAs with the linear regression example, specifying the model in PyMC3 mirrors its statistical specification. This model employs several new distributions: the `Exponential` distribution for the $\nu$ and $\sigma$ priors, the Student-T (`StudentT`) distribution for distribution of returns, and the `GaussianRandomWalk` for the prior for the latent volatilities. In PyMC3, variables with purely positive priors like `Exponential` are transformed with a log transform. This makes sampling more robust. Behind the scenes, a variable in the unconstrained space (named "variableName_log") is added to the model for sampling. In this model this happens behind the scenes for both the degrees of freedom, `nu`, and the scale parameter for the volatility process, `sigma`, since they both have exponential priors. Variables with priors that constrain them on two sides, like `Beta` or `Uniform`, are also transformed to be unconstrained but with a log odds transform. Although, unlike model specification in PyMC2, we do not typically provide starting points for variables at the model specification stage, we can also provide an initial value for any distribution (called a "test value") using the `testval` argument. This overrides the default test value for the distribution (usually the mean, median or mode of the distribution), and is most often useful if some values are illegal and we want to ensure we select a legal one. The test values for the distributions are also used as a starting point for sampling and optimization by default, though this is easily overriden. The vector of latent volatilities `s` is given a prior distribution by `GaussianRandomWalk`. As its name suggests GaussianRandomWalk is a vector valued distribution where the values of the vector form a random normal walk of length n, as specified by the `shape` argument. The scale of the innovations of the random walk, `sigma`, is specified in terms of the precision of the normally distributed innovations and can be a scalar or vector.
###Code
with pm.Model() as sp500_model:
nu = pm.Exponential('nu', 1./10, testval=5.)
sigma = pm.Exponential('sigma', 1./.02, testval=.1)
s = pm.GaussianRandomWalk('s', sigma**-2, shape=len(returns))
volatility_process = pm.Deterministic('volatility_process', pm.math.exp(-2*s))
r = pm.StudentT('r', nu, lam=1/volatility_process, observed=returns)
###Output
_____no_output_____
###Markdown
Notice that we transform the log volatility process `s` into the volatility process by `exp(-2*s)`. Here, `exp` is a Theano function, rather than the corresponding function in NumPy; Theano provides a large subset of the mathematical functions that NumPy does.Also note that we have declared the `Model` name `sp500_model` in the first occurrence of the context manager, rather than splitting it into two lines, as we did for the first example. Fitting
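As an aside, these symbolic functions live in `pm.math`, and named transformations of random variables can be stored in the trace by wrapping them in `Deterministic`; a minimal toy sketch, unrelated to the volatility model:

```python
with pm.Model() as math_demo:
    x = pm.Normal('x', mu=0., sd=1.)
    # exp, log, sqrt, dot, etc. operate symbolically on random variables
    y = pm.Deterministic('y', pm.math.exp(x))
    z = pm.Deterministic('z', pm.math.sqrt(1 + x**2))
```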
###Code
with sp500_model:
trace = pm.sample(2000)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using ADVI...
Average Loss = -868.62: 32%|███▏ | 63550/200000 [00:23<00:50, 2717.77it/s]
Convergence archived at 63600
Interrupted at 63,600 [31%]: Average Loss = 559.54
100%|██████████| 2500/2500 [02:37<00:00, 15.91it/s]
###Markdown
We can check our samples by looking at the traceplot for `nu` and `sigma`.
###Code
pm.traceplot(trace, [nu, sigma]);
###Output
_____no_output_____
###Markdown
Finally we plot the distribution of volatility paths by plotting many of our sampled volatility paths on the same graph. Each is rendered partially transparent (via the `alpha` argument in Matplotlib's `plot` function) so the regions where many paths overlap are shaded more darkly.
###Code
fig, ax = plt.subplots(figsize=(15, 8))
returns.plot(ax=ax)
ax.plot(returns.index, 1/np.exp(trace['s',::5].T), 'r', alpha=.03);
ax.set(title='volatility_process', xlabel='time', ylabel='volatility');
ax.legend(['S&P500', 'stochastic volatility process'])
###Output
_____no_output_____
###Markdown
As you can see, the model correctly infers the increase in volatility during the 2008 financial crash. Moreover, note that this model is quite complex because of its high dimensionality and dependency-structure in the random walk distribution. NUTS as implemented in PyMC3, however, correctly infers the posterior distribution with ease. Case study 2: Coal mining disastersConsider the following time series of recorded coal mining disasters in the UK from 1851 to 1962 (Jarrett, 1979). The number of disasters is thought to have been affected by changes in safety regulations during this period. Unfortunately, we also have a pair of years with missing data, identified as missing by a NumPy MaskedArray using -999 as the marker value. Next we will build a model for this series and attempt to estimate when the change occurred. At the same time, we will see how to handle missing data, use multiple samplers and sample from discrete random variables.
###Code
disaster_data = np.ma.masked_values([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, -999, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, -999, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1], value=-999)
year = np.arange(1851, 1962)
plt.plot(year, disaster_data, 'o', markersize=8);
plt.ylabel("Disaster count")
plt.xlabel("Year")
###Output
_____no_output_____
###Markdown
Occurrences of disasters in the time series are thought to follow a Poisson process with a large rate parameter in the early part of the time series, and a smaller rate in the later part. We are interested in locating the change point in the series, which perhaps is related to changes in mining safety regulations. In our model, $$ \begin{aligned} D_t &\sim \text{Pois}(r_t), r_t= \begin{cases} e, & \text{if } t \lt s \\ l, & \text{if } t \ge s \end{cases} \\ s &\sim \text{Unif}(t_l, t_h)\\ e &\sim \text{exp}(1)\\ l &\sim \text{exp}(1) \end{aligned}$$ The parameters are defined as follows: * $D_t$: The number of disasters in year $t$ * $r_t$: The rate parameter of the Poisson distribution of disasters in year $t$. * $s$: The year in which the rate parameter changes (the switchpoint). * $e$: The rate parameter before the switchpoint $s$. * $l$: The rate parameter after the switchpoint $s$. * $t_l$, $t_h$: The lower and upper boundaries of year $t$. This model is built much like our previous models. The major differences are the introduction of discrete variables with the Poisson and discrete-uniform priors and the novel form of the deterministic random variable `rate`.
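Before writing the probabilistic version, the piecewise rate can be illustrated with plain NumPy for a fixed switchpoint; the values below are made up purely for illustration:

```python
import numpy as np

# for a fixed switchpoint, the rate is the early rate up to and including that year
# and the late rate afterwards (mirroring the switch() expression used in the model)
switchpoint_guess, early_rate_guess, late_rate_guess = 1890, 3.0, 1.0
rate_guess = np.where(switchpoint_guess >= year, early_rate_guess, late_rate_guess)
```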
###Code
with pm.Model() as disaster_model:
switchpoint = pm.DiscreteUniform('switchpoint', lower=year.min(), upper=year.max(), testval=1900)
# Priors for pre- and post-switch rates number of disasters
early_rate = pm.Exponential('early_rate', 1)
late_rate = pm.Exponential('late_rate', 1)
# Allocate appropriate Poisson rates to years before and after current
rate = pm.math.switch(switchpoint >= year, early_rate, late_rate)
disasters = pm.Poisson('disasters', rate, observed=disaster_data)
###Output
_____no_output_____
###Markdown
The logic for the rate random variable,```pythonrate = switch(switchpoint >= year, early_rate, late_rate)```is implemented using `switch`, a Theano function that works like an if statement. It uses the first argument to switch between the next two arguments. Missing values are handled transparently by passing a `MaskedArray` or a `pandas.DataFrame` with NaN values to the `observed` argument when creating an observed stochastic random variable. Behind the scenes, another random variable, `disasters.missing_values` is created to model the missing values. All we need to do to handle the missing values is ensure we sample this random variable as well. Unfortunately, because they are discrete variables and thus have no meaningful gradient, we cannot use NUTS for sampling `switchpoint` or the missing disaster observations. Instead, we will sample using a `Metropolis` step method, which implements adaptive Metropolis-Hastings, because it is designed to handle discrete values. `PyMC3` automatically assigns the correct sampling algorithms.
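Equivalently, the missing years could be marked with NaN in a pandas object and passed to `observed`; a minimal sketch of that conversion (assuming NaN-bearing pandas input is masked in the same way internally):

```python
import numpy as np
import pandas as pd

# the same observations with NaN markers instead of a masked array; this object
# could be passed to `observed=` in place of disaster_data
disaster_series = pd.Series(disaster_data.astype(float).filled(np.nan), index=year)
```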
###Code
with disaster_model:
trace = pm.sample(10000)
###Output
Assigned Metropolis to switchpoint
Assigned NUTS to early_rate_log__
Assigned NUTS to late_rate_log__
Assigned Metropolis to disasters_missing
100%|██████████| 10500/10500 [01:11<00:00, 146.76it/s]
###Markdown
In the trace plot below we can see that there's about a 10 year span that's plausible for a significant change in safety, but a 5 year span that contains most of the probability mass. The distribution is jagged because of the jumpy relationship between the year switchpoint and the likelihood and not due to sampling error.
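The amount of posterior mass in any given span of years can be read directly off the samples; a small sketch:

```python
# fraction of posterior samples of the switchpoint falling inside a chosen window
sp = trace['switchpoint']
in_window = (sp >= 1888) & (sp <= 1892)
print('P(1888 <= switchpoint <= 1892) ~ %.2f' % in_window.mean())
```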
###Code
pm.traceplot(trace);
###Output
_____no_output_____
###Markdown
Arbitrary deterministicsDue to its reliance on Theano, PyMC3 provides many mathematical functions and operators for transforming random variables into new random variables. However, the library of functions in Theano is not exhaustive, therefore Theano and PyMC3 provide functionality for creating arbitrary Theano functions in pure Python, and including these functions in PyMC models. This is supported with the `as_op` function decorator.Theano needs to know the types of the inputs and outputs of a function, which are specified for `as_op` by `itypes` for inputs and `otypes` for outputs. The Theano documentation includes [an overview of the available types](http://deeplearning.net/software/theano/library/tensor/basic.htmlall-fully-typed-constructors).
###Code
import theano.tensor as tt
from theano.compile.ops import as_op
@as_op(itypes=[tt.lscalar], otypes=[tt.lscalar])
def crazy_modulo3(value):
if value > 0:
return value % 3
else :
return (-value + 1) % 3
with pm.Model() as model_deterministic:
a = pm.Poisson('a', 1)
b = crazy_modulo3(a)
###Output
_____no_output_____
###Markdown
An important drawback of this approach is that it is not possible for `theano` to inspect these functions in order to compute the gradient required for the Hamiltonian-based samplers. Therefore, it is not possible to use the HMC or NUTS samplers for a model that uses such an operator. However, it is possible to add a gradient if we inherit from `theano.Op` instead of using `as_op`. The PyMC example set includes [a more elaborate example of the usage of as_op](https://github.com/pymc-devs/pymc3/blob/master/pymc3/examples/disaster_model_theano_op.py). Arbitrary distributionsSimilarly, the library of statistical distributions in PyMC3 is not exhaustive, but PyMC3 allows for the creation of user-defined functions for an arbitrary probability distribution. For simple statistical distributions, the `DensityDist` function takes as an argument any function that calculates a log-probability $log(p(x))$. This function may employ other random variables in its calculation. Here is an example inspired by a blog post by Jake Vanderplas on which priors to use for a linear regression (Vanderplas, 2014). ```pythonimport theano.tensor as ttwith pm.Model() as model: alpha = pm.Uniform('intercept', -100, 100) Create custom densities beta = pm.DensityDist('beta', lambda value: -1.5 * tt.log(1 + value**2), testval=0) eps = pm.DensityDist('eps', lambda value: -tt.log(tt.abs_(value)), testval=1) Create likelihood like = pm.Normal('y_est', mu=alpha + beta * X, sd=eps, observed=Y)``` For more complex distributions, one can create a subclass of `Continuous` or `Discrete` and provide the custom `logp` function, as required. This is how the built-in distributions in PyMC are specified. As an example, fields like psychology and astrophysics have complex likelihood functions for a particular process that may require numerical approximation. In these cases, it is impossible to write the function in terms of predefined theano operators and we must use a custom theano operator using `as_op` or inheriting from `theano.Op`. Implementing the `beta` variable above as a `Continuous` subclass is shown below, along with a sub-function.
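Before that, here is a minimal sketch of the gradient-aware alternative mentioned above: subclassing `theano.Op` directly and implementing `grad`. The squaring operation is a toy example, separate from the `Beta` distribution implemented next:

```python
import numpy as np
import theano
import theano.tensor as tt

class Square(theano.Op):
    """x -> x**2 with an explicit gradient, so gradient-based samplers remain usable."""
    itypes = [tt.dscalar]
    otypes = [tt.dscalar]

    def perform(self, node, inputs, output_storage):
        (x,) = inputs
        output_storage[0][0] = np.asarray(x ** 2, dtype=np.float64)

    def grad(self, inputs, output_grads):
        (x,) = inputs
        (g,) = output_grads
        # chain rule: d(x**2)/dx = 2*x
        return [2.0 * x * g]

square = Square()

with pm.Model() as op_demo:
    x = pm.Normal('x', mu=0., sd=1.)
    y = pm.Deterministic('y', square(x))   # NUTS can still be used because grad is defined
```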
###Code
class Beta(pm.Continuous):
def __init__(self, mu, *args, **kwargs):
super(Beta, self).__init__(*args, **kwargs)
self.mu = mu
self.mode = mu
def logp(self, value):
mu = self.mu
return beta_logp(value - mu)
def beta_logp(value):
return -1.5 * np.log(1 + (value)**2)
with pm.Model() as model:
beta = Beta('slope', mu=0, testval=0)
###Output
_____no_output_____
###Markdown
If your logp can not be expressed in Theano, you can decorate the function with `as_op` as follows: `@as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])`. Note, that this will create a blackbox Python function that will be much slower and not provide the gradients necessary for e.g. NUTS. Generalized Linear ModelsGeneralized Linear Models (GLMs) are a class of flexible models that are widely used to estimate regression relationships between a single outcome variable and one or multiple predictors. Because these models are so common, `PyMC3` offers a `glm` submodule that allows flexible creation of various GLMs with an intuitive `R`-like syntax that is implemented via the `patsy` module.The `glm` submodule requires data to be included as a `pandas` `DataFrame`. Hence, for our linear regression example:
###Code
# Convert X and Y to a pandas DataFrame
import pandas
df = pandas.DataFrame({'x1': X1, 'x2': X2, 'y': Y})
###Output
_____no_output_____
###Markdown
The model can then be very concisely specified in one line of code.
###Code
from pymc3.glm import GLM
with pm.Model() as model_glm:
GLM.from_formula('y ~ x1 + x2', df)
trace = pm.sample()
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using ADVI...
Average Loss = 164.12: 5%|▌ | 10845/200000 [00:01<00:21, 8809.19it/s]
Convergence archived at 11100
Interrupted at 11,100 [5%]: Average Loss = 220.44
100%|██████████| 1000/1000 [00:01<00:00, 831.50it/s]
###Markdown
The error distribution, if not specified via the `family` argument, is assumed to be normal. In the case of logistic regression, this can be modified by passing in a `Binomial` family object.
###Code
from pymc3.glm.families import Binomial
df_logistic = pandas.DataFrame({'x1': X1, 'y': Y > np.median(Y)})
with pm.Model() as model_glm_logistic:
GLM.from_formula('y ~ x1', df_logistic, family=Binomial())
###Output
_____no_output_____
###Markdown
Getting started with PyMC3Authors: John Salvatier, Thomas V. Wiecki, Christopher FonnesbeckNote: This text is taken from the [PeerJ CS publication on PyMC3](https://peerj.com/articles/cs-55/). AbstractProbabilistic Programming allows for automatic Bayesian inference on user-defined probabilistic models. Recent advances in Markov chain Monte Carlo (MCMC) sampling allow inference on increasingly complex models. This class of MCMC, known as Hamliltonian Monte Carlo, requires gradient information which is often not readily available. PyMC3 is a new open source Probabilistic Programming framework written in Python that uses Theano to compute gradients via automatic differentiation as well as compile probabilistic programs on-the-fly to C for increased speed. Contrary to other Probabilistic Programming languages, PyMC3 allows model specification directly in Python code. The lack of a domain specific language allows for great flexibility and direct interaction with the model. This paper is a tutorial-style introduction to this software package. IntroductionProbabilistic programming (PP) allows flexible specification of Bayesian statistical models in code. PyMC3 is a new, open-source PP framework with an intuitive and readable, yet powerful, syntax that is close to the natural syntax statisticians use to describe models. It features next-generation Markov chain Monte Carlo (MCMC) sampling algorithms such as the No-U-Turn Sampler (NUTS; Hoffman, 2014), a self-tuning variant of Hamiltonian Monte Carlo (HMC; Duane, 1987). This class of samplers works well on high dimensional and complex posterior distributions and allows many complex models to be fit without specialized knowledge about fitting algorithms. HMC and NUTS take advantage of gradient information from the likelihood to achieve much faster convergence than traditional sampling methods, especially for larger models. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo, which means you usually don't need to have specialized knowledge about how the algorithms work. PyMC3, Stan (Stan Development Team, 2014), and the LaplacesDemon package for R are currently the only PP packages to offer HMC.Probabilistic programming in Python confers a number of advantages including multi-platform compatibility, an expressive yet clean and readable syntax, easy integration with other scientific libraries, and extensibility via C, C++, Fortran or Cython. These features make it relatively straightforward to write and use custom statistical distributions, samplers and transformation functions, as required by Bayesian analysis.While most of PyMC3's user-facing features are written in pure Python, it leverages Theano (Bergstra et al., 2010) to transparently transcode models to C and compile them to machine code, thereby boosting performance. Theano is a library that allows expressions to be defined using generalized vector data structures called *tensors*, which are tightly integrated with the popular NumPy `ndarray` data structure, and similarly allow for broadcasting and advanced indexing, just as NumPy arrays do. Theano also automatically optimizes the likelihood's computational graph for speed and provides simple GPU integration.Here, we present a primer on the use of PyMC3 for solving general Bayesian statistical inference and prediction problems. 
We will first see the basics of how to use PyMC3, motivated by a simple example: installation, data creation, model definition, model fitting and posterior analysis. Then we will cover two case studies and use them to show how to define and fit more sophisticated models. Finally we will show how to extend PyMC3 and discuss other useful features: the Generalized Linear Models subpackage, custom distributions, custom transformations and alternative storage backends. InstallationRunning PyMC3 requires a working Python interpreter, either version 2.7 (or more recent) or 3.4 (or more recent); we recommend that new users install version 3.4. A complete Python installation for Mac OSX, Linux and Windows can most easily be obtained by downloading and installing the free [`Anaconda Python Distribution`](https://store.continuum.io/cshop/anaconda/) by ContinuumIO. `PyMC3` can be installed using `pip` (https://pip.pypa.io/en/latest/installing.html):```pip install git+https://github.com/pymc-devs/pymc3```PyMC3 depends on several third-party Python packages which will be automatically installed when installing via pip. The four required dependencies are: `Theano`, `NumPy`, `SciPy`, and `Matplotlib`. To take full advantage of PyMC3, the optional dependencies `Pandas` and `Patsy` should also be installed. These are *not* automatically installed, but can be installed by:```pip install patsy pandas```The source code for PyMC3 is hosted on GitHub at https://github.com/pymc-devs/pymc3 and is distributed under the liberal [Apache License 2.0](https://github.com/pymc-devs/pymc3/blob/master/LICENSE). On the GitHub site, users may also report bugs and other issues, as well as contribute code to the project, which we actively encourage. A Motivating Example: Linear RegressionTo introduce model definition, fitting and posterior analysis, we first consider a simple Bayesian linear regression model with normal priors for the parameters. We are interested in predicting outcomes $Y$ as normally-distributed observations with an expected value $\mu$ that is a linear function of two predictor variables, $X_1$ and $X_2$.$$\begin{aligned} Y &\sim \mathcal{N}(\mu, \sigma^2) \\\mu &= \alpha + \beta_1 X_1 + \beta_2 X_2\end{aligned}$$where $\alpha$ is the intercept, and $\beta_i$ is the coefficient for covariate $X_i$, while $\sigma$ represents the observation error. Since we are constructing a Bayesian model, the unknown variables in the model must be assigned a prior distribution. We choose zero-mean normal priors with variance of 100 for both regression coefficients, which corresponds to *weak* information regarding the true parameter values. We choose a half-normal distribution (normal distribution bounded at zero) as the prior for $\sigma$.$$\begin{aligned} \alpha &\sim \mathcal{N}(0, 100) \\\beta_i &\sim \mathcal{N}(0, 100) \\\sigma &\sim \lvert\mathcal{N}(0, 1){\rvert}\end{aligned}$$ Generating dataWe can simulate some artificial data from this model using only NumPy's `random` module, and then use PyMC3 to try to recover the corresponding parameters. We are intentionally generating the data to closely correspond the PyMC3 model structure.
###Code
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
# Initialize random number generator
np.random.seed(123)
# True parameter values
alpha, sigma = 1, 1
beta = [1, 2.5]
# Size of dataset
size = 100
# Predictor variable
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
# Simulate outcome variable
Y = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma
###Output
_____no_output_____
###Markdown
Here is what the simulated data look like. We use the `pylab` module from the plotting library matplotlib.
###Code
%matplotlib inline
fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,4))
axes[0].scatter(X1, Y)
axes[1].scatter(X2, Y)
axes[0].set_ylabel('Y'); axes[0].set_xlabel('X1'); axes[1].set_xlabel('X2');
###Output
_____no_output_____
###Markdown
Model SpecificationSpecifying this model in PyMC3 is straightforward because the syntax is as close to the statistical notation. For the most part, each line of Python code corresponds to a line in the model notation above. First, we import PyMC. We use the convention of importing it as `pm`.
###Code
import pymc3 as pm
print('Running on PyMC3 v{}'.format(pm.__version__))
###Output
Running on PyMC3 v3.3
###Markdown
Now we build our model, which we will present in full first, then explain each part line-by-line.
###Code
basic_model = pm.Model()
with basic_model:
# Priors for unknown model parameters
alpha = pm.Normal('alpha', mu=0, sd=10)
beta = pm.Normal('beta', mu=0, sd=10, shape=2)
sigma = pm.HalfNormal('sigma', sd=1)
# Expected value of outcome
mu = alpha + beta[0]*X1 + beta[1]*X2
# Likelihood (sampling distribution) of observations
Y_obs = pm.Normal('Y_obs', mu=mu, sd=sigma, observed=Y)
###Output
_____no_output_____
###Markdown
The first line,```pythonbasic_model = Model()```creates a new `Model` object which is a container for the model random variables.Following instantiation of the model, the subsequent specification of the model components is performed inside a `with` statement:```pythonwith basic_model:```This creates a *context manager*, with our `basic_model` as the context, that includes all statements until the indented block ends. This means all PyMC3 objects introduced in the indented code block below the `with` statement are added to the model behind the scenes. Absent this context manager idiom, we would be forced to manually associate each of the variables with `basic_model` right after we create them. If you try to create a new random variable without a `with model:` statement, it will raise an error since there is no obvious model for the variable to be added to.The first three statements in the context manager:```pythonalpha = Normal('alpha', mu=0, sd=10)beta = Normal('beta', mu=0, sd=10, shape=2)sigma = HalfNormal('sigma', sd=1)```create a **stochastic** random variables with a Normal prior distributions for the regression coefficients with a mean of 0 and standard deviation of 10 for the regression coefficients, and a half-normal distribution for the standard deviation of the observations, $\sigma$. These are stochastic because their values are partly determined by its parents in the dependency graph of random variables, which for priors are simple constants, and partly random (or stochastic). We call the `Normal` constructor to create a random variable to use as a normal prior. The first argument is always the *name* of the random variable, which should almost always match the name of the Python variable being assigned to, since it sometimes used to retrieve the variable from the model for summarizing output. The remaining required arguments for a stochastic object are the parameters, in this case `mu`, the mean, and `sd`, the standard deviation, which we assign hyperparameter values for the model. In general, a distribution's parameters are values that determine the location, shape or scale of the random variable, depending on the parameterization of the distribution. Most commonly used distributions, such as `Beta`, `Exponential`, `Categorical`, `Gamma`, `Binomial` and many others, are available in PyMC3.The `beta` variable has an additional `shape` argument to denote it as a vector-valued parameter of size 2. The `shape` argument is available for all distributions and specifies the length or shape of the random variable, but is optional for scalar variables, since it defaults to a value of one. It can be an integer, to specify an array, or a tuple, to specify a multidimensional array (*e.g.* `shape=(5,7)` makes random variable that takes on 5 by 7 matrix values). Detailed notes about distributions, sampling methods and other PyMC3 functions are available via the `help` function.
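As a quick illustration of the `shape` argument described above (a toy model, separate from the regression):

```python
with pm.Model() as shape_demo:
    scalar = pm.Normal('scalar', mu=0, sd=1)                 # default: a scalar variable
    vector = pm.Normal('vector', mu=0, sd=1, shape=3)        # a length-3 vector
    matrix = pm.Normal('matrix', mu=0, sd=1, shape=(5, 7))   # a 5-by-7 matrix-valued variable
```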
###Code
help(pm.Normal) #try help(Model), help(Uniform) or help(basic_model)
###Output
Help on class Normal in module pymc3.distributions.continuous:
class Normal(pymc3.distributions.distribution.Continuous)
| Univariate normal log-likelihood.
|
| .. math::
|
| f(x \mid \mu, \tau) =
| \sqrt{\frac{\tau}{2\pi}}
| \exp\left\{ -\frac{\tau}{2} (x-\mu)^2 \right\}
|
| ======== ==========================================
| Support :math:`x \in \mathbb{R}`
| Mean :math:`\mu`
| Variance :math:`\dfrac{1}{\tau}` or :math:`\sigma^2`
| ======== ==========================================
|
| Normal distribution can be parameterized either in terms of precision
| or standard deviation. The link between the two parametrizations is
| given by
|
| .. math::
|
| \tau = \dfrac{1}{\sigma^2}
|
| .. plot::
|
| import matplotlib.pyplot as plt
| import numpy as np
| import scipy.stats as st
| x = np.linspace(-5.0, 5.0, 1000)
| fig, ax = plt.subplots()
| f = lambda mu, sd : st.norm.pdf(x, loc=mu, scale=sd)
| plot_pdf = lambda a, b : ax.plot(x, f(a,b), label=r'$\mu$={0}, $\sigma$={1}'.format(a,b))
| plot_pdf(0.0, 0.4)
| plot_pdf(0.0, 1.0)
| plot_pdf(0.0, 2.0)
| plot_pdf(-2.0, 0.4)
| plt.legend(loc='upper right', frameon=False)
| ax.set(xlim=[-5,5], ylim=[0,1.2], xlabel='x', ylabel='f(x)')
| plt.show()
|
| Parameters
| ----------
| mu : float
| Mean.
| sd : float
| Standard deviation (sd > 0) (only required if tau is not specified).
| tau : float
| Precision (tau > 0) (only required if sd is not specified).
|
| Examples
| --------
| .. code-block:: python
|
| with pm.Model():
| x = pm.Normal('x', mu=0, sd=10)
|
| with pm.Model():
| x = pm.Normal('x', mu=0, tau=1/23)
|
| Method resolution order:
| Normal
| pymc3.distributions.distribution.Continuous
| pymc3.distributions.distribution.Distribution
| builtins.object
|
| Methods defined here:
|
| __init__(self, mu=0, sd=None, tau=None, **kwargs)
| Initialize self. See help(type(self)) for accurate signature.
|
| logp(self, value)
|
| random(self, point=None, size=None, repeat=None)
|
| ----------------------------------------------------------------------
| Methods inherited from pymc3.distributions.distribution.Distribution:
|
| __getnewargs__(self)
|
| __latex__ = _repr_latex_(self, name=None, dist=None)
| Magic method name for IPython to use for LaTeX formatting.
|
| default(self)
|
| get_test_val(self, val, defaults)
|
| getattr_value(self, val)
|
| logp_nojac(self, *args, **kwargs)
| Return the logp, but do not include a jacobian term for transforms.
|
| If we use different parametrizations for the same distribution, we
| need to add the determinant of the jacobian of the transformation
| to make sure the densities still describe the same distribution.
| However, MAP estimates are not invariant with respect to the
| parametrization, we need to exclude the jacobian terms in this case.
|
| This function should be overwritten in base classes for transformed
| distributions.
|
| logp_sum(self, *args, **kwargs)
| Return the sum of the logp values for the given observations.
|
| Subclasses can use this to improve the speed of logp evaluations
| if only the sum of the logp values is needed.
|
| ----------------------------------------------------------------------
| Class methods inherited from pymc3.distributions.distribution.Distribution:
|
| dist(*args, **kwargs) from builtins.type
|
| ----------------------------------------------------------------------
| Static methods inherited from pymc3.distributions.distribution.Distribution:
|
| __new__(cls, name, *args, **kwargs)
| Create and return a new object. See help(type) for accurate signature.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from pymc3.distributions.distribution.Distribution:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
Having defined the priors, the next statement creates the expected value `mu` of the outcomes, specifying the linear relationship:```pythonmu = alpha + beta[0]*X1 + beta[1]*X2```This creates a **deterministic** random variable, which implies that its value is *completely* determined by its parents' values. That is, there is no uncertainty beyond that which is inherent in the parents' values. Here, `mu` is just the sum of the intercept `alpha` and the two products of the coefficients in `beta` and the predictor variables, whatever their values may be. PyMC3 random variables and data can be arbitrarily added, subtracted, divided, multiplied together and indexed-into to create new random variables. This allows for great model expressivity. Many common mathematical functions like `sum`, `sin`, `exp` and linear algebra functions like `dot` (for inner product) and `inv` (for inverse) are also provided. The final line of the model, defines `Y_obs`, the sampling distribution of the outcomes in the dataset.```pythonY_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)```This is a special case of a stochastic variable that we call an **observed stochastic**, and represents the data likelihood of the model. It is identical to a standard stochastic, except that its `observed` argument, which passes the data to the variable, indicates that the values for this variable were observed, and should not be changed by any fitting algorithm applied to the model. The data can be passed in the form of either a `numpy.ndarray` or `pandas.DataFrame` object.Notice that, unlike for the priors of the model, the parameters for the normal distribution of `Y_obs` are not fixed values, but rather are the deterministic object `mu` and the stochastic `sigma`. This creates parent-child relationships between the likelihood and these two variables. Model fittingHaving completely specified our model, the next step is to obtain posterior estimates for the unknown variables in the model. Ideally, we could calculate the posterior estimates analytically, but for most non-trivial models, this is not feasible. We will consider two approaches, whose appropriateness depends on the structure of the model and the goals of the analysis: finding the *maximum a posteriori* (MAP) point using optimization methods, and computing summaries based on samples drawn from the posterior distribution using Markov Chain Monte Carlo (MCMC) sampling methods. Maximum a posteriori methodsThe **maximum a posteriori (MAP)** estimate for a model, is the mode of the posterior distribution and is generally found using numerical optimization methods. This is often fast and easy to do, but only gives a point estimate for the parameters and can be biased if the mode isn't representative of the distribution. PyMC3 provides this functionality with the `find_MAP` function.Below we find the MAP for our original model. The MAP is returned as a parameter **point**, which is always represented by a Python dictionary of variable names to NumPy arrays of parameter values.
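Before moving on, note that the linear predictor above can equivalently be written with the provided linear-algebra helpers; a minimal sketch using a design matrix and `pm.math.dot` (this reformulation is an assumption of equivalence, not part of the original example):

```python
import numpy as np

X = np.column_stack([X1, X2])      # 100 x 2 design matrix built from the simulated predictors

with pm.Model() as basic_model_dot:
    alpha = pm.Normal('alpha', mu=0, sd=10)
    beta = pm.Normal('beta', mu=0, sd=10, shape=2)
    sigma = pm.HalfNormal('sigma', sd=1)
    mu = alpha + pm.math.dot(X, beta)             # inner product instead of writing each term
    Y_obs = pm.Normal('Y_obs', mu=mu, sd=sigma, observed=Y)
```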
###Code
map_estimate = pm.find_MAP(model=basic_model)
map_estimate
###Output
logp = -149.58, ||grad|| = 12.242: 100%|██████████| 19/19 [00:00<00:00, 346.18it/s]
###Markdown
By default, `find_MAP` uses the Broyden–Fletcher–Goldfarb–Shanno (BFGS) optimization algorithm to find the maximum of the log-posterior but also allows selection of other optimization algorithms from the `scipy.optimize` module. For example, below we use Powell's method to find the MAP.
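As the deprecation warning in the output below suggests, more recent PyMC3 releases select the optimizer with a `method` string rather than an `fmin` callable; a minimal sketch of that form (an assumption based on the warning text):

```python
# string-based optimizer selection, replacing the fmin=... style
map_estimate = pm.find_MAP(model=basic_model, method='Powell')
```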
###Code
from scipy import optimize
map_estimate = pm.find_MAP(model=basic_model, fmin=optimize.fmin_powell)
map_estimate
###Output
/home/maxwell/anaconda3/envs/bayes/lib/python3.6/site-packages/pymc3/tuning/starting.py:92: UserWarning: In future versions, set the optimization algorithm with a string. For example, use `method="L-BFGS-B"` instead of `fmin=sp.optimize.fmin_l_bfgs_b"`.
warnings.warn('In future versions, set the optimization algorithm with a string. '
logp = -149.47: 4%|▎ | 176/5000 [00:00<00:02, 1864.63it/s]
###Markdown
It is important to note that the MAP estimate is not always reasonable, especially if the mode is at an extreme. This can be a subtle issue; with high dimensional posteriors, one can have areas of extremely high density but low total probability because the volume is very small. This will often occur in hierarchical models with the variance parameter for the random effect. If the individual group means are all the same, the posterior will have near infinite density if the scale parameter for the group means is almost zero, even though the probability of such a small scale parameter will be small since the group means must be extremely close together. Most techniques for finding the MAP estimate also only find a *local* optimum (which is often good enough), but can fail badly for multimodal posteriors if the different modes are meaningfully different. Sampling methodsThough finding the MAP is a fast and easy way of obtaining estimates of the unknown model parameters, it is limited because there is no associated estimate of uncertainty produced with the MAP estimates. Instead, a simulation-based approach such as Markov chain Monte Carlo (MCMC) can be used to obtain a Markov chain of values that, given the satisfaction of certain conditions, are indistinguishable from samples from the posterior distribution. To conduct MCMC sampling to generate posterior samples in PyMC3, we specify a **step method** object that corresponds to a particular MCMC algorithm, such as Metropolis, Slice sampling, or the No-U-Turn Sampler (NUTS). PyMC3's `step_methods` submodule contains the following samplers: `NUTS`, `Metropolis`, `Slice`, `HamiltonianMC`, and `BinaryMetropolis`. These step methods can be assigned manually, or assigned automatically by PyMC3. Auto-assignment is based on the attributes of each variable in the model. In general:* Binary variables will be assigned to `BinaryMetropolis`* Discrete variables will be assigned to `Metropolis`* Continuous variables will be assigned to `NUTS`Auto-assignment can be overriden for any subset of variables by specifying them manually prior to sampling. Gradient-based sampling methodsPyMC3 has the standard sampling algorithms like adaptive Metropolis-Hastings and adaptive slice sampling, but PyMC3's most capable step method is the No-U-Turn Sampler. NUTS is especially useful on models that have many continuous parameters, a situation where other MCMC algorithms work very slowly. It takes advantage of information about where regions of higher probability are, based on the gradient of the log posterior-density. This helps it achieve dramatically faster convergence on large problems than traditional sampling methods achieve. PyMC3 relies on Theano to analytically compute model gradients via automatic differentiation of the posterior density. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo. For random variables that are undifferentiable (namely, discrete variables) NUTS cannot be used, but it may still be used on the differentiable variables in a model that contains undifferentiable variables. NUTS requires a scaling matrix parameter, which is analogous to the variance parameter for the jump proposal distribution in Metropolis-Hastings, although NUTS uses it somewhat differently. The matrix gives the rough shape of the distribution so that NUTS does not make jumps that are too large in some directions and too small in other directions. 
It is important to set this scaling parameter to a reasonable value to facilitate efficient sampling. This is especially true for models that have many unobserved stochastic random variables or models with highly non-normal posterior distributions. Poor scaling parameters will slow down NUTS significantly, sometimes almost stopping it completely. A reasonable starting point for sampling can also be important for efficient sampling, but not as often.Fortunately `PyMC3` automatically initializes NUTS using another inference algorithm called ADVI (auto-diff variational inference). Moreover, `PyMC3` will automatically assign an appropriate sampler if we don't supply it via the `step` keyword argument (see below for an example of how to explicitly assign step methods).
###Code
from scipy import optimize
with basic_model:
# draw 500 posterior samples
trace = pm.sample(500)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
/home/maxwell/anaconda3/envs/bayes/lib/python3.6/site-packages/pymc3/model.py:384: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
if not np.issubdtype(var.dtype, float):
Multiprocess sampling (4 chains in 4 jobs)
NUTS: [sigma_log__, beta, alpha]
100%|██████████| 1000/1000 [00:01<00:00, 768.80it/s]
###Markdown
The `sample` function runs the step method(s) assigned (or passed) to it for the given number of iterations and returns a `Trace` object containing the samples collected, in the order they were collected. The `trace` object can be queried in a similar way to a `dict` containing a map from variable names to `numpy.array`s. The first dimension of the array is the sampling index and the later dimensions match the shape of the variable. We can see the last 5 values for the `alpha` variable as follows:
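The trace also exposes a `get_values` method for retrieving samples with optional burn-in, thinning, or per-chain selection; a small sketch:

```python
# samples of alpha after discarding the first 100 draws of each chain
alpha_samples = trace.get_values('alpha', burn=100, combine=True)
print(alpha_samples.shape)
```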
###Code
trace['alpha'][-5:]
###Output
_____no_output_____
###Markdown
If we wanted to apply the slice sampling algorithm to `sigma` instead of NUTS (which was assigned automatically), we could have specified this as the `step` argument for `sample`.
###Code
with basic_model:
# obtain starting values via MAP
start = pm.find_MAP(fmin=optimize.fmin_powell)
# instantiate sampler
step = pm.Slice()
# draw 5000 posterior samples
trace = pm.sample(5000, step=step, start=start)
###Output
/home/maxwell/anaconda3/envs/bayes/lib/python3.6/site-packages/pymc3/tuning/starting.py:92: UserWarning: In future versions, set the optimization algorithm with a string. For example, use `method="L-BFGS-B"` instead of `fmin=sp.optimize.fmin_l_bfgs_b"`.
warnings.warn('In future versions, set the optimization algorithm with a string. '
logp = -149.47: 4%|▎ | 176/5000 [00:00<00:02, 1853.26it/s]
###Markdown
Posterior analysis`PyMC3` provides plotting and summarization functions for inspecting the sampling output. A simple posterior plot can be created using `traceplot`.
###Code
_ = pm.traceplot(trace)
###Output
_____no_output_____
###Markdown
The left column consists of a smoothed histogram (using kernel density estimation) of the marginal posteriors of each stochastic random variable while the right column contains the samples of the Markov chain plotted in sequential order. The `beta` variable, being vector-valued, produces two histograms and two sample traces, corresponding to both predictor coefficients.In addition, the `summary` function provides a text-based output of common posterior statistics:
###Code
pm.summary(trace)
###Output
_____no_output_____
###Markdown
Case study 1: Stochastic volatilityWe present a case study of stochastic volatility, time varying stock market volatility, to illustrate PyMC3's use in addressing a more realistic problem. The distribution of market returns is highly non-normal, which makes sampling the volatilities significantly more difficult. This example has 400+ parameters so using common sampling algorithms like Metropolis-Hastings would get bogged down, generating highly autocorrelated samples. Instead, we use NUTS, which is dramatically more efficient. The ModelAsset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others they are very stable. Stochastic volatility models address this with a latent volatility variable, which changes over time. The following model is similar to the one described in the NUTS paper (Hoffman 2014, p. 21).$$\begin{aligned} \sigma &\sim exp(50) \\ \nu &\sim exp(.1) \\ s_i &\sim \mathcal{N}(s_{i-1}, \sigma^{-2}) \\ log(r_i) &\sim t(\nu, 0, exp(-2 s_i))\end{aligned}$$Here, $r$ is the daily return series which is modeled with a Student-t distribution with an unknown degrees of freedom parameter, and a scale parameter determined by a latent process $s$. The individual $s_i$ are the individual daily log volatilities in the latent log volatility process. The DataOur data consist of daily returns of the S&P 500 during the 2008 financial crisis. Here, we use `pandas-datareader` to obtain the price data from Google Finance; it can be installed with `pip install pandas-datareader`.
###Code
from pandas_datareader import data
import pandas as pd
returns = data.get_data_google('SPY', start='2008-5-1', end='2009-12-1')['Close'].pct_change()
len(returns)
returns.plot(figsize=(10, 6))
plt.ylabel('daily returns in %');
###Output
_____no_output_____
###Markdown
Model SpecificationAs with the linear regression example, specifying the model in PyMC3 mirrors its statistical specification. This model employs several new distributions: the `Exponential` distribution for the $\nu$ and $\sigma$ priors, the Student-T (`StudentT`) distribution for distribution of returns, and the `GaussianRandomWalk` for the prior for the latent volatilities. In PyMC3, variables with purely positive priors like `Exponential` are transformed with a log transform. This makes sampling more robust. Behind the scenes, a variable in the unconstrained space (named "variableName_log") is added to the model for sampling. In this model this happens behind the scenes for both the degrees of freedom, `nu`, and the scale parameter for the volatility process, `sigma`, since they both have exponential priors. Variables with priors that constrain them on two sides, like `Beta` or `Uniform`, are also transformed to be unconstrained but with a log odds transform. Although, unlike model specification in PyMC2, we do not typically provide starting points for variables at the model specification stage, we can also provide an initial value for any distribution (called a "test value") using the `testval` argument. This overrides the default test value for the distribution (usually the mean, median or mode of the distribution), and is most often useful if some values are illegal and we want to ensure we select a legal one. The test values for the distributions are also used as a starting point for sampling and optimization by default, though this is easily overriden. The vector of latent volatilities `s` is given a prior distribution by `GaussianRandomWalk`. As its name suggests GaussianRandomWalk is a vector valued distribution where the values of the vector form a random normal walk of length n, as specified by the `shape` argument. The scale of the innovations of the random walk, `sigma`, is specified in terms of the precision of the normally distributed innovations and can be a scalar or vector.
###Code
with pm.Model() as sp500_model:
nu = pm.Exponential('nu', 1./10, testval=5.)
sigma = pm.Exponential('sigma', 1./.02, testval=.1)
s = pm.GaussianRandomWalk('s', sigma**-2, shape=len(returns))
volatility_process = pm.Deterministic('volatility_process', pm.math.exp(-2*s))
r = pm.StudentT('r', nu, lam=volatility_process, observed=returns)
###Output
_____no_output_____
###Markdown
Notice that we transform the log volatility process `s` into the volatility process by `exp(-2*s)`. Here, `exp` is a Theano function, rather than the corresponding function in NumPy; Theano provides a large subset of the mathematical functions that NumPy does.Also note that we have declared the `Model` name `sp500_model` in the first occurrence of the context manager, rather than splitting it into two lines, as we did for the first example. Fitting
###Code
with sp500_model:
trace = pm.sample(2000)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
/home/maxwell/anaconda3/envs/bayes/lib/python3.6/site-packages/pymc3/model.py:384: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
if not np.issubdtype(var.dtype, float):
Multiprocess sampling (4 chains in 4 jobs)
NUTS: [r_missing, s, sigma_log__, nu_log__]
100%|██████████| 2500/2500 [02:26<00:00, 17.08it/s]
The estimated number of effective samples is smaller than 200 for some parameters.
###Markdown
We can check our samples by looking at the traceplot for `nu` and `sigma`.
###Code
pm.traceplot(trace, [nu, sigma]);
###Output
_____no_output_____
###Markdown
Finally we plot the distribution of volatility paths by plotting many of our sampled volatility paths on the same graph. Each is rendered partially transparent (via the `alpha` argument in Matplotlib's `plot` function) so the regions where many paths overlap are shaded more darkly.
###Code
fig, ax = plt.subplots(figsize=(15, 8))
returns.plot(ax=ax)
ax.plot(returns.index, 1/np.exp(trace['s',::5].T), 'r', alpha=.03);
ax.set(title='volatility_process', xlabel='time', ylabel='volatility');
ax.legend(['S&P500', 'stochastic volatility process']);
###Output
_____no_output_____
###Markdown
As you can see, the model correctly infers the increase in volatility during the 2008 financial crash. Moreover, note that this model is quite complex because of its high dimensionality and dependency-structure in the random walk distribution. NUTS as implemented in PyMC3, however, correctly infers the posterior distribution with ease. Case study 2: Coal mining disastersConsider the following time series of recorded coal mining disasters in the UK from 1851 to 1962 (Jarrett, 1979). The number of disasters is thought to have been affected by changes in safety regulations during this period. Unfortunately, we also have a pair of years with missing data, identified as missing by a NumPy MaskedArray using -999 as the marker value. Next we will build a model for this series and attempt to estimate when the change occurred. At the same time, we will see how to handle missing data, use multiple samplers and sample from discrete random variables.
###Code
disaster_data = np.ma.masked_values([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, -999, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, -999, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1], value=-999)
year = np.arange(1851, 1962)
plt.plot(year, disaster_data, 'o', markersize=8);
plt.ylabel("Disaster count")
plt.xlabel("Year");
###Output
_____no_output_____
###Markdown
Occurrences of disasters in the time series are thought to follow a Poisson process with a large rate parameter in the early part of the time series, and a smaller rate in the later part. We are interested in locating the change point in the series, which perhaps is related to changes in mining safety regulations. In our model, $$ \begin{aligned} D_t &\sim \text{Pois}(r_t), r_t= \begin{cases} e, & \text{if } t \lt s \\ l, & \text{if } t \ge s \end{cases} \\ s &\sim \text{Unif}(t_l, t_h)\\ e &\sim \text{exp}(1)\\ l &\sim \text{exp}(1) \end{aligned}$$ The parameters are defined as follows: * $D_t$: The number of disasters in year $t$ * $r_t$: The rate parameter of the Poisson distribution of disasters in year $t$. * $s$: The year in which the rate parameter changes (the switchpoint). * $e$: The rate parameter before the switchpoint $s$. * $l$: The rate parameter after the switchpoint $s$. * $t_l$, $t_h$: The lower and upper boundaries of year $t$. This model is built much like our previous models. The major differences are the introduction of discrete variables with the Poisson and discrete-uniform priors and the novel form of the deterministic random variable `rate`.
###Code
with pm.Model() as disaster_model:
switchpoint = pm.DiscreteUniform('switchpoint', lower=year.min(), upper=year.max(), testval=1900)
# Priors for pre- and post-switch rates number of disasters
early_rate = pm.Exponential('early_rate', 1)
late_rate = pm.Exponential('late_rate', 1)
# Allocate appropriate Poisson rates to years before and after current
rate = pm.math.switch(switchpoint >= year, early_rate, late_rate)
disasters = pm.Poisson('disasters', rate, observed=disaster_data)
###Output
_____no_output_____
###Markdown
The logic for the rate random variable,```pythonrate = switch(switchpoint >= year, early_rate, late_rate)```is implemented using `switch`, a Theano function that works like an if statement. It uses the first argument to switch between the next two arguments. Missing values are handled transparently by passing a `MaskedArray` or a `pandas.DataFrame` with NaN values to the `observed` argument when creating an observed stochastic random variable. Behind the scenes, another random variable, `disasters.missing_values` is created to model the missing values. All we need to do to handle the missing values is ensure we sample this random variable as well. Unfortunately, because they are discrete variables and thus have no meaningful gradient, we cannot use NUTS for sampling `switchpoint` or the missing disaster observations. Instead, we will sample using a `Metropolis` step method, which implements adaptive Metropolis-Hastings, because it is designed to handle discrete values. `PyMC3` automatically assigns the correct sampling algorithms.
###Code
with disaster_model:
trace = pm.sample(10000)
###Output
/home/maxwell/anaconda3/envs/bayes/lib/python3.6/site-packages/pymc3/model.py:384: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
if not np.issubdtype(var.dtype, float):
Multiprocess sampling (4 chains in 4 jobs)
CompoundStep
>CompoundStep
>>Metropolis: [disasters_missing]
>>Metropolis: [switchpoint]
>NUTS: [late_rate_log__, early_rate_log__]
99%|█████████▉| 10434/10500 [00:12<00:00, 849.77it/s]/home/maxwell/anaconda3/envs/bayes/lib/python3.6/site-packages/numpy/core/fromnumeric.py:2957: RuntimeWarning: Mean of empty slice.
out=out, **kwargs)
100%|██████████| 10500/10500 [00:12<00:00, 849.78it/s]
/home/maxwell/anaconda3/envs/bayes/lib/python3.6/site-packages/numpy/core/fromnumeric.py:2957: RuntimeWarning: Mean of empty slice.
out=out, **kwargs)
/home/maxwell/anaconda3/envs/bayes/lib/python3.6/site-packages/numpy/core/fromnumeric.py:2957: RuntimeWarning: Mean of empty slice.
out=out, **kwargs)
/home/maxwell/anaconda3/envs/bayes/lib/python3.6/site-packages/numpy/core/fromnumeric.py:2957: RuntimeWarning: Mean of empty slice.
out=out, **kwargs)
Tuning was enabled throughout the whole trace.
Tuning was enabled throughout the whole trace.
Tuning was enabled throughout the whole trace.
Tuning was enabled throughout the whole trace.
The number of effective samples is smaller than 10% for some parameters.
###Markdown
In the trace plot below we can see that there's about a 10 year span that's plausible for a significant change in safety, but a 5 year span that contains most of the probability mass. The distribution is jagged because of the jumpy relationship between the year switchpoint and the likelihood and not due to sampling error.
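The claim about where the posterior mass sits can be checked directly from the samples. A short sketch, assuming the trace obtained above (exact numbers will vary between runs):

```python
import numpy as np

# Approximate posterior probability of each candidate switch year.
years_sampled, counts = np.unique(trace['switchpoint'], return_counts=True)
print(dict(zip(years_sampled, np.round(counts / counts.sum(), 3))))
```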
###Code
pm.traceplot(trace);
###Output
_____no_output_____
###Markdown
Arbitrary deterministicsDue to its reliance on Theano, PyMC3 provides many mathematical functions and operators for transforming random variables into new random variables. However, the library of functions in Theano is not exhaustive; therefore, Theano and PyMC3 provide functionality for creating arbitrary Theano functions in pure Python and including these functions in PyMC models. This is supported with the `as_op` function decorator.Theano needs to know the types of the inputs and outputs of a function, which are specified for `as_op` by `itypes` for inputs and `otypes` for outputs. The Theano documentation includes [an overview of the available types](http://deeplearning.net/software/theano/library/tensor/basic.html#all-fully-typed-constructors).
###Code
import theano.tensor as tt
from theano.compile.ops import as_op
@as_op(itypes=[tt.lscalar], otypes=[tt.lscalar])
def crazy_modulo3(value):
if value > 0:
return value % 3
else :
return (-value + 1) % 3
with pm.Model() as model_deterministic:
a = pm.Poisson('a', 1)
b = crazy_modulo3(a)
###Output
_____no_output_____
###Markdown
An important drawback of this approach is that it is not possible for `theano` to inspect these functions in order to compute the gradient required for the Hamiltonian-based samplers. Therefore, it is not possible to use the HMC or NUTS samplers for a model that uses such an operator. However, it is possible to add a gradient if we inherit from `theano.Op` instead of using `as_op`. The PyMC example set includes [a more elaborate example of the usage of as_op](https://github.com/pymc-devs/pymc3/blob/master/pymc3/examples/disaster_model_theano_op.py). Arbitrary distributionsSimilarly, the library of statistical distributions in PyMC3 is not exhaustive, but PyMC3 allows for the creation of user-defined functions for an arbitrary probability distribution. For simple statistical distributions, the `DensityDist` function takes as an argument any function that calculates a log-probability $log(p(x))$. This function may employ other random variables in its calculation. Here is an example inspired by a blog post by Jake Vanderplas on which priors to use for a linear regression (Vanderplas, 2014). ```pythonimport theano.tensor as ttwith pm.Model() as model: alpha = pm.Uniform('intercept', -100, 100) Create custom densities beta = pm.DensityDist('beta', lambda value: -1.5 * tt.log(1 + value**2), testval=0) eps = pm.DensityDist('eps', lambda value: -tt.log(tt.abs_(value)), testval=1) Create likelihood like = pm.Normal('y_est', mu=alpha + beta * X, sd=eps, observed=Y)``` For more complex distributions, one can create a subclass of `Continuous` or `Discrete` and provide the custom `logp` function, as required. This is how the built-in distributions in PyMC are specified. As an example, fields like psychology and astrophysics have complex likelihood functions for a particular process that may require numerical approximation. In these cases, it is impossible to write the function in terms of predefined theano operators and we must use a custom theano operator using `as_op` or inheriting from `theano.Op`. Implementing the `beta` variable above as a `Continuous` subclass is shown below, along with a sub-function.
###Code
class Beta(pm.Continuous):
def __init__(self, mu, *args, **kwargs):
super(Beta, self).__init__(*args, **kwargs)
self.mu = mu
self.mode = mu
def logp(self, value):
mu = self.mu
return beta_logp(value - mu)
def beta_logp(value):
return -1.5 * np.log(1 + (value)**2)
with pm.Model() as model:
beta = Beta('slope', mu=0, testval=0)
###Output
_____no_output_____
###Markdown
If your logp can not be expressed in Theano, you can decorate the function with `as_op` as follows: `@as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])`. Note, that this will create a blackbox Python function that will be much slower and not provide the gradients necessary for e.g. NUTS. Generalized Linear ModelsGeneralized Linear Models (GLMs) are a class of flexible models that are widely used to estimate regression relationships between a single outcome variable and one or multiple predictors. Because these models are so common, `PyMC3` offers a `glm` submodule that allows flexible creation of various GLMs with an intuitive `R`-like syntax that is implemented via the `patsy` module.The `glm` submodule requires data to be included as a `pandas` `DataFrame`. Hence, for our linear regression example:
###Code
# Convert X and Y to a pandas DataFrame
import pandas
df = pandas.DataFrame({'x1': X1, 'x2': X2, 'y': Y})
###Output
_____no_output_____
###Markdown
The model can then be very concisely specified in one line of code.
###Code
from pymc3.glm import GLM
with pm.Model() as model_glm:
GLM.from_formula('y ~ x1 + x2', df)
trace = pm.sample()
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
/home/maxwell/anaconda3/envs/bayes/lib/python3.6/site-packages/pymc3/model.py:384: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
if not np.issubdtype(var.dtype, float):
Multiprocess sampling (4 chains in 4 jobs)
NUTS: [sd_log__, x2, x1, Intercept]
100%|██████████| 1000/1000 [00:01<00:00, 777.09it/s]
###Markdown
The error distribution, if not specified via the `family` argument, is assumed to be normal. In the case of logistic regression, this can be modified by passing in a `Binomial` family object.
###Code
from pymc3.glm.families import Binomial
df_logistic = pandas.DataFrame({'x1': X1, 'y': Y > np.median(Y)})
with pm.Model() as model_glm_logistic:
GLM.from_formula('y ~ x1', df_logistic, family=Binomial())
###Output
_____no_output_____
###Markdown
Getting started with PyMC3Authors: John Salvatier, Thomas V. Wiecki, Christopher FonnesbeckNote: This text is taken from the [PeerJ CS publication on PyMC3](https://peerj.com/articles/cs-55/). AbstractProbabilistic Programming allows for automatic Bayesian inference on user-defined probabilistic models. Recent advances in Markov chain Monte Carlo (MCMC) sampling allow inference on increasingly complex models. This class of MCMC, known as Hamiltonian Monte Carlo, requires gradient information which is often not readily available. PyMC3 is a new open source Probabilistic Programming framework written in Python that uses Theano to compute gradients via automatic differentiation as well as compile probabilistic programs on-the-fly to C for increased speed. Contrary to other Probabilistic Programming languages, PyMC3 allows model specification directly in Python code. The lack of a domain specific language allows for great flexibility and direct interaction with the model. This paper is a tutorial-style introduction to this software package. IntroductionProbabilistic programming (PP) allows flexible specification of Bayesian statistical models in code. PyMC3 is a new, open-source PP framework with an intuitive and readable, yet powerful, syntax that is close to the natural syntax statisticians use to describe models. It features next-generation Markov chain Monte Carlo (MCMC) sampling algorithms such as the No-U-Turn Sampler (NUTS; Hoffman, 2014), a self-tuning variant of Hamiltonian Monte Carlo (HMC; Duane, 1987). This class of samplers works well on high dimensional and complex posterior distributions and allows many complex models to be fit without specialized knowledge about fitting algorithms. HMC and NUTS take advantage of gradient information from the likelihood to achieve much faster convergence than traditional sampling methods, especially for larger models. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo, which means you usually don't need to have specialized knowledge about how the algorithms work. PyMC3, Stan (Stan Development Team, 2014), and the LaplacesDemon package for R are currently the only PP packages to offer HMC.Probabilistic programming in Python confers a number of advantages including multi-platform compatibility, an expressive yet clean and readable syntax, easy integration with other scientific libraries, and extensibility via C, C++, Fortran or Cython. These features make it relatively straightforward to write and use custom statistical distributions, samplers and transformation functions, as required by Bayesian analysis.While most of PyMC3's user-facing features are written in pure Python, it leverages Theano (Bergstra et al., 2010) to transparently transcode models to C and compile them to machine code, thereby boosting performance. Theano is a library that allows expressions to be defined using generalized vector data structures called *tensors*, which are tightly integrated with the popular NumPy `ndarray` data structure, and similarly allow for broadcasting and advanced indexing, just as NumPy arrays do. Theano also automatically optimizes the likelihood's computational graph for speed and provides simple GPU integration.Here, we present a primer on the use of PyMC3 for solving general Bayesian statistical inference and prediction problems.
We will first see the basics of how to use PyMC3, motivated by a simple example: installation, data creation, model definition, model fitting and posterior analysis. Then we will cover two case studies and use them to show how to define and fit more sophisticated models. Finally we will show how to extend PyMC3 and discuss other useful features: the Generalized Linear Models subpackage, custom distributions, custom transformations and alternative storage backends. InstallationRunning PyMC3 requires a working Python interpreter, either version 2.7 (or more recent) or 3.4 (or more recent); we recommend that new users install version 3.4. A complete Python installation for Mac OSX, Linux and Windows can most easily be obtained by downloading and installing the free [`Anaconda Python Distribution`](https://store.continuum.io/cshop/anaconda/) by ContinuumIO. `PyMC3` can be installed using `pip` (https://pip.pypa.io/en/latest/installing.html):```pip install git+https://github.com/pymc-devs/pymc3```PyMC3 depends on several third-party Python packages which will be automatically installed when installing via pip. The four required dependencies are: `Theano`, `NumPy`, `SciPy`, and `Matplotlib`. To take full advantage of PyMC3, the optional dependencies `Pandas` and `Patsy` should also be installed. These are *not* automatically installed, but can be installed by:```pip install patsy pandas```The source code for PyMC3 is hosted on GitHub at https://github.com/pymc-devs/pymc3 and is distributed under the liberal [Apache License 2.0](https://github.com/pymc-devs/pymc3/blob/master/LICENSE). On the GitHub site, users may also report bugs and other issues, as well as contribute code to the project, which we actively encourage. A Motivating Example: Linear RegressionTo introduce model definition, fitting and posterior analysis, we first consider a simple Bayesian linear regression model with normal priors for the parameters. We are interested in predicting outcomes $Y$ as normally-distributed observations with an expected value $\mu$ that is a linear function of two predictor variables, $X_1$ and $X_2$.$$\begin{aligned} Y &\sim \mathcal{N}(\mu, \sigma^2) \\\mu &= \alpha + \beta_1 X_1 + \beta_2 X_2\end{aligned}$$where $\alpha$ is the intercept, and $\beta_i$ is the coefficient for covariate $X_i$, while $\sigma$ represents the observation error. Since we are constructing a Bayesian model, the unknown variables in the model must be assigned a prior distribution. We choose zero-mean normal priors with variance of 100 for both regression coefficients, which corresponds to *weak* information regarding the true parameter values. We choose a half-normal distribution (normal distribution bounded at zero) as the prior for $\sigma$.$$\begin{aligned} \alpha &\sim \mathcal{N}(0, 100) \\\beta_i &\sim \mathcal{N}(0, 100) \\\sigma &\sim \lvert\mathcal{N}(0, 1){\rvert}\end{aligned}$$ Generating dataWe can simulate some artificial data from this model using only NumPy's `random` module, and then use PyMC3 to try to recover the corresponding parameters. We are intentionally generating the data to closely correspond the PyMC3 model structure.
###Code
import numpy as np
import matplotlib.pyplot as plt
# Initialize random number generator
np.random.seed(123)
# True parameter values
alpha, sigma = 1, 1
beta = [1, 2.5]
# Size of dataset
size = 100
# Predictor variable
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
# Simulate outcome variable
Y = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma
###Output
_____no_output_____
###Markdown
Here is what the simulated data look like. We use matplotlib's `pyplot` module for plotting.
###Code
%matplotlib inline
fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,4))
axes[0].scatter(X1, Y)
axes[1].scatter(X2, Y)
axes[0].set_ylabel('Y'); axes[0].set_xlabel('X1'); axes[1].set_xlabel('X2');
###Output
_____no_output_____
###Markdown
Model SpecificationSpecifying this model in PyMC3 is straightforward because the syntax is as close as possible to the statistical notation. For the most part, each line of Python code corresponds to a line in the model notation above. First, we import the components we will need from PyMC.
###Code
from pymc3 import Model, Normal, HalfNormal
###Output
_____no_output_____
###Markdown
Now we build our model, which we will present in full first, then explain each part line-by-line.
###Code
basic_model = Model()
with basic_model:
# Priors for unknown model parameters
alpha = Normal('alpha', mu=0, sd=10)
beta = Normal('beta', mu=0, sd=10, shape=2)
sigma = HalfNormal('sigma', sd=1)
# Expected value of outcome
mu = alpha + beta[0]*X1 + beta[1]*X2
# Likelihood (sampling distribution) of observations
Y_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)
###Output
_____no_output_____
###Markdown
The first line,```pythonbasic_model = Model()```creates a new `Model` object which is a container for the model random variables.Following instantiation of the model, the subsequent specification of the model components is performed inside a `with` statement:```pythonwith basic_model:```This creates a *context manager*, with our `basic_model` as the context, that includes all statements until the indented block ends. This means all PyMC3 objects introduced in the indented code block below the `with` statement are added to the model behind the scenes. Absent this context manager idiom, we would be forced to manually associate each of the variables with `basic_model` right after we create them. If you try to create a new random variable without a `with model:` statement, it will raise an error since there is no obvious model for the variable to be added to.The first three statements in the context manager:```pythonalpha = Normal('alpha', mu=0, sd=10)beta = Normal('beta', mu=0, sd=10, shape=2)sigma = HalfNormal('sigma', sd=1)```create **stochastic** random variables: Normal prior distributions with a mean of 0 and standard deviation of 10 for the regression coefficients, and a half-normal distribution for the standard deviation of the observations, $\sigma$. These are stochastic because their values are partly determined by their parents in the dependency graph of random variables, which for priors are simple constants, and partly random (or stochastic). We call the `Normal` constructor to create a random variable to use as a normal prior. The first argument is always the *name* of the random variable, which should almost always match the name of the Python variable being assigned to, since it is sometimes used to retrieve the variable from the model for summarizing output. The remaining required arguments for a stochastic object are the parameters, in this case `mu`, the mean, and `sd`, the standard deviation, to which we assign hyperparameter values for the model. In general, a distribution's parameters are values that determine the location, shape or scale of the random variable, depending on the parameterization of the distribution. Most commonly used distributions, such as `Beta`, `Exponential`, `Categorical`, `Gamma`, `Binomial` and many others, are available in PyMC3.The `beta` variable has an additional `shape` argument to denote it as a vector-valued parameter of size 2. The `shape` argument is available for all distributions and specifies the length or shape of the random variable, but is optional for scalar variables, since it defaults to a value of one. It can be an integer, to specify an array, or a tuple, to specify a multidimensional array (*e.g.* `shape=(5,7)` makes a random variable that takes on 5 by 7 matrix values). Detailed notes about distributions, sampling methods and other PyMC3 functions are available via the `help` function.
###Code
help(Normal) #try help(Model), help(Uniform) or help(basic_model)
###Output
Help on class Normal in module pymc3.distributions.continuous:
class Normal(pymc3.distributions.distribution.Continuous)
| Univariate normal log-likelihood.
|
| .. math::
|
| f(x \mid \mu, \tau) =
| \sqrt{\frac{\tau}{2\pi}}
| \exp\left\{ -\frac{\tau}{2} (x-\mu)^2 \right\}
|
| ======== ==========================================
| Support :math:`x \in \mathbb{R}`
| Mean :math:`\mu`
| Variance :math:`\dfrac{1}{\tau}` or :math:`\sigma^2`
| ======== ==========================================
|
| Normal distribution can be parameterized either in terms of precision
| or standard deviation. The link between the two parametrizations is
| given by
|
| .. math::
|
| \tau = \dfrac{1}{\sigma^2}
|
| Parameters
| ----------
| mu : float
| Mean.
| sd : float
| Standard deviation (sd > 0).
| tau : float
| Precision (tau > 0).
|
| Method resolution order:
| Normal
| pymc3.distributions.distribution.Continuous
| pymc3.distributions.distribution.Distribution
| builtins.object
|
| Methods defined here:
|
| __init__(self, mu=0, sd=None, tau=None, **kwargs)
| Initialize self. See help(type(self)) for accurate signature.
|
| logp(self, value)
|
| random(self, point=None, size=None, repeat=None)
|
| ----------------------------------------------------------------------
| Methods inherited from pymc3.distributions.distribution.Distribution:
|
| __getnewargs__(self)
|
| default(self)
|
| get_test_val(self, val, defaults)
|
| getattr_value(self, val)
|
| ----------------------------------------------------------------------
| Class methods inherited from pymc3.distributions.distribution.Distribution:
|
| dist(*args, **kwargs) from builtins.type
|
| ----------------------------------------------------------------------
| Static methods inherited from pymc3.distributions.distribution.Distribution:
|
| __new__(cls, name, *args, **kwargs)
| Create and return a new object. See help(type) for accurate signature.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from pymc3.distributions.distribution.Distribution:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
Having defined the priors, the next statement creates the expected value `mu` of the outcomes, specifying the linear relationship:```pythonmu = alpha + beta[0]*X1 + beta[1]*X2```This creates a **deterministic** random variable, which implies that its value is *completely* determined by its parents' values. That is, there is no uncertainty beyond that which is inherent in the parents' values. Here, `mu` is just the sum of the intercept `alpha` and the two products of the coefficients in `beta` and the predictor variables, whatever their values may be. PyMC3 random variables and data can be arbitrarily added, subtracted, divided, multiplied together and indexed-into to create new random variables. This allows for great model expressivity. Many common mathematical functions like `sum`, `sin`, `exp` and linear algebra functions like `dot` (for inner product) and `inv` (for inverse) are also provided. The final line of the model, defines `Y_obs`, the sampling distribution of the outcomes in the dataset.```pythonY_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)```This is a special case of a stochastic variable that we call an **observed stochastic**, and represents the data likelihood of the model. It is identical to a standard stochastic, except that its `observed` argument, which passes the data to the variable, indicates that the values for this variable were observed, and should not be changed by any fitting algorithm applied to the model. The data can be passed in the form of either a `numpy.ndarray` or `pandas.DataFrame` object.Notice that, unlike for the priors of the model, the parameters for the normal distribution of `Y_obs` are not fixed values, but rather are the deterministic object `mu` and the stochastic `sigma`. This creates parent-child relationships between the likelihood and these two variables. Model fittingHaving completely specified our model, the next step is to obtain posterior estimates for the unknown variables in the model. Ideally, we could calculate the posterior estimates analytically, but for most non-trivial models, this is not feasible. We will consider two approaches, whose appropriateness depends on the structure of the model and the goals of the analysis: finding the *maximum a posteriori* (MAP) point using optimization methods, and computing summaries based on samples drawn from the posterior distribution using Markov Chain Monte Carlo (MCMC) sampling methods. Maximum a posteriori methodsThe **maximum a posteriori (MAP)** estimate for a model, is the mode of the posterior distribution and is generally found using numerical optimization methods. This is often fast and easy to do, but only gives a point estimate for the parameters and can be biased if the mode isn't representative of the distribution. PyMC3 provides this functionality with the `find_MAP` function.Below we find the MAP for our original model. The MAP is returned as a parameter **point**, which is always represented by a Python dictionary of variable names to NumPy arrays of parameter values.
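Such a point can be inspected even before fitting: the model's default starting values form one. A quick sketch, assuming the `test_point` attribute of PyMC3 3.x models (the exact keys reflect the automatic transforms, e.g. `sigma_log__`):

```python
# A point: a dict mapping variable names to NumPy arrays.
print(basic_model.test_point)
```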
###Code
from pymc3 import find_MAP
map_estimate = find_MAP(model=basic_model)
print(map_estimate)
###Output
Optimization terminated successfully.
Current function value: 149.017982
Iterations: 16
Function evaluations: 21
Gradient evaluations: 21
{'alpha': array(0.9065985497559482), 'beta': array([ 0.94848602, 2.60705514]), 'sigma_log__': array(-0.03278147017403069)}
###Markdown
By default, `find_MAP` uses the Broyden–Fletcher–Goldfarb–Shanno (BFGS) optimization algorithm to find the maximum of the log-posterior but also allows selection of other optimization algorithms from the `scipy.optimize` module. For example, below we use Powell's method to find the MAP.
###Code
from scipy import optimize
map_estimate = find_MAP(model=basic_model, fmin=optimize.fmin_powell)
print(map_estimate)
###Output
Optimization terminated successfully.
Current function value: 149.019762
Iterations: 4
Function evaluations: 176
{'alpha': array(0.9090521898977764), 'beta': array([ 0.95140146, 2.61437458]), 'sigma_log__': array(-0.030009775203258385)}
###Markdown
It is important to note that the MAP estimate is not always reasonable, especially if the mode is at an extreme. This can be a subtle issue; with high dimensional posteriors, one can have areas of extremely high density but low total probability because the volume is very small. This will often occur in hierarchical models with the variance parameter for the random effect. If the individual group means are all the same, the posterior will have near infinite density if the scale parameter for the group means is almost zero, even though the probability of such a small scale parameter will be small since the group means must be extremely close together. Most techniques for finding the MAP estimate also only find a *local* optimum (which is often good enough), but can fail badly for multimodal posteriors if the different modes are meaningfully different. Sampling methodsThough finding the MAP is a fast and easy way of obtaining estimates of the unknown model parameters, it is limited because there is no associated estimate of uncertainty produced with the MAP estimates. Instead, a simulation-based approach such as Markov chain Monte Carlo (MCMC) can be used to obtain a Markov chain of values that, given the satisfaction of certain conditions, are indistinguishable from samples from the posterior distribution. To conduct MCMC sampling to generate posterior samples in PyMC3, we specify a **step method** object that corresponds to a particular MCMC algorithm, such as Metropolis, Slice sampling, or the No-U-Turn Sampler (NUTS). PyMC3's `step_methods` submodule contains the following samplers: `NUTS`, `Metropolis`, `Slice`, `HamiltonianMC`, and `BinaryMetropolis`. These step methods can be assigned manually, or assigned automatically by PyMC3. Auto-assignment is based on the attributes of each variable in the model. In general:* Binary variables will be assigned to `BinaryMetropolis`* Discrete variables will be assigned to `Metropolis`* Continuous variables will be assigned to `NUTS`Auto-assignment can be overriden for any subset of variables by specifying them manually prior to sampling. Gradient-based sampling methodsPyMC3 has the standard sampling algorithms like adaptive Metropolis-Hastings and adaptive slice sampling, but PyMC3's most capable step method is the No-U-Turn Sampler. NUTS is especially useful on models that have many continuous parameters, a situation where other MCMC algorithms work very slowly. It takes advantage of information about where regions of higher probability are, based on the gradient of the log posterior-density. This helps it achieve dramatically faster convergence on large problems than traditional sampling methods achieve. PyMC3 relies on Theano to analytically compute model gradients via automatic differentiation of the posterior density. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo. For random variables that are undifferentiable (namely, discrete variables) NUTS cannot be used, but it may still be used on the differentiable variables in a model that contains undifferentiable variables. NUTS requires a scaling matrix parameter, which is analogous to the variance parameter for the jump proposal distribution in Metropolis-Hastings, although NUTS uses it somewhat differently. The matrix gives the rough shape of the distribution so that NUTS does not make jumps that are too large in some directions and too small in other directions. 
It is important to set this scaling parameter to a reasonable value to facilitate efficient sampling. This is especially true for models that have many unobserved stochastic random variables or models with highly non-normal posterior distributions. Poor scaling parameters will slow down NUTS significantly, sometimes almost stopping it completely. A reasonable starting point for sampling can also help, although it is usually less critical than the scaling.Fortunately, `PyMC3` automatically initializes NUTS using another inference algorithm called ADVI (auto-diff variational inference). Moreover, `PyMC3` will automatically assign an appropriate sampler if we don't supply it via the `step` keyword argument (see below for an example of how to explicitly assign step methods).
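The initialization and the number of draws can also be controlled explicitly instead of relying on the defaults. A sketch, assuming the `init` and `n_init` arguments of `sample` behave as in PyMC3 3.x:

```python
from pymc3 import sample

with basic_model:
    # Explicit ADVI-based initialization and 1000 draws per chain.
    trace = sample(1000, init='advi', n_init=50000)
```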
###Code
from pymc3 import NUTS, sample
from scipy import optimize
with basic_model:
# draw 500 posterior samples
trace = sample()
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using ADVI...
Average Loss = 153.48: 100%|██████████| 200000/200000 [00:22<00:00, 8763.36it/s]
Finished [100%]: Average Loss = 153.48
100%|██████████| 1000/1000 [00:01<00:00, 846.31it/s]
###Markdown
The `sample` function runs the step method(s) assigned (or passed) to it for the given number of iterations and returns a `Trace` object containing the samples collected, in the order they were collected. The `trace` object can be queried in a similar way to a `dict` containing a map from variable names to `numpy.array`s. The first dimension of the array is the sampling index and the later dimensions match the shape of the variable. We can see the last 5 values for the `alpha` variable as follows:
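Beyond the dict-style indexing shown in the next cell, the trace also exposes a `get_values` method for chain-aware access. A sketch, assuming the argument names of PyMC3 3.x:

```python
# Discard the first 100 samples of each chain and keep the chains separate.
alpha_per_chain = trace.get_values('alpha', burn=100, combine=False)
print(len(alpha_per_chain), alpha_per_chain[0].shape)
```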
###Code
trace['alpha'][-5:]
###Output
_____no_output_____
###Markdown
If we wanted to use the slice sampling algorithm for `sigma` instead of NUTS (which was assigned automatically), we could have specified this as the `step` argument for `sample`.
###Code
from pymc3 import Slice
with basic_model:
# obtain starting values via MAP
start = find_MAP(fmin=optimize.fmin_powell)
# instantiate sampler
step = Slice(vars=[sigma])
# draw 5000 posterior samples
trace = sample(5000, step=step, start=start)
###Output
Assigned NUTS to alpha
Assigned NUTS to beta
###Markdown
Posterior analysis`PyMC3` provides plotting and summarization functions for inspecting the sampling output. A simple posterior plot can be created using `traceplot`.
###Code
from pymc3 import traceplot
traceplot(trace);
###Output
_____no_output_____
###Markdown
The left column consists of a smoothed histogram (using kernel density estimation) of the marginal posteriors of each stochastic random variable, while the right column contains the samples of the Markov chain plotted in sequential order. The `beta` variable, being vector-valued, produces two histograms and two sample traces, corresponding to both predictor coefficients.In addition, the `summary` function provides a text-based output of common posterior statistics:
###Code
from pymc3 import summary
summary(trace)
###Output
alpha:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.905 0.099 0.001 [0.706, 1.094]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.711 0.836 0.904 0.970 1.102
beta:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.950 0.088 0.001 [0.780, 1.123]
2.604 0.516 0.006 [1.559, 3.582]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.777 0.890 0.950 1.010 1.120
1.594 2.250 2.604 2.953 3.626
sigma:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.990 0.072 0.001 [0.853, 1.132]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.862 0.939 0.985 1.035 1.145
###Markdown
Case study 1: Stochastic volatilityWe present a case study of stochastic volatility, time varying stock market volatility, to illustrate PyMC3's use in addressing a more realistic problem. The distribution of market returns is highly non-normal, which makes sampling the volatilities significantly more difficult. This example has 400+ parameters so using common sampling algorithms like Metropolis-Hastings would get bogged down, generating highly autocorrelated samples. Instead, we use NUTS, which is dramatically more efficient. The ModelAsset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others they are very stable. Stochastic volatility models address this with a latent volatility variable, which changes over time. The following model is similar to the one described in the NUTS paper (Hoffman 2014, p. 21).$$\begin{aligned} \sigma &\sim exp(50) \\ \nu &\sim exp(.1) \\ s_i &\sim \mathcal{N}(s_{i-1}, \sigma^{-2}) \\ log(y_i) &\sim t(\nu, 0, exp(-2 s_i))\end{aligned}$$Here, $y$ is the daily return series which is modeled with a Student-t distribution with an unknown degrees of freedom parameter, and a scale parameter determined by a latent process $s$. The individual $s_i$ are the individual daily log volatilities in the latent log volatility process. The DataOur data consist of daily returns of the S&P 500 during the 2008 financial crisis. Here, we use `pandas-datareader` to obtain the price data from Yahoo!-Finance; it can be installed with `pip install pandas-datareader`.
###Code
try:
from pandas_datareader import data
except ImportError:
!pip install pandas-datareader
from pandas_datareader import data
import pandas as pd
returns = data.get_data_yahoo('SPY', start='2008-5-1', end='2009-12-1')['Adj Close'].pct_change()
print(len(returns))
returns.plot(figsize=(10, 6))
plt.ylabel('daily returns in %');
###Output
_____no_output_____
###Markdown
Model SpecificationAs with the linear regression example, specifying the model in PyMC3 mirrors its statistical specification. This model employs several new distributions: the `Exponential` distribution for the $\nu$ and $\sigma$ priors, the Student-T (`StudentT`) distribution for distribution of returns, and the `GaussianRandomWalk` for the prior for the latent volatilities. In PyMC3, variables with purely positive priors like `Exponential` are transformed with a log transform. This makes sampling more robust. Behind the scenes, a variable in the unconstrained space (named "variableName_log") is added to the model for sampling. In this model this happens behind the scenes for both the degrees of freedom, `nu`, and the scale parameter for the volatility process, `sigma`, since they both have exponential priors. Variables with priors that constrain them on two sides, like `Beta` or `Uniform`, are also transformed to be unconstrained but with a log odds transform. Although, unlike model specification in PyMC2, we do not typically provide starting points for variables at the model specification stage, we can also provide an initial value for any distribution (called a "test value") using the `testval` argument. This overrides the default test value for the distribution (usually the mean, median or mode of the distribution), and is most often useful if some values are illegal and we want to ensure we select a legal one. The test values for the distributions are also used as a starting point for sampling and optimization by default, though this is easily overriden. The vector of latent volatilities `s` is given a prior distribution by `GaussianRandomWalk`. As its name suggests GaussianRandomWalk is a vector valued distribution where the values of the vector form a random normal walk of length n, as specified by the `shape` argument. The scale of the innovations of the random walk, `sigma`, is specified in terms of the precision of the normally distributed innovations and can be a scalar or vector.
###Code
from pymc3 import Exponential, StudentT, Deterministic
from pymc3.math import exp
from pymc3.distributions.timeseries import GaussianRandomWalk
with Model() as sp500_model:
nu = Exponential('nu', 1./10, testval=5.)
sigma = Exponential('sigma', 1./.02, testval=.1)
s = GaussianRandomWalk('s', sigma**-2, shape=len(returns))
volatility_process = Deterministic('volatility_process', exp(-2*s))
r = StudentT('r', nu, lam=1/volatility_process, observed=returns)
###Output
_____no_output_____
###Markdown
Notice that we transform the log volatility process `s` into the volatility process by `exp(-2*s)`. Here, `exp` is a Theano function, rather than the corresponding function in NumPy; Theano provides a large subset of the mathematical functions that NumPy does.Also note that we have declared the `Model` name `sp500_model` in the first occurrence of the context manager, rather than splitting it into two lines, as we did for the first example. Fitting
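Because of the automatic log transforms mentioned above, the free variables that PyMC3 will actually sample can be inspected directly. A sketch, assuming the model defined above (the transformed names such as `nu_log__` are what appear here):

```python
print(sp500_model.vars)            # free variables, e.g. [nu_log__, sigma_log__, s]
print(sp500_model.deterministics)  # named deterministics, e.g. [volatility_process]
```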
###Code
from pymc3 import variational
import scipy
with sp500_model:
trace = sample(2000)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using ADVI...
Average Loss = -912.35: 100%|██████████| 200000/200000 [00:55<00:00, 3627.95it/s]
Finished [100%]: Average Loss = -912.3
100%|██████████| 2500/2500 [02:20<00:00, 17.74it/s]
###Markdown
We can check our samples by looking at the traceplot for `nu` and `sigma`.
###Code
traceplot(trace[200:], [nu, sigma]);
###Output
_____no_output_____
###Markdown
Finally we plot the distribution of volatility paths by plotting many of our sampled volatility paths on the same graph. Each is rendered partially transparent (via the `alpha` argument in Matplotlib's `plot` function) so the regions where many paths overlap are shaded more darkly.
###Code
fig, ax = plt.subplots(figsize=(15, 8))
returns.plot(ax=ax)
ax.plot(returns.index, 1/np.exp(trace['s',::5].T), 'r', alpha=.03);
ax.set(title='volatility_process', xlabel='time', ylabel='volatility');
ax.legend(['S&P500', 'stochastic volatility process'])
###Output
_____no_output_____
###Markdown
As you can see, the model correctly infers the increase in volatility during the 2008 financial crash. Moreover, note that this model is quite complex because of its high dimensionality and dependency structure in the random walk distribution. NUTS as implemented in PyMC3, however, correctly infers the posterior distribution with ease. Case study 2: Coal mining disastersConsider the following time series of recorded coal mining disasters in the UK from 1851 to 1962 (Jarrett, 1979). The number of disasters is thought to have been affected by changes in safety regulations during this period. Unfortunately, we also have a pair of years with missing data, identified as missing by a NumPy MaskedArray using -999 as the marker value. Next we will build a model for this series and attempt to estimate when the change occurred. At the same time, we will see how to handle missing data, use multiple samplers and sample from discrete random variables.
###Code
disaster_data = np.ma.masked_values([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, -999, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, -999, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1], value=-999)
year = np.arange(1851, 1962)
plt.plot(year, disaster_data, 'o', markersize=8);
plt.ylabel("Disaster count")
plt.xlabel("Year")
###Output
_____no_output_____
###Markdown
Occurrences of disasters in the time series are thought to follow a Poisson process with a large rate parameter in the early part of the time series, and one with a smaller rate in the later part. We are interested in locating the change point in the series, which perhaps is related to changes in mining safety regulations.In our model, $$ \begin{aligned} D_t &\sim \text{Pois}(r_t), r_t= \begin{cases} e, & \text{if } t \le s \\ l, & \text{if } t \gt s \end{cases} \\ s &\sim \text{Unif}(t_l, t_h)\\ e &\sim \text{exp}(1)\\ l &\sim \text{exp}(1) \end{aligned}$$the parameters are defined as follows: * $D_t$: The number of disasters in year $t$ * $r_t$: The rate parameter of the Poisson distribution of disasters in year $t$. * $s$: The year in which the rate parameter changes (the switchpoint). * $e$: The rate parameter before the switchpoint $s$. * $l$: The rate parameter after the switchpoint $s$. * $t_l$, $t_h$: The lower and upper boundaries of year $t$. This model is built much like our previous models. The major differences are the introduction of discrete variables with the Poisson and discrete-uniform priors and the novel form of the deterministic random variable `rate`.
###Code
from pymc3 import DiscreteUniform, Poisson
from pymc3.math import switch
with Model() as disaster_model:
switchpoint = DiscreteUniform('switchpoint', lower=year.min(), upper=year.max(), testval=1900)
# Priors for pre- and post-switch rates number of disasters
early_rate = Exponential('early_rate', 1)
late_rate = Exponential('late_rate', 1)
# Allocate appropriate Poisson rates to years before and after current
rate = switch(switchpoint >= year, early_rate, late_rate)
disasters = Poisson('disasters', rate, observed=disaster_data)
###Output
_____no_output_____
###Markdown
The logic for the rate random variable,```pythonrate = switch(switchpoint >= year, early_rate, late_rate)```is implemented using `switch`, a Theano function that works like an if statement. It uses the first argument to switch between the next two arguments.Missing values are handled transparently by passing a `MaskedArray` or a `pandas.DataFrame` with NaN values to the `observed` argument when creating an observed stochastic random variable. Behind the scenes, another random variable, `disasters.missing_values`, is created to model the missing values. All we need to do to handle the missing values is ensure we sample this random variable as well. Unfortunately, because they are discrete variables and thus have no meaningful gradient, we cannot use NUTS for sampling `switchpoint` or the missing disaster observations. Instead, we will sample using a `Metropolis` step method, which implements adaptive Metropolis-Hastings and is designed to handle discrete values. `PyMC3` automatically assigns the correct sampling algorithms.
###Code
from pymc3 import Metropolis
with disaster_model:
trace = sample(10000)
###Output
Assigned Metropolis to switchpoint
Assigned NUTS to early_rate_log__
Assigned NUTS to late_rate_log__
Assigned Metropolis to disasters_missing
100%|██████████| 10500/10500 [00:59<00:00, 176.92it/s]
###Markdown
In the trace plot below we can see that there's about a 10 year span that's plausible for a significant change in safety, but a 5 year span that contains most of the probability mass. The distribution is jagged because of the jumpy relationship between the year switchpoint and the likelihood and not due to sampling error.
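The size of the change itself can be summarized by comparing the two rate parameters. A sketch, assuming the trace obtained above:

```python
summary(trace, varnames=['early_rate', 'late_rate'])
# Posterior probability that the early rate exceeds the late rate.
print((trace['early_rate'] > trace['late_rate']).mean())
```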
###Code
traceplot(trace);
###Output
_____no_output_____
###Markdown
Arbitrary deterministicsDue to its reliance on Theano, PyMC3 provides many mathematical functions and operators for transforming random variables into new random variables. However, the library of functions in Theano is not exhaustive; therefore, Theano and PyMC3 provide functionality for creating arbitrary Theano functions in pure Python and including these functions in PyMC models. This is supported with the `as_op` function decorator.Theano needs to know the types of the inputs and outputs of a function, which are specified for `as_op` by `itypes` for inputs and `otypes` for outputs. The Theano documentation includes [an overview of the available types](http://deeplearning.net/software/theano/library/tensor/basic.html#all-fully-typed-constructors).
###Code
import theano.tensor as T
from theano.compile.ops import as_op
@as_op(itypes=[T.lscalar], otypes=[T.lscalar])
def crazy_modulo3(value):
if value > 0:
return value % 3
else :
return (-value + 1) % 3
with Model() as model_deterministic:
a = Poisson('a', 1)
b = crazy_modulo3(a)
###Output
_____no_output_____
###Markdown
An important drawback of this approach is that it is not possible for `theano` to inspect these functions in order to compute the gradient required for the Hamiltonian-based samplers. Therefore, it is not possible to use the HMC or NUTS samplers for a model that uses such an operator. However, it is possible to add a gradient if we inherit from `theano.Op` instead of using `as_op`. The PyMC example set includes [a more elaborate example of the usage of as_op](https://github.com/pymc-devs/pymc3/blob/master/pymc3/examples/disaster_model_arbitrary_deterministic.py). Arbitrary distributionsSimilarly, the library of statistical distributions in PyMC3 is not exhaustive, but PyMC allows for the creation of user-defined functions for an arbitrary probability distribution. For simple statistical distributions, the `DensityDist` function takes as an argument any function that calculates a log-probability $log(p(x))$. This function may employ other random variables in its calculation. Here is an example inspired by a blog post by Jake Vanderplas on which priors to use for a linear regression (Vanderplas, 2014). ```pythonimport theano.tensor as Tfrom pymc3 import DensityDist, Uniformwith Model() as model: alpha = Uniform('intercept', -100, 100) Create custom densities beta = DensityDist('beta', lambda value: -1.5 * T.log(1 + value**2), testval=0) eps = DensityDist('eps', lambda value: -T.log(T.abs_(value)), testval=1) Create likelihood like = Normal('y_est', mu=alpha + beta * X, sd=eps, observed=Y)``` For more complex distributions, one can create a subclass of `Continuous` or `Discrete` and provide the custom `logp` function, as required. This is how the built-in distributions in PyMC are specified. As an example, fields like psychology and astrophysics have complex likelihood functions for a particular process that may require numerical approximation. In these cases, it is impossible to write the function in terms of predefined theano operators and we must use a custom theano operator using `as_op` or inheriting from `theano.Op`. Implementing the `beta` variable above as a `Continuous` subclass is shown below, along with a sub-function.
###Code
from pymc3.distributions import Continuous
class Beta(Continuous):
def __init__(self, mu, *args, **kwargs):
super(Beta, self).__init__(*args, **kwargs)
self.mu = mu
self.mode = mu
def logp(self, value):
mu = self.mu
return beta_logp(value - mu)
def beta_logp(value):
return -1.5 * np.log(1 + (value)**2)
with Model() as model:
beta = Beta('slope', mu=0, testval=0)
###Output
_____no_output_____
###Markdown
If your logp can not be expressed in Theano, you can decorate the function with `as_op` as follows: `@as_op(itypes=[T.dscalar], otypes=[T.dscalar])`. Note, that this will create a blackbox Python function that will be much slower and not provide the gradients necessary for e.g. NUTS. Generalized Linear ModelsGeneralized Linear Models (GLMs) are a class of flexible models that are widely used to estimate regression relationships between a single outcome variable and one or multiple predictors. Because these models are so common, `PyMC3` offers a `glm` submodule that allows flexible creation of various GLMs with an intuitive `R`-like syntax that is implemented via the `patsy` module.The `glm` submodule requires data to be included as a `pandas` `DataFrame`. Hence, for our linear regression example:
###Code
# Convert X and Y to a pandas DataFrame
import pandas
df = pandas.DataFrame({'x1': X1, 'x2': X2, 'y': Y})
###Output
_____no_output_____
###Markdown
The model can then be very concisely specified in one line of code.
###Code
from pymc3.glm import GLM
with Model() as model_glm:
GLM.from_formula('y ~ x1 + x2', df)
trace = sample()
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using ADVI...
Average Loss = 161.46: 100%|██████████| 200000/200000 [00:22<00:00, 8980.38it/s]
Finished [100%]: Average Loss = 161.47
100%|██████████| 1000/1000 [00:00<00:00, 1074.24it/s]
###Markdown
The error distribution, if not specified via the `family` argument, is assumed to be normal. In the case of logistic regression, this can be modified by passing in a `Binomial` family object.
###Code
from pymc3.glm.families import Binomial
df_logistic = pandas.DataFrame({'x1': X1, 'y': Y > np.median(Y)})
with Model() as model_glm_logistic:
GLM.from_formula('y ~ x1', df_logistic, family=Binomial())
###Output
_____no_output_____
###Markdown
For a more complete and flexible formula interface, including hierarchical GLMs, see [Bambi](https://github.com/bambinos/bambi). Backends`PyMC3` has support for different ways to store samples during and after sampling, called backends, including in-memory (default), text file, and SQLite. These can be found in `pymc3.backends`. By default, an in-memory `ndarray` is used, but if the samples would get too large to be held in memory we could use the `SQLite` backend:
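The plain-text backend follows the same pattern as the SQLite example below. A sketch, assuming `pymc3.backends.Text`, which writes one CSV file per chain into the given directory:

```python
from pymc3.backends import Text

with model_glm_logistic:
    text_backend = Text('mcmc_trace')  # directory for the per-chain CSV files
    trace_text = sample(trace=text_backend)
```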
###Code
import pymc3 as pm
from pymc3.backends import SQLite
with Model() as model_glm_logistic:
GLM.from_formula('y ~ x1', df_logistic, family=Binomial())
backend = SQLite('trace.sqlite')
trace = sample(trace=backend)
summary(trace, varnames=['x1'])
###Output
_____no_output_____
###Markdown
The stored trace can then later be loaded using the `load` command:
###Code
from pymc3.backends.sqlite import load
with basic_model:
trace_loaded = load('trace.sqlite')
###Output
_____no_output_____
###Markdown
Getting started with PyMC3Authors: John Salvatier, Thomas V. Wiecki, Christopher FonnesbeckNote: This text is based on the [PeerJ CS publication on PyMC3](https://peerj.com/articles/cs-55/). AbstractProbabilistic Programming allows for automatic Bayesian inference on user-defined probabilistic models. Recent advances in Markov chain Monte Carlo (MCMC) sampling allow inference on increasingly complex models. This class of MCMC, known as Hamiltonian Monte Carlo, requires gradient information which is often not readily available. PyMC3 is a new open source Probabilistic Programming framework written in Python that uses Theano to compute gradients via automatic differentiation as well as compile probabilistic programs on-the-fly to C for increased speed. Contrary to other Probabilistic Programming languages, PyMC3 allows model specification directly in Python code. The lack of a domain specific language allows for great flexibility and direct interaction with the model. This paper is a tutorial-style introduction to this software package. IntroductionProbabilistic programming (PP) allows flexible specification of Bayesian statistical models in code. PyMC3 is a new, open-source PP framework with an intuitive and readable, yet powerful, syntax that is close to the natural syntax statisticians use to describe models. It features next-generation Markov chain Monte Carlo (MCMC) sampling algorithms such as the No-U-Turn Sampler (NUTS; Hoffman, 2014), a self-tuning variant of Hamiltonian Monte Carlo (HMC; Duane, 1987). This class of samplers works well on high dimensional and complex posterior distributions and allows many complex models to be fit without specialized knowledge about fitting algorithms. HMC and NUTS take advantage of gradient information from the likelihood to achieve much faster convergence than traditional sampling methods, especially for larger models. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo, which means you usually don't need to have specialized knowledge about how the algorithms work. PyMC3, Stan (Stan Development Team, 2014), and the LaplacesDemon package for R are currently the only PP packages to offer HMC.Probabilistic programming in Python confers a number of advantages including multi-platform compatibility, an expressive yet clean and readable syntax, easy integration with other scientific libraries, and extensibility via C, C++, Fortran or Cython. These features make it relatively straightforward to write and use custom statistical distributions, samplers and transformation functions, as required by Bayesian analysis.While most of PyMC3's user-facing features are written in pure Python, it leverages Theano (Bergstra et al., 2010) to transparently transcode models to C and compile them to machine code, thereby boosting performance. Theano is a library that allows expressions to be defined using generalized vector data structures called *tensors*, which are tightly integrated with the popular NumPy `ndarray` data structure, and similarly allow for broadcasting and advanced indexing, just as NumPy arrays do. Theano also automatically optimizes the likelihood's computational graph for speed and provides simple GPU integration.Here, we present a primer on the use of PyMC3 for solving general Bayesian statistical inference and prediction problems. 
We will first see the basics of how to use PyMC3, motivated by a simple example: installation, data creation, model definition, model fitting and posterior analysis. Then we will cover two case studies and use them to show how to define and fit more sophisticated models. Finally we will show how to extend PyMC3 and discuss other useful features: the Generalized Linear Models subpackage, custom distributions, custom transformations and alternative storage backends. InstallationRunning PyMC3 requires a working Python interpreter, either version 2.7 (or more recent) or 3.5 (or more recent); we recommend that new users install version 3.5. A complete Python installation for Mac OSX, Linux and Windows can most easily be obtained by downloading and installing the free [`Anaconda Python Distribution`](https://store.continuum.io/cshop/anaconda/) by ContinuumIO. `PyMC3` can be installed using `pip` (https://pip.pypa.io/en/latest/installing.html):```pip install pymc3```Or via conda:```conda install pymc3```The current development branch of PyMC3 can be installed from GitHub, also using pip:```pip install git+https://github.com/pymc-devs/pymc3```The source code for PyMC3 is hosted on GitHub at https://github.com/pymc-devs/pymc3 and is distributed under the liberal [Apache License 2.0](https://github.com/pymc-devs/pymc3/blob/master/LICENSE). On the GitHub site, users may also report bugs and other issues, as well as contribute documentation or code to the project, which we actively encourage. A Motivating Example: Linear RegressionTo introduce model definition, fitting and posterior analysis, we first consider a simple Bayesian linear regression model with normal priors for the parameters. We are interested in predicting outcomes $Y$ as normally-distributed observations with an expected value $\mu$ that is a linear function of two predictor variables, $X_1$ and $X_2$.$$\begin{aligned} Y &\sim \mathcal{N}(\mu, \sigma^2) \\\mu &= \alpha + \beta_1 X_1 + \beta_2 X_2\end{aligned}$$where $\alpha$ is the intercept, and $\beta_i$ is the coefficient for covariate $X_i$, while $\sigma$ represents the observation error. Since we are constructing a Bayesian model, we must assign a prior distribution to the unknown variables in the model. We choose zero-mean normal priors with variance of 100 for both regression coefficients, which corresponds to *weak* information regarding the true parameter values. We choose a half-normal distribution (normal distribution bounded at zero) as the prior for $\sigma$.$$\begin{aligned} \alpha &\sim \mathcal{N}(0, 100) \\\beta_i &\sim \mathcal{N}(0, 100) \\\sigma &\sim \lvert\mathcal{N}(0, 1){\rvert}\end{aligned}$$ Generating dataWe can simulate some artificial data from this model using only NumPy's `random` module, and then use PyMC3 to try to recover the corresponding parameters. We are intentionally generating the data to closely correspond the PyMC3 model structure.
###Code
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
# Initialize random number generator
np.random.seed(123)
# True parameter values
alpha, sigma = 1, 1
beta = [1, 2.5]
# Size of dataset
size = 100
# Predictor variable
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
# Simulate outcome variable
Y = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma
###Output
_____no_output_____
###Markdown
Here is what the simulated data look like. We use the `pyplot` module from the plotting library matplotlib, imported above as `plt`.
###Code
%matplotlib inline
fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,4))
axes[0].scatter(X1, Y)
axes[1].scatter(X2, Y)
axes[0].set_ylabel('Y'); axes[0].set_xlabel('X1'); axes[1].set_xlabel('X2');
###Output
_____no_output_____
###Markdown
Model SpecificationSpecifying this model in PyMC3 is straightforward because the syntax is close to the statistical notation. For the most part, each line of Python code corresponds to a line in the model notation above. First, we import PyMC3. We use the convention of importing it as `pm`.
###Code
import pymc3 as pm
print('Running on PyMC3 v{}'.format(pm.__version__))
###Output
Running on PyMC3 v3.5
###Markdown
Now we build our model, which we will present in full first, then explain each part line-by-line.
###Code
basic_model = pm.Model()
with basic_model:
# Priors for unknown model parameters
alpha = pm.Normal('alpha', mu=0, sd=10)
beta = pm.Normal('beta', mu=0, sd=10, shape=2)
sigma = pm.HalfNormal('sigma', sd=1)
# Expected value of outcome
mu = alpha + beta[0]*X1 + beta[1]*X2
# Likelihood (sampling distribution) of observations
Y_obs = pm.Normal('Y_obs', mu=mu, sd=sigma, observed=Y)
###Output
_____no_output_____
###Markdown
The first line,```pythonbasic_model = Model()```creates a new `Model` object which is a container for the model random variables.Following instantiation of the model, the subsequent specification of the model components is performed inside a `with` statement:```pythonwith basic_model:```This creates a *context manager*, with our `basic_model` as the context, that includes all statements until the indented block ends. This means all PyMC3 objects introduced in the indented code block below the `with` statement are added to the model behind the scenes. Absent this context manager idiom, we would be forced to manually associate each of the variables with `basic_model` right after we create them. If you try to create a new random variable without a `with model:` statement, it will raise an error since there is no obvious model for the variable to be added to.The first three statements in the context manager:```pythonalpha = Normal('alpha', mu=0, sd=10)beta = Normal('beta', mu=0, sd=10, shape=2)sigma = HalfNormal('sigma', sd=1)```create **stochastic** random variables with Normal prior distributions for the regression coefficients with a mean of 0 and standard deviation of 10, and a half-normal distribution for the standard deviation of the observations, $\sigma$. These are stochastic because their values are partly determined by its parents in the dependency graph of random variables, which for priors are simple constants, and partly random (or stochastic). We call the `Normal` constructor to create a random variable to use as a normal prior. The first argument is always the *name* of the random variable, which should almost always match the name of the Python variable being assigned to, since it is sometimes used to retrieve the variable from the model for summarizing output. The remaining required arguments for a stochastic object are the parameters, in this case `mu`, the mean, and `sd`, the standard deviation, which we assign hyperparameter values for the model. In general, a distribution's parameters are values that determine the location, shape or scale of the random variable, depending on the parameterization of the distribution. Most commonly used distributions, such as `Beta`, `Exponential`, `Categorical`, `Gamma`, `Binomial` and many others, are available in PyMC3.The `beta` variable has an additional `shape` argument to denote it as a vector-valued parameter of size 2. The `shape` argument is available for all distributions and specifies the length or shape of the random variable, but is optional for scalar variables, since it defaults to a value of one. It can be an integer, to specify an array, or a tuple, to specify a multidimensional array (*e.g.* `shape=(5,7)` makes random variable that takes on 5 by 7 matrix values). Detailed notes about distributions, sampling methods and other PyMC3 functions are available via the `help` function.
###Code
help(pm.Normal) #try help(Model), help(Uniform) or help(basic_model)
###Output
Help on class Normal in module pymc3.distributions.continuous:
class Normal(pymc3.distributions.distribution.Continuous)
| Normal(name, *args, **kwargs)
|
| Univariate normal log-likelihood.
|
| The pdf of this distribution is
|
| .. math::
|
| f(x \mid \mu, \tau) =
| \sqrt{\frac{\tau}{2\pi}}
| \exp\left\{ -\frac{\tau}{2} (x-\mu)^2 \right\}
|
| Normal distribution can be parameterized either in terms of precision
| or standard deviation. The link between the two parametrizations is
| given by
|
| .. math::
|
| \tau = \dfrac{1}{\sigma^2}
|
| .. plot::
|
| import matplotlib.pyplot as plt
| import numpy as np
| import scipy.stats as st
| plt.style.use('seaborn-darkgrid')
| x = np.linspace(-5, 5, 1000)
| mus = [0., 0., 0., -2.]
| sds = [0.4, 1., 2., 0.4]
| for mu, sd in zip(mus, sds):
| pdf = st.norm.pdf(x, mu, sd)
| plt.plot(x, pdf, label=r'$\mu$ = {}, $\sigma$ = {}'.format(mu, sd))
| plt.xlabel('x', fontsize=12)
| plt.ylabel('f(x)', fontsize=12)
| plt.legend(loc=1)
| plt.show()
|
| ======== ==========================================
| Support :math:`x \in \mathbb{R}`
| Mean :math:`\mu`
| Variance :math:`\dfrac{1}{\tau}` or :math:`\sigma^2`
| ======== ==========================================
|
| Parameters
| ----------
| mu : float
| Mean.
| sd : float
| Standard deviation (sd > 0) (only required if tau is not specified).
| tau : float
| Precision (tau > 0) (only required if sd is not specified).
|
| Examples
| --------
| .. code-block:: python
|
| with pm.Model():
| x = pm.Normal('x', mu=0, sd=10)
|
| with pm.Model():
| x = pm.Normal('x', mu=0, tau=1/23)
|
| Method resolution order:
| Normal
| pymc3.distributions.distribution.Continuous
| pymc3.distributions.distribution.Distribution
| builtins.object
|
| Methods defined here:
|
| __init__(self, mu=0, sd=None, tau=None, **kwargs)
| Initialize self. See help(type(self)) for accurate signature.
|
| logcdf(self, value)
|
| logp(self, value)
| Calculate log-probability of Normal distribution at specified value.
|
| Parameters
| ----------
| value : numeric
| Value(s) for which log-probability is calculated. If the log probabilities for multiple
| values are desired the values must be provided in a numpy array or theano tensor
|
| Returns
| -------
| TensorVariable
|
| random(self, point=None, size=None)
| Draw random values from Normal distribution.
|
| Parameters
| ----------
| point : dict, optional
| Dict of variable values on which random values are to be
| conditioned (uses default point if not specified).
| size : int, optional
| Desired size of random sample (returns one sample if not
| specified).
|
| Returns
| -------
| array
|
| ----------------------------------------------------------------------
| Methods inherited from pymc3.distributions.distribution.Distribution:
|
| __getnewargs__(self)
|
| __latex__ = _repr_latex_(self, name=None, dist=None)
| Magic method name for IPython to use for LaTeX formatting.
|
| default(self)
|
| get_test_val(self, val, defaults)
|
| getattr_value(self, val)
|
| logp_nojac(self, *args, **kwargs)
| Return the logp, but do not include a jacobian term for transforms.
|
| If we use different parametrizations for the same distribution, we
| need to add the determinant of the jacobian of the transformation
| to make sure the densities still describe the same distribution.
| However, MAP estimates are not invariant with respect to the
| parametrization, we need to exclude the jacobian terms in this case.
|
| This function should be overwritten in base classes for transformed
| distributions.
|
| logp_sum(self, *args, **kwargs)
| Return the sum of the logp values for the given observations.
|
| Subclasses can use this to improve the speed of logp evaluations
| if only the sum of the logp values is needed.
|
| ----------------------------------------------------------------------
| Class methods inherited from pymc3.distributions.distribution.Distribution:
|
| dist(*args, **kwargs) from builtins.type
|
| ----------------------------------------------------------------------
| Static methods inherited from pymc3.distributions.distribution.Distribution:
|
| __new__(cls, name, *args, **kwargs)
| Create and return a new object. See help(type) for accurate signature.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from pymc3.distributions.distribution.Distribution:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
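Besides help on individual distributions, the model object itself can be inspected interactively. The snippet below is a small optional sketch, not part of the original example; the attribute and method names are standard PyMC3 `Model` members as of the 3.x releases, so details of the output may vary with your version.

```python
# Optional sketch: inspecting the model container defined above.
print(basic_model.basic_RVs)           # all random variables, including the observed Y_obs
print(basic_model.free_RVs)            # only the unobserved (free) variables: alpha, beta, sigma
print(basic_model.test_point)          # default starting values for each free variable
print(basic_model.check_test_point())  # log-probability of each variable at its test point
```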
###Markdown
Having defined the priors, the next statement creates the expected value `mu` of the outcomes, specifying the linear relationship:```pythonmu = alpha + beta[0]*X1 + beta[1]*X2```This creates a **deterministic** random variable, which implies that its value is *completely* determined by its parents' values. That is, there is no uncertainty beyond that which is inherent in the parents' values. Here, `mu` is just the sum of the intercept `alpha` and the two products of the coefficients in `beta` and the predictor variables, whatever their values may be. PyMC3 random variables and data can be arbitrarily added, subtracted, divided, multiplied together and indexed-into to create new random variables. This allows for great model expressivity. Many common mathematical functions like `sum`, `sin`, `exp` and linear algebra functions like `dot` (for inner product) and `inv` (for inverse) are also provided. The final line of the model, defines `Y_obs`, the sampling distribution of the outcomes in the dataset.```pythonY_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)```This is a special case of a stochastic variable that we call an **observed stochastic**, and represents the data likelihood of the model. It is identical to a standard stochastic, except that its `observed` argument, which passes the data to the variable, indicates that the values for this variable were observed, and should not be changed by any fitting algorithm applied to the model. The data can be passed in the form of either a `numpy.ndarray` or `pandas.DataFrame` object.Notice that, unlike for the priors of the model, the parameters for the normal distribution of `Y_obs` are not fixed values, but rather are the deterministic object `mu` and the stochastic `sigma`. This creates parent-child relationships between the likelihood and these two variables. Model fittingHaving completely specified our model, the next step is to obtain posterior estimates for the unknown variables in the model. Ideally, we could calculate the posterior estimates analytically, but for most non-trivial models, this is not feasible. We will consider two approaches, whose appropriateness depends on the structure of the model and the goals of the analysis: finding the *maximum a posteriori* (MAP) point using optimization methods, and computing summaries based on samples drawn from the posterior distribution using Markov Chain Monte Carlo (MCMC) sampling methods. Maximum a posteriori methodsThe **maximum a posteriori (MAP)** estimate for a model, is the mode of the posterior distribution and is generally found using numerical optimization methods. This is often fast and easy to do, but only gives a point estimate for the parameters and can be biased if the mode isn't representative of the distribution. PyMC3 provides this functionality with the `find_MAP` function.Below we find the MAP for our original model. The MAP is returned as a parameter **point**, which is always represented by a Python dictionary of variable names to NumPy arrays of parameter values.
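Before running the optimization below, a brief aside on deterministic variables: as written, `mu` is an anonymous intermediate quantity and its values are not stored during sampling. If you want it tracked in the trace, you can wrap the expression in `pm.Deterministic`. The following is a hedged sketch of that variant; the name `named_model` is ours and not part of the original example.

```python
# Sketch: giving the deterministic node a name so it appears in the trace.
with pm.Model() as named_model:
    alpha = pm.Normal('alpha', mu=0, sd=10)
    beta = pm.Normal('beta', mu=0, sd=10, shape=2)
    sigma = pm.HalfNormal('sigma', sd=1)
    mu = pm.Deterministic('mu', alpha + beta[0]*X1 + beta[1]*X2)  # tracked deterministic
    Y_obs = pm.Normal('Y_obs', mu=mu, sd=sigma, observed=Y)
```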
###Code
map_estimate = pm.find_MAP(model=basic_model)
map_estimate
###Output
/Users/ckrapu/miniconda3/envs/pymc3-dev/lib/python3.7/site-packages/pymc3-3.5-py3.7.egg/pymc3/tuning/starting.py:61: UserWarning: find_MAP should not be used to initialize the NUTS sampler, simply call pymc3.sample() and it will automatically initialize NUTS in a better way.
logp = -149.58, ||grad|| = 12.242: 100%|██████████| 19/19 [00:00<00:00, 651.28it/s]
###Markdown
By default, `find_MAP` uses the Broyden–Fletcher–Goldfarb–Shanno (BFGS) optimization algorithm to find the maximum of the log-posterior but also allows selection of other optimization algorithms from the `scipy.optimize` module. For example, below we use Powell's method to find the MAP.
###Code
map_estimate = pm.find_MAP(model=basic_model, method='powell')
map_estimate
###Output
/Users/ckrapu/miniconda3/envs/pymc3-dev/lib/python3.7/site-packages/pymc3-3.5-py3.7.egg/pymc3/tuning/starting.py:61: UserWarning: find_MAP should not be used to initialize the NUTS sampler, simply call pymc3.sample() and it will automatically initialize NUTS in a better way.
warnings.warn('find_MAP should not be used to initialize the NUTS sampler, simply call pymc3.sample() and it will automatically initialize NUTS in a better way.')
0%| | 0/5000 [00:00<?, ?it/s]/Users/ckrapu/miniconda3/envs/pymc3-dev/lib/python3.7/site-packages/scipy/optimize/_minimize.py:502: RuntimeWarning: Method powell does not use gradient information (jac).
RuntimeWarning)
logp = -149.47, ||grad|| = 13.248: 100%|██████████| 177/177 [00:00<00:00, 591.98it/s]
###Markdown
It is important to note that the MAP estimate is not always reasonable, especially if the mode is at an extreme. This can be a subtle issue; with high dimensional posteriors, one can have areas of extremely high density but low total probability because the volume is very small. This will often occur in hierarchical models with the variance parameter for the random effect. If the individual group means are all the same, the posterior will have near infinite density if the scale parameter for the group means is almost zero, even though the probability of such a small scale parameter will be small since the group means must be extremely close together. Most techniques for finding the MAP estimate also only find a *local* optimum (which is often good enough), but can fail badly for multimodal posteriors if the different modes are meaningfully different.In summary, while PyMC3 provides the function `find_MAP()`, at this point mostly for historical reasons, this function is of little use in most scenarios. If you want a point estimate you should get it from the posterior. In the next section we will see how to get a posterior using sampling methods. Sampling methodsThough finding the MAP is a fast and easy way of obtaining estimates of the unknown model parameters, it is limited because there is no associated estimate of uncertainty produced with the MAP estimates. Instead, a simulation-based approach such as Markov chain Monte Carlo (MCMC) can be used to obtain a Markov chain of values that, given the satisfaction of certain conditions, are indistinguishable from samples from the _true_ posterior distribution. To conduct MCMC sampling to generate posterior samples in PyMC3, we specify a **step method** object that corresponds to a particular MCMC algorithm, such as Metropolis, Slice sampling, or the No-U-Turn Sampler (NUTS). PyMC3's `step_methods` submodule contains the following samplers: `NUTS`, `Metropolis`, `Slice`, `HamiltonianMC`, and `BinaryMetropolis`. These step methods can be assigned manually, or assigned automatically by PyMC3. Auto-assignment is based on the attributes of each variable in the model. In general:* Binary variables will be assigned to `BinaryMetropolis`* Discrete variables will be assigned to `Metropolis`* Continuous variables will be assigned to `NUTS`Auto-assignment can be overriden for any subset of variables by specifying them manually prior to sampling. Gradient-based sampling methodsPyMC3 has the standard sampling algorithms like adaptive Metropolis-Hastings and adaptive slice sampling, but PyMC3's most capable step method is the No-U-Turn Sampler. NUTS is especially useful on models that have many continuous parameters, a situation where other MCMC algorithms work very slowly. It takes advantage of information about where regions of higher probability are, based on the gradient of the log posterior-density. This helps it achieve dramatically faster convergence on large problems than traditional sampling methods achieve. PyMC3 relies on Theano to analytically compute model gradients via automatic differentiation of the posterior density. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo. For random variables that are undifferentiable (namely, discrete variables) NUTS cannot be used, but it may still be used on the differentiable variables in a model that contains undifferentiable variables. 
NUTS requires a scaling matrix parameter, which is analogous to the variance parameter for the jump proposal distribution in Metropolis-Hastings, although NUTS uses it somewhat differently. The matrix gives the rough shape of the distribution so that NUTS does not make jumps that are too large in some directions and too small in other directions. It is important to set this scaling parameter to a reasonable value to facilitate efficient sampling. This is especially true for models that have many unobserved stochastic random variables or models with highly non-normal posterior distributions. Poor scaling parameters will slow down NUTS significantly, sometimes almost stopping it completely. A reasonable starting point for sampling can also be important for efficient sampling, but not as often.`PyMC3` automatically initializes NUTS to reasonable values based on the variance of the samples obtained during a tuning phase. A little bit of noise is added to ensure different, parallel, chains start from different points. Also, `PyMC3` will automatically assign an appropriate sampler if we don't supply it via the `step` keyword argument (see below for an example of how to explicitly assign step methods).
###Code
with basic_model:
# draw 500 posterior samples
trace = pm.sample(500)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [sigma, beta, alpha]
Sampling 2 chains: 100%|██████████| 2000/2000 [00:02<00:00, 989.75draws/s]
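The call above relies on PyMC3's defaults. For reference, here is a hedged sketch of the `sample` arguments that are most commonly adjusted; the keyword names follow recent PyMC3 3.x releases and may differ slightly in older versions.

```python
# Sketch: more explicit control over tuning, chains, parallelism and initialization.
with basic_model:
    trace = pm.sample(draws=500,          # posterior draws kept per chain
                      tune=1000,          # tuning (warm-up) steps, discarded
                      chains=4,           # number of independent chains
                      cores=2,            # parallel worker processes
                      init='adapt_diag',  # NUTS initialization strategy
                      random_seed=42)
```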
###Markdown
The `sample` function runs the step method(s) assigned (or passed) to it for the given number of iterations and returns a `Trace` object containing the samples collected, in the order they were collected. The `trace` object can be queried in a similar way to a `dict` containing a map from variable names to `numpy.array`s. The first dimension of the array is the sampling index and the later dimensions match the shape of the variable. We can see the last 5 values for the `alpha` variable as follows:
###Code
trace['alpha'][-5:]
###Output
_____no_output_____
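A `MultiTrace` also exposes `get_values`, which can discard burn-in draws and select individual chains; a small sketch:

```python
# Sketch: the same draws via get_values, dropping the first 100 per chain
# and keeping only chain 0.
trace.get_values('alpha', burn=100, chains=[0])[-5:]
```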
###Markdown
If we wanted to use the slice sampling algorithm instead of NUTS (which was assigned automatically), we could have specified this as the `step` argument for `sample`.
###Code
with basic_model:
# instantiate sampler
step = pm.Slice()
# draw 5000 posterior samples
trace = pm.sample(5000, step=step)
###Output
Multiprocess sampling (2 chains in 2 jobs)
CompoundStep
>Slice: [sigma]
>Slice: [beta]
>Slice: [alpha]
Sampling 2 chains: 100%|██████████| 11000/11000 [00:17<00:00, 637.97draws/s]
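Step methods can also be mixed, assigning different samplers to different subsets of variables; PyMC3 combines them into a compound step automatically. The following is a hedged sketch reusing `basic_model` and the variables defined above (PyMC3 resolves transformed variables to the appropriate free variables internally).

```python
# Sketch: Metropolis for sigma, NUTS for the remaining continuous variables.
with basic_model:
    step1 = pm.Metropolis(vars=[sigma])
    step2 = pm.NUTS(vars=[alpha, beta])
    trace_mixed = pm.sample(2000, step=[step1, step2])
```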
###Markdown
Posterior analysis`PyMC3` provides plotting and summarization functions for inspecting the sampling output. A simple posterior plot can be created using `traceplot`.
###Code
pm.traceplot(trace);
###Output
_____no_output_____
###Markdown
The left column consists of a smoothed histogram (using kernel density estimation) of the marginal posteriors of each stochastic random variable while the right column contains the samples of the Markov chain plotted in sequential order. The `beta` variable, being vector-valued, produces two histograms and two sample traces, corresponding to both predictor coefficients.In addition, the `summary` function provides a text-based output of common posterior statistics:
###Code
pm.summary(trace).round(2)
###Output
_____no_output_____
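Beyond `traceplot` and `summary`, PyMC3 ships a few more diagnostic helpers; a short sketch (function names as of PyMC3 3.x):

```python
# Sketch: additional plots and convergence diagnostics.
pm.plot_posterior(trace)        # posterior densities with credible intervals
pm.forestplot(trace)            # credible intervals for all variables in one figure
print(pm.gelman_rubin(trace))   # R-hat; values close to 1 indicate convergence
print(pm.effective_n(trace))    # effective sample size per variable
```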
###Markdown
Case study 1: Stochastic volatilityWe present a case study of stochastic volatility, time varying stock market volatility, to illustrate PyMC3's use in addressing a more realistic problem. The distribution of market returns is highly non-normal, which makes sampling the volatilities significantly more difficult. This example has 400+ parameters so using common sampling algorithms like Metropolis-Hastings would get bogged down, generating highly autocorrelated samples. Instead, we use NUTS, which is dramatically more efficient. The ModelAsset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others they are very stable. Stochastic volatility models address this with a latent volatility variable, which changes over time. The following model is similar to the one described in the NUTS paper (Hoffman 2014, p. 21).$$\begin{aligned} \nu &\sim exp(0.1) \\ \sigma &\sim exp(50) \\ s_i &\sim \mathcal{N}(s_{i-1}, \sigma^2) \\ log(r_i) &\sim t(\nu, 0, exp(-2 s_i))\end{aligned}$$Here, $r$ is the daily return series which is modeled with a Student-t distribution with an unknown degrees of freedom parameter, and a scale parameter determined by a latent process $s$. The individual $s_i$ are the individual daily log volatilities in the latent log volatility process. The DataOur data consist of 401 daily returns of the S&P 500 stock market index during the 2008 financial crisis.
###Code
import pandas as pd
returns = pd.read_csv(pm.get_data('SP500.csv'), parse_dates=True, index_col=0)
len(returns)
returns.plot(figsize=(10, 6))
plt.ylabel('daily returns in %');
###Output
_____no_output_____
###Markdown
Model SpecificationAs with the linear regression example, specifying the model in PyMC3 mirrors its statistical specification. This model employs several new distributions: the `Exponential` distribution for the $\nu$ and $\sigma$ priors, the Student-T (`StudentT`) distribution for distribution of returns, and the `GaussianRandomWalk` for the prior for the latent volatilities. In PyMC3, variables with purely positive priors like `Exponential` are transformed with a log transform. This makes sampling more robust. Behind the scenes, a variable in the unconstrained space (named "variableName_log") is added to the model for sampling. In this model this happens behind the scenes for both the degrees of freedom, `nu`, and the scale parameter for the volatility process, `sigma`, since they both have exponential priors. Variables with priors that constrain them on two sides, like `Beta` or `Uniform`, are also transformed to be unconstrained but with a log odds transform. Although, unlike model specification in PyMC2, we do not typically provide starting points for variables at the model specification stage, we can also provide an initial value for any distribution (called a "test value") using the `testval` argument. This overrides the default test value for the distribution (usually the mean, median or mode of the distribution), and is most often useful if some values are illegal and we want to ensure we select a legal one. The test values for the distributions are also used as a starting point for sampling and optimization by default, though this is easily overriden. The vector of latent volatilities `s` is given a prior distribution by `GaussianRandomWalk`. As its name suggests GaussianRandomWalk is a vector valued distribution where the values of the vector form a random normal walk of length n, as specified by the `shape` argument. The scale of the innovations of the random walk, `sigma`, is specified in terms of the standard deviation of the normally distributed innovations and can be a scalar or vector.
###Code
with pm.Model() as sp500_model:
nu = pm.Exponential('nu', 1/10., testval=5.)
sigma = pm.Exponential('sigma', 1/0.02, testval=.1)
s = pm.GaussianRandomWalk('s', sd=sigma, shape=len(returns))
volatility_process = pm.Deterministic('volatility_process', pm.math.exp(-2*s)**0.5)
r = pm.StudentT('r', nu=nu, sd=volatility_process, observed=returns['change'])
###Output
_____no_output_____
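A quick way to see the automatic transformation at work is to list the model's free variables: the positive-only priors show up under their transformed names. A small sketch, noting that the exact suffixes can differ between PyMC3 versions:

```python
# Sketch: the exponential priors are sampled on the log scale behind the scenes.
sp500_model.free_RVs   # e.g. [nu_log__, sigma_log__, s]
```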
###Markdown
Notice that we transform the log volatility process `s` into the volatility process by `exp(-2*s)`. Here, `exp` is a Theano function, rather than the corresponding function in NumPy; Theano provides a large subset of the mathematical functions that NumPy does.Also note that we have declared the `Model` name `sp500_model` in the first occurrence of the context manager, rather than splitting it into two lines, as we did for the first example. Fitting
###Code
with sp500_model:
trace = pm.sample(2000)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [s, sigma, nu]
Sampling 2 chains: 100%|██████████| 5000/5000 [03:14<00:00, 25.70draws/s]
The acceptance probability does not match the target. It is 0.6657586191001563, but should be close to 0.8. Try to increase the number of tuning steps.
The estimated number of effective samples is smaller than 200 for some parameters.
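The warnings above suggest increasing tuning; a hedged sketch of a re-run with more tuning steps and a higher NUTS target acceptance rate follows (keyword spelling as in PyMC3 3.x, where NUTS options are passed via `nuts_kwargs`):

```python
# Sketch: longer tuning and a stricter target acceptance rate for NUTS.
with sp500_model:
    trace = pm.sample(2000, tune=2000, nuts_kwargs=dict(target_accept=0.9))
```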
###Markdown
We can check our samples by looking at the traceplot for `nu` and `sigma`.
###Code
pm.traceplot(trace, varnames=['nu', 'sigma']);
###Output
_____no_output_____
###Markdown
Finally we plot the distribution of volatility paths by plotting many of our sampled volatility paths on the same graph. Each is rendered partially transparent (via the `alpha` argument in Matplotlib's `plot` function) so the regions where many paths overlap are shaded more darkly.
###Code
fig, ax = plt.subplots(figsize=(15, 8))
returns.plot(ax=ax)
ax.plot(returns.index, 1/np.exp(trace['s',::5].T), 'C3', alpha=.03);
ax.set(title='volatility_process', xlabel='time', ylabel='volatility');
ax.legend(['S&P500', 'stochastic volatility process']);
###Output
_____no_output_____
###Markdown
As you can see, the model correctly infers the increase in volatility during the 2008 financial crash. Moreover, note that this model is quite complex because of its high dimensionality and the dependency structure in the random walk distribution. NUTS as implemented in PyMC3, however, correctly infers the posterior distribution with ease. Case study 2: Coal mining disastersConsider the following time series of recorded coal mining disasters in the UK from 1851 to 1962 (Jarrett, 1979). The number of disasters is thought to have been affected by changes in safety regulations during this period. Unfortunately, we also have a pair of years with missing data, identified as missing by a NumPy MaskedArray using -999 as the marker value. Next we will build a model for this series and attempt to estimate when the change occurred. At the same time, we will see how to handle missing data, use multiple samplers and sample from discrete random variables.
###Code
disaster_data = np.ma.masked_values([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, -999, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, -999, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1], value=-999)
years = np.arange(1851, 1962)
plt.plot(years, disaster_data, 'o', markersize=8);
plt.ylabel("Disaster count")
plt.xlabel("Year");
###Output
_____no_output_____
###Markdown
Occurrences of disasters in the time series are thought to follow a Poisson process, with a large rate parameter in the early part of the time series and a smaller rate in the later part. We are interested in locating the change point in the series, which perhaps is related to changes in mining safety regulations.In our model, $$ \begin{aligned} D_t &\sim \text{Pois}(r_t), r_t= \begin{cases} e, & \text{if } t \le s \\ l, & \text{if } t \gt s \end{cases} \\ s &\sim \text{Unif}(t_l, t_h)\\ e &\sim \text{exp}(1)\\ l &\sim \text{exp}(1) \end{aligned}$$the parameters are defined as follows: * $D_t$: The number of disasters in year $t$ * $r_t$: The rate parameter of the Poisson distribution of disasters in year $t$. * $s$: The year in which the rate parameter changes (the switchpoint). * $e$: The rate parameter before the switchpoint $s$. * $l$: The rate parameter after the switchpoint $s$. * $t_l$, $t_h$: The lower and upper boundaries of year $t$. This model is built much like our previous models. The major differences are the introduction of discrete variables with the Poisson and discrete-uniform priors and the novel form of the deterministic random variable `rate`.
###Code
with pm.Model() as disaster_model:
switchpoint = pm.DiscreteUniform('switchpoint', lower=years.min(), upper=years.max(), testval=1900)
# Priors for pre- and post-switch rates number of disasters
early_rate = pm.Exponential('early_rate', 1)
late_rate = pm.Exponential('late_rate', 1)
# Allocate appropriate Poisson rates to years before and after current
rate = pm.math.switch(switchpoint >= years, early_rate, late_rate)
disasters = pm.Poisson('disasters', rate, observed=disaster_data)
###Output
/Users/ckrapu/miniconda3/envs/pymc3-dev/lib/python3.7/site-packages/pymc3-3.5-py3.7.egg/pymc3/model.py:1266: UserWarning: Data in disasters contains missing values and will be automatically imputed from the sampling distribution.
warnings.warn(impute_message, UserWarning)
###Markdown
The logic for the rate random variable,```pythonrate = switch(switchpoint >= years, early_rate, late_rate)```is implemented using `switch`, a Theano function that works like an if statement. It uses the first argument to switch between the next two arguments.Missing values are handled transparently by passing a `MaskedArray` or a `pandas.DataFrame` with NaN values to the `observed` argument when creating an observed stochastic random variable. Behind the scenes, another random variable, `disasters.missing_values`, is created to model the missing values. All we need to do to handle the missing values is ensure we sample this random variable as well. Unfortunately, because they are discrete variables and thus have no meaningful gradient, we cannot use NUTS for sampling `switchpoint` or the missing disaster observations. Instead, we will sample using a `Metropolis` step method, which implements adaptive Metropolis-Hastings, because it is designed to handle discrete values. `PyMC3` automatically assigns the correct sampling algorithms.
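To make the `switch` logic concrete, here is a tiny standalone illustration of its elementwise if/else behaviour; it is a sketch independent of the model above and assumes Theano is importable as usual.

```python
# Sketch: tt.switch picks from the second or third argument elementwise.
import theano.tensor as tt
x = tt.vector('x')
y = tt.switch(x > 0, x, -x)                # elementwise absolute value
print(y.eval({x: np.array([-2.0, 3.0])}))  # -> [2. 3.]
```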
###Code
with disaster_model:
trace = pm.sample(10000)
###Output
Multiprocess sampling (2 chains in 2 jobs)
CompoundStep
>CompoundStep
>>Metropolis: [disasters_missing]
>>Metropolis: [switchpoint]
>NUTS: [late_rate, early_rate]
Sampling 2 chains: 100%|██████████| 21000/21000 [00:30<00:00, 691.21draws/s]
The number of effective samples is smaller than 10% for some parameters.
###Markdown
In the trace plot below we can see that there's about a 10 year span that's plausible for a significant change in safety, but a 5 year span that contains most of the probability mass. The distribution is jagged because of the jumpy relationship between the year switchpoint and the likelihood and not due to sampling error.
###Code
pm.traceplot(trace);
###Output
_____no_output_____
###Markdown
The following plot shows the switch point as an orange vertical line, together with its HPD as a semitransparent band. The dashed black line shows the accident rate.
###Code
plt.figure(figsize=(10, 8))
plt.plot(years, disaster_data, '.')
plt.ylabel("Number of accidents", fontsize=16)
plt.xlabel("Year", fontsize=16)
plt.vlines(trace['switchpoint'].mean(), disaster_data.min(), disaster_data.max(), color='C1')
average_disasters = np.zeros_like(disaster_data, dtype='float')
for i, year in enumerate(years):
idx = year < trace['switchpoint']
average_disasters[i] = (trace['early_rate'][idx].sum() + trace['late_rate'][~idx].sum()) / (len(trace) * trace.nchains)
sp_hpd = pm.hpd(trace['switchpoint'])
plt.fill_betweenx(y=[disaster_data.min(), disaster_data.max()],
x1=sp_hpd[0], x2=sp_hpd[1], alpha=0.5, color='C1');
plt.plot(years, average_disasters, 'k--', lw=2);
###Output
_____no_output_____
###Markdown
Arbitrary deterministicsDue to its reliance on Theano, PyMC3 provides many mathematical functions and operators for transforming random variables into new random variables. However, the library of functions in Theano is not exhaustive, therefore Theano and PyMC3 provide functionality for creating arbitrary Theano functions in pure Python, and including these functions in PyMC models. This is supported with the `as_op` function decorator.Theano needs to know the types of the inputs and outputs of a function, which are specified for `as_op` by `itypes` for inputs and `otypes` for outputs. The Theano documentation includes [an overview of the available types](http://deeplearning.net/software/theano/library/tensor/basic.htmlall-fully-typed-constructors).
###Code
import theano.tensor as tt
from theano.compile.ops import as_op
@as_op(itypes=[tt.lscalar], otypes=[tt.lscalar])
def crazy_modulo3(value):
if value > 0:
return value % 3
else :
return (-value + 1) % 3
with pm.Model() as model_deterministic:
a = pm.Poisson('a', 1)
b = crazy_modulo3(a)
###Output
_____no_output_____
###Markdown
An important drawback of this approach is that it is not possible for `theano` to inspect these functions in order to compute the gradient required for the Hamiltonian-based samplers. Therefore, it is not possible to use the HMC or NUTS samplers for a model that uses such an operator. However, it is possible to add a gradient if we inherit from `theano.Op` instead of using `as_op`. The PyMC example set includes [a more elaborate example of the usage of as_op](https://github.com/pymc-devs/pymc3/blob/master/pymc3/examples/disaster_model_theano_op.py). Arbitrary distributionsSimilarly, the library of statistical distributions in PyMC3 is not exhaustive, but PyMC3 allows for the creation of user-defined functions for an arbitrary probability distribution. For simple statistical distributions, the `DensityDist` function takes as an argument any function that calculates a log-probability $log(p(x))$. This function may employ other random variables in its calculation. Here is an example inspired by a blog post by Jake Vanderplas on which priors to use for a linear regression (Vanderplas, 2014). ```pythonimport theano.tensor as ttwith pm.Model() as model: alpha = pm.Uniform('intercept', -100, 100) Create custom densities beta = pm.DensityDist('beta', lambda value: -1.5 * tt.log(1 + value**2), testval=0) eps = pm.DensityDist('eps', lambda value: -tt.log(tt.abs_(value)), testval=1) Create likelihood like = pm.Normal('y_est', mu=alpha + beta * X, sd=eps, observed=Y)``` For more complex distributions, one can create a subclass of `Continuous` or `Discrete` and provide the custom `logp` function, as required. This is how the built-in distributions in PyMC are specified. As an example, fields like psychology and astrophysics have complex likelihood functions for a particular process that may require numerical approximation. In these cases, it is impossible to write the function in terms of predefined theano operators and we must use a custom theano operator using `as_op` or inheriting from `theano.Op`. Implementing the `beta` variable above as a `Continuous` subclass is shown below, along with a sub-function.
###Code
class Beta(pm.Continuous):
def __init__(self, mu, *args, **kwargs):
super(Beta, self).__init__(*args, **kwargs)
self.mu = mu
self.mode = mu
def logp(self, value):
mu = self.mu
return beta_logp(value - mu)
def beta_logp(value):
return -1.5 * np.log(1 + (value)**2)
with pm.Model() as model:
beta = Beta('slope', mu=0, testval=0)
###Output
_____no_output_____
###Markdown
If your logp cannot be expressed in Theano, you can decorate the function with `as_op` as follows: `@as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])`. Note that this will create a blackbox Python function that will be much slower and not provide the gradients necessary for e.g. NUTS. Generalized Linear ModelsGeneralized Linear Models (GLMs) are a class of flexible models that are widely used to estimate regression relationships between a single outcome variable and one or multiple predictors. Because these models are so common, `PyMC3` offers a `glm` submodule that allows flexible creation of various GLMs with an intuitive `R`-like syntax that is implemented via the `patsy` module.The `glm` submodule requires data to be included as a `pandas` `DataFrame`. Hence, for our linear regression example:
###Code
# Convert X and Y to a pandas DataFrame
import pandas
df = pandas.DataFrame({'x1': X1, 'x2': X2, 'y': Y})
###Output
_____no_output_____
###Markdown
The model can then be very concisely specified in one line of code.
###Code
from pymc3.glm import GLM
with pm.Model() as model_glm:
GLM.from_formula('y ~ x1 + x2', df)
trace = pm.sample()
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [sd, x2, x1, Intercept]
Sampling 2 chains: 100%|██████████| 2000/2000 [00:02<00:00, 980.21draws/s]
###Markdown
The error distribution, if not specified via the `family` argument, is assumed to be normal. In the case of logistic regression, this can be modified by passing in a `Binomial` family object.
###Code
from pymc3.glm.families import Binomial
df_logistic = pandas.DataFrame({'x1': X1, 'y': Y > np.median(Y)})
with pm.Model() as model_glm_logistic:
GLM.from_formula('y ~ x1', df_logistic, family=Binomial())
###Output
_____no_output_____
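The logistic model above is only specified, not fit; sampling and summarising it works exactly as before. A short follow-up sketch, with variable names of our own choosing:

```python
# Sketch: fit the logistic regression and summarise the posterior.
with model_glm_logistic:
    trace_logistic = pm.sample(1000)
pm.summary(trace_logistic).round(2)
```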
###Markdown
Getting started with PyMC3Authors: John Salvatier, Thomas V. Wiecki, Christopher FonnesbeckNote: This text is based on the [PeerJ CS publication on PyMC3](https://peerj.com/articles/cs-55/). AbstractProbabilistic Programming allows for automatic Bayesian inference on user-defined probabilistic models. Recent advances in Markov chain Monte Carlo (MCMC) sampling allow inference on increasingly complex models. This class of MCMC, known as Hamliltonian Monte Carlo, requires gradient information which is often not readily available. PyMC3 is a new open source Probabilistic Programming framework written in Python that uses Theano to compute gradients via automatic differentiation as well as compile probabilistic programs on-the-fly to C for increased speed. Contrary to other Probabilistic Programming languages, PyMC3 allows model specification directly in Python code. The lack of a domain specific language allows for great flexibility and direct interaction with the model. This paper is a tutorial-style introduction to this software package. IntroductionProbabilistic programming (PP) allows flexible specification of Bayesian statistical models in code. PyMC3 is a new, open-source PP framework with an intuitive and readable, yet powerful, syntax that is close to the natural syntax statisticians use to describe models. It features next-generation Markov chain Monte Carlo (MCMC) sampling algorithms such as the No-U-Turn Sampler (NUTS; Hoffman, 2014), a self-tuning variant of Hamiltonian Monte Carlo (HMC; Duane, 1987). This class of samplers works well on high dimensional and complex posterior distributions and allows many complex models to be fit without specialized knowledge about fitting algorithms. HMC and NUTS take advantage of gradient information from the likelihood to achieve much faster convergence than traditional sampling methods, especially for larger models. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo, which means you usually don't need to have specialized knowledge about how the algorithms work. PyMC3, Stan (Stan Development Team, 2014), and the LaplacesDemon package for R are currently the only PP packages to offer HMC.Probabilistic programming in Python confers a number of advantages including multi-platform compatibility, an expressive yet clean and readable syntax, easy integration with other scientific libraries, and extensibility via C, C++, Fortran or Cython. These features make it relatively straightforward to write and use custom statistical distributions, samplers and transformation functions, as required by Bayesian analysis.While most of PyMC3's user-facing features are written in pure Python, it leverages Theano (Bergstra et al., 2010) to transparently transcode models to C and compile them to machine code, thereby boosting performance. Theano is a library that allows expressions to be defined using generalized vector data structures called *tensors*, which are tightly integrated with the popular NumPy `ndarray` data structure, and similarly allow for broadcasting and advanced indexing, just as NumPy arrays do. Theano also automatically optimizes the likelihood's computational graph for speed and provides simple GPU integration.Here, we present a primer on the use of PyMC3 for solving general Bayesian statistical inference and prediction problems. 
We will first see the basics of how to use PyMC3, motivated by a simple example: installation, data creation, model definition, model fitting and posterior analysis. Then we will cover two case studies and use them to show how to define and fit more sophisticated models. Finally we will show how to extend PyMC3 and discuss other useful features: the Generalized Linear Models subpackage, custom distributions, custom transformations and alternative storage backends. InstallationRunning PyMC3 requires a working Python interpreter, either version 2.7 (or more recent) or 3.5 (or more recent); we recommend that new users install version 3.5. A complete Python installation for Mac OSX, Linux and Windows can most easily be obtained by downloading and installing the free [`Anaconda Python Distribution`](https://store.continuum.io/cshop/anaconda/) by ContinuumIO. `PyMC3` can be installed using `pip` (https://pip.pypa.io/en/latest/installing.html):```pip install pymc3```Or via conda-forge:```conda install -c conda-forge pymc3```The current development branch of PyMC3 can be installed from GitHub, also using pip:```pip install git+https://github.com/pymc-devs/pymc3```The source code for PyMC3 is hosted on GitHub at https://github.com/pymc-devs/pymc3 and is distributed under the liberal [Apache License 2.0](https://github.com/pymc-devs/pymc3/blob/master/LICENSE). On the GitHub site, users may also report bugs and other issues, as well as contribute documentation or code to the project, which we actively encourage. A Motivating Example: Linear RegressionTo introduce model definition, fitting and posterior analysis, we first consider a simple Bayesian linear regression model with normal priors for the parameters. We are interested in predicting outcomes $Y$ as normally-distributed observations with an expected value $\mu$ that is a linear function of two predictor variables, $X_1$ and $X_2$.$$\begin{aligned} Y &\sim \mathcal{N}(\mu, \sigma^2) \\\mu &= \alpha + \beta_1 X_1 + \beta_2 X_2\end{aligned}$$where $\alpha$ is the intercept, and $\beta_i$ is the coefficient for covariate $X_i$, while $\sigma$ represents the observation error. Since we are constructing a Bayesian model, we must assign a prior distribution to the unknown variables in the model. We choose zero-mean normal priors with variance of 100 for both regression coefficients, which corresponds to *weak* information regarding the true parameter values. We choose a half-normal distribution (normal distribution bounded at zero) as the prior for $\sigma$.$$\begin{aligned} \alpha &\sim \mathcal{N}(0, 100) \\\beta_i &\sim \mathcal{N}(0, 100) \\\sigma &\sim \lvert\mathcal{N}(0, 1){\rvert}\end{aligned}$$ Generating dataWe can simulate some artificial data from this model using only NumPy's `random` module, and then use PyMC3 to try to recover the corresponding parameters. We are intentionally generating the data to closely correspond the PyMC3 model structure.
###Code
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
# Initialize random number generator
np.random.seed(123)
# True parameter values
alpha, sigma = 1, 1
beta = [1, 2.5]
# Size of dataset
size = 100
# Predictor variable
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
# Simulate outcome variable
Y = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma
###Output
_____no_output_____
###Markdown
Here is what the simulated data look like. We use the `pylab` module from the plotting library matplotlib.
###Code
%matplotlib inline
fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,4))
axes[0].scatter(X1, Y)
axes[1].scatter(X2, Y)
axes[0].set_ylabel('Y'); axes[0].set_xlabel('X1'); axes[1].set_xlabel('X2');
###Output
_____no_output_____
###Markdown
Model SpecificationSpecifying this model in PyMC3 is straightforward because the syntax is as close to the statistical notation. For the most part, each line of Python code corresponds to a line in the model notation above. First, we import PyMC. We use the convention of importing it as `pm`.
###Code
import pymc3 as pm
print('Running on PyMC3 v{}'.format(pm.__version__))
###Output
Running on PyMC3 v3.4.1
###Markdown
Now we build our model, which we will present in full first, then explain each part line-by-line.
###Code
basic_model = pm.Model()
with basic_model:
# Priors for unknown model parameters
alpha = pm.Normal('alpha', mu=0, sd=10)
beta = pm.Normal('beta', mu=0, sd=10, shape=2)
sigma = pm.HalfNormal('sigma', sd=1)
# Expected value of outcome
mu = alpha + beta[0]*X1 + beta[1]*X2
# Likelihood (sampling distribution) of observations
Y_obs = pm.Normal('Y_obs', mu=mu, sd=sigma, observed=Y)
###Output
_____no_output_____
###Markdown
The first line,```pythonbasic_model = Model()```creates a new `Model` object which is a container for the model random variables.Following instantiation of the model, the subsequent specification of the model components is performed inside a `with` statement:```pythonwith basic_model:```This creates a *context manager*, with our `basic_model` as the context, that includes all statements until the indented block ends. This means all PyMC3 objects introduced in the indented code block below the `with` statement are added to the model behind the scenes. Absent this context manager idiom, we would be forced to manually associate each of the variables with `basic_model` right after we create them. If you try to create a new random variable without a `with model:` statement, it will raise an error since there is no obvious model for the variable to be added to.The first three statements in the context manager:```pythonalpha = Normal('alpha', mu=0, sd=10)beta = Normal('beta', mu=0, sd=10, shape=2)sigma = HalfNormal('sigma', sd=1)```create **stochastic** random variables with Normal prior distributions for the regression coefficients with a mean of 0 and standard deviation of 10, and a half-normal distribution for the standard deviation of the observations, $\sigma$. These are stochastic because their values are partly determined by its parents in the dependency graph of random variables, which for priors are simple constants, and partly random (or stochastic). We call the `Normal` constructor to create a random variable to use as a normal prior. The first argument is always the *name* of the random variable, which should almost always match the name of the Python variable being assigned to, since it is sometimes used to retrieve the variable from the model for summarizing output. The remaining required arguments for a stochastic object are the parameters, in this case `mu`, the mean, and `sd`, the standard deviation, which we assign hyperparameter values for the model. In general, a distribution's parameters are values that determine the location, shape or scale of the random variable, depending on the parameterization of the distribution. Most commonly used distributions, such as `Beta`, `Exponential`, `Categorical`, `Gamma`, `Binomial` and many others, are available in PyMC3.The `beta` variable has an additional `shape` argument to denote it as a vector-valued parameter of size 2. The `shape` argument is available for all distributions and specifies the length or shape of the random variable, but is optional for scalar variables, since it defaults to a value of one. It can be an integer, to specify an array, or a tuple, to specify a multidimensional array (*e.g.* `shape=(5,7)` makes random variable that takes on 5 by 7 matrix values). Detailed notes about distributions, sampling methods and other PyMC3 functions are available via the `help` function.
###Code
help(pm.Normal) #try help(Model), help(Uniform) or help(basic_model)
###Output
Help on class Normal in module pymc3.distributions.continuous:
class Normal(pymc3.distributions.distribution.Continuous)
| Univariate normal log-likelihood.
|
| The pdf of this distribution is
|
| .. math::
|
| f(x \mid \mu, \tau) =
| \sqrt{\frac{\tau}{2\pi}}
| \exp\left\{ -\frac{\tau}{2} (x-\mu)^2 \right\}
|
| Normal distribution can be parameterized either in terms of precision
| or standard deviation. The link between the two parametrizations is
| given by
|
| .. math::
|
| \tau = \dfrac{1}{\sigma^2}
|
| .. plot::
|
| import matplotlib.pyplot as plt
| import numpy as np
| import scipy.stats as st
| plt.style.use('seaborn-darkgrid')
| x = np.linspace(-5, 5, 1000)
| mus = [0., 0., 0., -2.]
| sds = [0.4, 1., 2., 0.4]
| for mu, sd in zip(mus, sds):
| pdf = st.norm.pdf(x, mu, sd)
| plt.plot(x, pdf, label=r'$\mu$ = {}, $\sigma$ = {}'.format(mu, sd))
| plt.xlabel('x', fontsize=12)
| plt.ylabel('f(x)', fontsize=12)
| plt.legend(loc=1)
| plt.show()
|
| ======== ==========================================
| Support :math:`x \in \mathbb{R}`
| Mean :math:`\mu`
| Variance :math:`\dfrac{1}{\tau}` or :math:`\sigma^2`
| ======== ==========================================
|
| Parameters
| ----------
| mu : float
| Mean.
| sd : float
| Standard deviation (sd > 0) (only required if tau is not specified).
| tau : float
| Precision (tau > 0) (only required if sd is not specified).
|
| Examples
| --------
| .. code-block:: python
|
| with pm.Model():
| x = pm.Normal('x', mu=0, sd=10)
|
| with pm.Model():
| x = pm.Normal('x', mu=0, tau=1/23)
|
| Method resolution order:
| Normal
| pymc3.distributions.distribution.Continuous
| pymc3.distributions.distribution.Distribution
| builtins.object
|
| Methods defined here:
|
| __init__(self, mu=0, sd=None, tau=None, **kwargs)
| Initialize self. See help(type(self)) for accurate signature.
|
| logp(self, value)
|
| random(self, point=None, size=None)
|
| ----------------------------------------------------------------------
| Methods inherited from pymc3.distributions.distribution.Distribution:
|
| __getnewargs__(self)
|
| __latex__ = _repr_latex_(self, name=None, dist=None)
| Magic method name for IPython to use for LaTeX formatting.
|
| default(self)
|
| get_test_val(self, val, defaults)
|
| getattr_value(self, val)
|
| logp_nojac(self, *args, **kwargs)
| Return the logp, but do not include a jacobian term for transforms.
|
| If we use different parametrizations for the same distribution, we
| need to add the determinant of the jacobian of the transformation
| to make sure the densities still describe the same distribution.
| However, MAP estimates are not invariant with respect to the
| parametrization, we need to exclude the jacobian terms in this case.
|
| This function should be overwritten in base classes for transformed
| distributions.
|
| logp_sum(self, *args, **kwargs)
| Return the sum of the logp values for the given observations.
|
| Subclasses can use this to improve the speed of logp evaluations
| if only the sum of the logp values is needed.
|
| ----------------------------------------------------------------------
| Class methods inherited from pymc3.distributions.distribution.Distribution:
|
| dist(*args, **kwargs) from builtins.type
|
| ----------------------------------------------------------------------
| Static methods inherited from pymc3.distributions.distribution.Distribution:
|
| __new__(cls, name, *args, **kwargs)
| Create and return a new object. See help(type) for accurate signature.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from pymc3.distributions.distribution.Distribution:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
Having defined the priors, the next statement creates the expected value `mu` of the outcomes, specifying the linear relationship:```pythonmu = alpha + beta[0]*X1 + beta[1]*X2```This creates a **deterministic** random variable, which implies that its value is *completely* determined by its parents' values. That is, there is no uncertainty beyond that which is inherent in the parents' values. Here, `mu` is just the sum of the intercept `alpha` and the two products of the coefficients in `beta` and the predictor variables, whatever their values may be. PyMC3 random variables and data can be arbitrarily added, subtracted, divided, multiplied together and indexed-into to create new random variables. This allows for great model expressivity. Many common mathematical functions like `sum`, `sin`, `exp` and linear algebra functions like `dot` (for inner product) and `inv` (for inverse) are also provided. The final line of the model, defines `Y_obs`, the sampling distribution of the outcomes in the dataset.```pythonY_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)```This is a special case of a stochastic variable that we call an **observed stochastic**, and represents the data likelihood of the model. It is identical to a standard stochastic, except that its `observed` argument, which passes the data to the variable, indicates that the values for this variable were observed, and should not be changed by any fitting algorithm applied to the model. The data can be passed in the form of either a `numpy.ndarray` or `pandas.DataFrame` object.Notice that, unlike for the priors of the model, the parameters for the normal distribution of `Y_obs` are not fixed values, but rather are the deterministic object `mu` and the stochastic `sigma`. This creates parent-child relationships between the likelihood and these two variables. Model fittingHaving completely specified our model, the next step is to obtain posterior estimates for the unknown variables in the model. Ideally, we could calculate the posterior estimates analytically, but for most non-trivial models, this is not feasible. We will consider two approaches, whose appropriateness depends on the structure of the model and the goals of the analysis: finding the *maximum a posteriori* (MAP) point using optimization methods, and computing summaries based on samples drawn from the posterior distribution using Markov Chain Monte Carlo (MCMC) sampling methods. Maximum a posteriori methodsThe **maximum a posteriori (MAP)** estimate for a model, is the mode of the posterior distribution and is generally found using numerical optimization methods. This is often fast and easy to do, but only gives a point estimate for the parameters and can be biased if the mode isn't representative of the distribution. PyMC3 provides this functionality with the `find_MAP` function.Below we find the MAP for our original model. The MAP is returned as a parameter **point**, which is always represented by a Python dictionary of variable names to NumPy arrays of parameter values.
###Code
map_estimate = pm.find_MAP(model=basic_model)
map_estimate
###Output
logp = -149.58, ||grad|| = 12.242: 100%|██████████| 19/19 [00:00<00:00, 2230.33it/s]
###Markdown
By default, `find_MAP` uses the Broyden–Fletcher–Goldfarb–Shanno (BFGS) optimization algorithm to find the maximum of the log-posterior but also allows selection of other optimization algorithms from the `scipy.optimize` module. For example, below we use Powell's method to find the MAP.
###Code
map_estimate = pm.find_MAP(model=basic_model, method='powell')
map_estimate
###Output
0%| | 0/5000 [00:00<?, ?it/s]/home/osvaldo/anaconda3/lib/python3.6/site-packages/scipy/optimize/_minimize.py:502: RuntimeWarning: Method powell does not use gradient information (jac).
RuntimeWarning)
logp = -149.47, ||grad|| = 13.248: 100%|██████████| 177/177 [00:00<00:00, 2676.93it/s]
###Markdown
It is important to note that the MAP estimate is not always reasonable, especially if the mode is at an extreme. This can be a subtle issue; with high dimensional posteriors, one can have areas of extremely high density but low total probability because the volume is very small. This will often occur in hierarchical models with the variance parameter for the random effect. If the individual group means are all the same, the posterior will have near infinite density if the scale parameter for the group means is almost zero, even though the probability of such a small scale parameter will be small since the group means must be extremely close together. Most techniques for finding the MAP estimate also only find a *local* optimum (which is often good enough), but can fail badly for multimodal posteriors if the different modes are meaningfully different.In summary, while PyMC3 provides the function `find_MAP()`, at this point mostly for historical reasons, this function is of little use in most scenarios. If you want a point estimate you should get it from the posterior. In the next section we will see how to get a posterior using sampling methods. Sampling methodsThough finding the MAP is a fast and easy way of obtaining estimates of the unknown model parameters, it is limited because there is no associated estimate of uncertainty produced with the MAP estimates. Instead, a simulation-based approach such as Markov chain Monte Carlo (MCMC) can be used to obtain a Markov chain of values that, given the satisfaction of certain conditions, are indistinguishable from samples from the _true_ posterior distribution. To conduct MCMC sampling to generate posterior samples in PyMC3, we specify a **step method** object that corresponds to a particular MCMC algorithm, such as Metropolis, Slice sampling, or the No-U-Turn Sampler (NUTS). PyMC3's `step_methods` submodule contains the following samplers: `NUTS`, `Metropolis`, `Slice`, `HamiltonianMC`, and `BinaryMetropolis`. These step methods can be assigned manually, or assigned automatically by PyMC3. Auto-assignment is based on the attributes of each variable in the model. In general:* Binary variables will be assigned to `BinaryMetropolis`* Discrete variables will be assigned to `Metropolis`* Continuous variables will be assigned to `NUTS`Auto-assignment can be overriden for any subset of variables by specifying them manually prior to sampling. Gradient-based sampling methodsPyMC3 has the standard sampling algorithms like adaptive Metropolis-Hastings and adaptive slice sampling, but PyMC3's most capable step method is the No-U-Turn Sampler. NUTS is especially useful on models that have many continuous parameters, a situation where other MCMC algorithms work very slowly. It takes advantage of information about where regions of higher probability are, based on the gradient of the log posterior-density. This helps it achieve dramatically faster convergence on large problems than traditional sampling methods achieve. PyMC3 relies on Theano to analytically compute model gradients via automatic differentiation of the posterior density. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo. For random variables that are undifferentiable (namely, discrete variables) NUTS cannot be used, but it may still be used on the differentiable variables in a model that contains undifferentiable variables. 
NUTS requires a scaling matrix parameter, which is analogous to the variance parameter for the jump proposal distribution in Metropolis-Hastings, although NUTS uses it somewhat differently. The matrix gives the rough shape of the distribution so that NUTS does not make jumps that are too large in some directions and too small in other directions. It is important to set this scaling parameter to a reasonable value to facilitate efficient sampling. This is especially true for models that have many unobserved stochastic random variables or models with highly non-normal posterior distributions. Poor scaling parameters will slow down NUTS significantly, sometimes almost stopping it completely. A reasonable starting point for sampling can also be important for efficient sampling, but not as often.`PyMC3` automatically initializes NUTS to reasonable values based on the variance of the samples obtained during a tuning phase. A little bit of noise is added to ensure different, parallel, chains start from different points. Also, `PyMC3` will automatically assign an appropriate sampler if we don't supply it via the `step` keyword argument (see below for an example of how to explicitly assign step methods).
###Code
with basic_model:
# draw 500 posterior samples
trace = pm.sample(500)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [sigma, beta, alpha]
100%|██████████| 1000/1000 [00:00<00:00, 1299.35it/s]
###Markdown
The `sample` function runs the step method(s) assigned (or passed) to it for the given number of iterations and returns a `Trace` object containing the samples collected, in the order they were collected. The `trace` object can be queried in a similar way to a `dict` containing a map from variable names to `numpy.array`s. The first dimension of the array is the sampling index and the later dimensions match the shape of the variable. We can see the last 5 values for the `alpha` variable as follows:
###Code
trace['alpha'][-5:]
###Output
_____no_output_____
###Markdown
If we wanted to use the slice sampling algorithm to sample `sigma` instead of NUTS (which was assigned automatically), we could have specified this as the `step` argument for `sample`.
###Code
with basic_model:
# instantiate sampler
step = pm.Slice()
# draw 5000 posterior samples
trace = pm.sample(5000, step=step)
###Output
Multiprocess sampling (2 chains in 2 jobs)
CompoundStep
>Slice: [sigma]
>Slice: [beta]
>Slice: [alpha]
100%|██████████| 5500/5500 [00:06<00:00, 811.17it/s]
###Markdown
Posterior analysis`PyMC3` provides plotting and summarization functions for inspecting the sampling output. A simple posterior plot can be created using `traceplot`.
###Code
pm.traceplot(trace);
###Output
_____no_output_____
###Markdown
The left column consists of a smoothed histogram (using kernel density estimation) of the marginal posteriors of each stochastic random variable while the right column contains the samples of the Markov chain plotted in sequential order. The `beta` variable, being vector-valued, produces two histograms and two sample traces, corresponding to both predictor coefficients.In addition, the `summary` function provides a text-based output of common posterior statistics:
###Code
pm.summary(trace).round(2)
###Output
_____no_output_____
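Beyond `summary`, point and interval estimates can be computed directly from the samples. A minimal hedged sketch, assuming the `trace` from the cells above is still in scope:

```python
# Hedged sketch: posterior mean and highest posterior density (HPD) interval for `alpha`,
# computed directly from the stored samples.
print(trace['alpha'].mean())
print(pm.hpd(trace['alpha']))  # default HPD interval, returned as [lower, upper]
```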
###Markdown
Case study 1: Stochastic volatilityWe present a case study of stochastic volatility, time varying stock market volatility, to illustrate PyMC3's use in addressing a more realistic problem. The distribution of market returns is highly non-normal, which makes sampling the volatilities significantly more difficult. This example has 400+ parameters so using common sampling algorithms like Metropolis-Hastings would get bogged down, generating highly autocorrelated samples. Instead, we use NUTS, which is dramatically more efficient. The ModelAsset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others they are very stable. Stochastic volatility models address this with a latent volatility variable, which changes over time. The following model is similar to the one described in the NUTS paper (Hoffman 2014, p. 21).$$\begin{aligned} \nu &\sim exp(0.1) \\ \sigma &\sim exp(50) \\ s_i &\sim \mathcal{N}(s_{i-1}, \sigma^2) \\ log(r_i) &\sim t(\nu, 0, exp(-2 s_i))\end{aligned}$$Here, $r$ is the daily return series which is modeled with a Student-t distribution with an unknown degrees of freedom parameter, and a scale parameter determined by a latent process $s$. The individual $s_i$ are the individual daily log volatilities in the latent log volatility process. The DataOur data consist of daily returns of the S&P 500 during the 2008 financial crisis. Here, we use `pandas-datareader` to obtain the price data from Yahoo Finance; it can be installed with `pip install pandas-datareader`.
###Code
from pandas_datareader import data
import pandas as pd
returns = data.get_data_yahoo('SPY', start='2008-5-1', end='2009-12-1')['Close'].pct_change()
len(returns)
returns.plot(figsize=(10, 6))
plt.ylabel('daily returns in %');
###Output
_____no_output_____
###Markdown
Model SpecificationAs with the linear regression example, specifying the model in PyMC3 mirrors its statistical specification. This model employs several new distributions: the `Exponential` distribution for the $\nu$ and $\sigma$ priors, the Student-T (`StudentT`) distribution for distribution of returns, and the `GaussianRandomWalk` for the prior for the latent volatilities. In PyMC3, variables with purely positive priors like `Exponential` are transformed with a log transform. This makes sampling more robust. Behind the scenes, a variable in the unconstrained space (named "variableName_log") is added to the model for sampling. In this model this happens behind the scenes for both the degrees of freedom, `nu`, and the scale parameter for the volatility process, `sigma`, since they both have exponential priors. Variables with priors that constrain them on two sides, like `Beta` or `Uniform`, are also transformed to be unconstrained but with a log odds transform. Although, unlike model specification in PyMC2, we do not typically provide starting points for variables at the model specification stage, we can also provide an initial value for any distribution (called a "test value") using the `testval` argument. This overrides the default test value for the distribution (usually the mean, median or mode of the distribution), and is most often useful if some values are illegal and we want to ensure we select a legal one. The test values for the distributions are also used as a starting point for sampling and optimization by default, though this is easily overriden. The vector of latent volatilities `s` is given a prior distribution by `GaussianRandomWalk`. As its name suggests GaussianRandomWalk is a vector valued distribution where the values of the vector form a random normal walk of length n, as specified by the `shape` argument. The scale of the innovations of the random walk, `sigma`, is specified in terms of the standard deviation of the normally distributed innovations and can be a scalar or vector.
###Code
with pm.Model() as sp500_model:
nu = pm.Exponential('nu', 1/10., testval=5.)
sigma = pm.Exponential('sigma', 1/0.02, testval=.1)
s = pm.GaussianRandomWalk('s', sd=sigma, shape=len(returns))
volatility_process = pm.Deterministic('volatility_process', pm.math.exp(-2*s)**0.5)
r = pm.StudentT('r', nu=nu, sd=volatility_process, observed=returns)
###Output
_____no_output_____
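As noted above, the exponential priors on `nu` and `sigma` are sampled on an unconstrained log scale. A hedged sketch for inspecting the transformed variables that PyMC3 added behind the scenes (the exact names are version-dependent):

```python
# Hedged sketch: list the free (possibly transformed) variables and the default
# starting values ("test point") implied by the testval arguments above.
print(sp500_model.free_RVs)           # e.g. [nu_log__, sigma_log__, s]
print(sp500_model.test_point.keys())
```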
###Markdown
Notice that we transform the log volatility process `s` into the volatility process by `exp(-2*s)`. Here, `exp` is a Theano function, rather than the corresponding function in NumPy; Theano provides a large subset of the mathematical functions that NumPy does.Also note that we have declared the `Model` name `sp500_model` in the first occurrence of the context manager, rather than splitting it into two lines, as we did for the first example. Fitting
###Code
with sp500_model:
trace = pm.sample(2000)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [r_missing, s, sigma, nu]
100%|██████████| 2500/2500 [01:53<00:00, 21.94it/s]
The estimated number of effective samples is smaller than 200 for some parameters.
###Markdown
We can check our samples by looking at the traceplot for `nu` and `sigma`.
###Code
pm.traceplot(trace, varnames=['nu', 'sigma']);
###Output
_____no_output_____
###Markdown
Finally we plot the distribution of volatility paths by plotting many of our sampled volatility paths on the same graph. Each is rendered partially transparent (via the `alpha` argument in Matplotlib's `plot` function) so the regions where many paths overlap are shaded more darkly.
###Code
fig, ax = plt.subplots(figsize=(15, 8))
returns.plot(ax=ax)
ax.plot(returns.index, 1/np.exp(trace['s',::5].T), 'C3', alpha=.03);
ax.set(title='volatility_process', xlabel='time', ylabel='volatility');
ax.legend(['S&P500', 'stochastic volatility process']);
###Output
_____no_output_____
###Markdown
As you can see, the model correctly infers the increase in volatility during the 2008 financial crash. Moreover, note that this model is quite complex because of its high dimensionality and dependency structure in the random walk distribution. NUTS, as implemented in PyMC3, however, correctly infers the posterior distribution with ease. Case study 2: Coal mining disastersConsider the following time series of recorded coal mining disasters in the UK from 1851 to 1962 (Jarrett, 1979). The number of disasters is thought to have been affected by changes in safety regulations during this period. Unfortunately, we also have a pair of years with missing data, identified as missing by a NumPy MaskedArray using -999 as the marker value. Next we will build a model for this series and attempt to estimate when the change occurred. At the same time, we will see how to handle missing data, use multiple samplers and sample from discrete random variables.
###Code
disaster_data = np.ma.masked_values([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, -999, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, -999, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1], value=-999)
years = np.arange(1851, 1962)
plt.plot(years, disaster_data, 'o', markersize=8);
plt.ylabel("Disaster count")
plt.xlabel("Year");
###Output
_____no_output_____
###Markdown
Occurrences of disasters in the time series are thought to follow a Poisson process with a large rate parameter in the early part of the time series, and one with a smaller rate in the later part. We are interested in locating the change point in the series, which perhaps is related to changes in mining safety regulations.In our model, $$ \begin{aligned} D_t &\sim \text{Pois}(r_t), r_t= \begin{cases} e, & \text{if } t \le s \\ l, & \text{if } t \gt s \end{cases} \\ s &\sim \text{Unif}(t_l, t_h)\\ e &\sim \text{exp}(1)\\ l &\sim \text{exp}(1) \end{aligned}$$ The parameters are defined as follows: * $D_t$: The number of disasters in year $t$ * $r_t$: The rate parameter of the Poisson distribution of disasters in year $t$. * $s$: The year in which the rate parameter changes (the switchpoint). * $e$: The rate parameter before the switchpoint $s$. * $l$: The rate parameter after the switchpoint $s$. * $t_l$, $t_h$: The lower and upper boundaries of year $t$. This model is built much like our previous models. The major differences are the introduction of discrete variables with the Poisson and discrete-uniform priors and the novel form of the deterministic random variable `rate`.
###Code
with pm.Model() as disaster_model:
switchpoint = pm.DiscreteUniform('switchpoint', lower=years.min(), upper=years.max(), testval=1900)
# Priors for pre- and post-switch rates number of disasters
early_rate = pm.Exponential('early_rate', 1)
late_rate = pm.Exponential('late_rate', 1)
# Allocate appropriate Poisson rates to years before and after current
rate = pm.math.switch(switchpoint >= years, early_rate, late_rate)
disasters = pm.Poisson('disasters', rate, observed=disaster_data)
###Output
_____no_output_____
###Markdown
The logic for the rate random variable,```pythonrate = switch(switchpoint >= year, early_rate, late_rate)```is implemented using `switch`, a Theano function that works like an if statement. It uses the first argument to switch between the next two arguments.Missing values are handled transparently by passing a `MaskedArray` or a `pandas.DataFrame` with NaN values to the `observed` argument when creating an observed stochastic random variable. Behind the scenes, another random variable, `disasters.missing_values`, is created to model the missing values. All we need to do to handle the missing values is ensure we sample this random variable as well. Unfortunately, because they are discrete variables and thus have no meaningful gradient, we cannot use NUTS for sampling `switchpoint` or the missing disaster observations. Instead, we will sample using a `Metropolis` step method, which implements adaptive Metropolis-Hastings, because it is designed to handle discrete values. `PyMC3` automatically assigns the correct sampling algorithms.
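To see how `switch` behaves, here is a small hedged sketch (hypothetical variable names) evaluated on plain NumPy inputs:

```python
import numpy as np
import theano.tensor as tt

# Hedged sketch: tt.switch acts like a vectorized if/else, evaluated element-wise.
year_t = tt.dvector('year_t')
rate_expr = tt.switch(year_t <= 1890, 3.0, 1.0)
print(rate_expr.eval({year_t: np.array([1880., 1900., 1950.])}))  # -> [3. 1. 1.]
```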
###Code
with disaster_model:
trace = pm.sample(10000)
###Output
Multiprocess sampling (2 chains in 2 jobs)
CompoundStep
>CompoundStep
>>Metropolis: [disasters_missing]
>>Metropolis: [switchpoint]
>NUTS: [late_rate, early_rate]
100%|██████████| 10500/10500 [00:11<00:00, 945.76it/s]
The number of effective samples is smaller than 10% for some parameters.
###Markdown
In the trace plot below we can see that there's about a 10 year span that's plausible for a significant change in safety, but a 5 year span that contains most of the probability mass. The distribution is jagged because of the jumpy relationship between the year switchpoint and the likelihood and not due to sampling error.
###Code
pm.traceplot(trace);
###Output
_____no_output_____
###Markdown
The following plot shows the switch point as an orange vertical line, together with its HPD as a semitransparent band. The dashed black line shows the accident rate.
###Code
plt.figure(figsize=(10, 8))
plt.plot(years, disaster_data, '.')
plt.ylabel("Number of accidents", fontsize=16)
plt.xlabel("Year", fontsize=16)
plt.vlines(trace['switchpoint'].mean(), disaster_data.min(), disaster_data.max(), color='C1')
average_disasters = np.zeros_like(disaster_data, dtype='float')
for i, year in enumerate(years):
idx = year < trace['switchpoint']
average_disasters[i] = (trace['early_rate'][idx].sum() + trace['late_rate'][~idx].sum()) / (len(trace) * trace.nchains)
sp_hpd = pm.hpd(trace['switchpoint'])
plt.fill_betweenx(y=[disaster_data.min(), disaster_data.max()],
x1=sp_hpd[0], x2=sp_hpd[1], alpha=0.5, color='C1');
plt.plot(years, average_disasters, 'k--', lw=2);
###Output
_____no_output_____
###Markdown
Arbitrary deterministicsDue to its reliance on Theano, PyMC3 provides many mathematical functions and operators for transforming random variables into new random variables. However, the library of functions in Theano is not exhaustive; therefore, Theano and PyMC3 provide functionality for creating arbitrary Theano functions in pure Python and for including these functions in PyMC models. This is supported with the `as_op` function decorator.Theano needs to know the types of the inputs and outputs of a function, which are specified for `as_op` by `itypes` for inputs and `otypes` for outputs. The Theano documentation includes [an overview of the available types](http://deeplearning.net/software/theano/library/tensor/basic.html#all-fully-typed-constructors).
###Code
import theano.tensor as tt
from theano.compile.ops import as_op
@as_op(itypes=[tt.lscalar], otypes=[tt.lscalar])
def crazy_modulo3(value):
if value > 0:
return value % 3
else :
return (-value + 1) % 3
with pm.Model() as model_deterministic:
a = pm.Poisson('a', 1)
b = crazy_modulo3(a)
###Output
_____no_output_____
###Markdown
An important drawback of this approach is that it is not possible for `theano` to inspect these functions in order to compute the gradient required for the Hamiltonian-based samplers. Therefore, it is not possible to use the HMC or NUTS samplers for a model that uses such an operator. However, it is possible to add a gradient if we inherit from `theano.Op` instead of using `as_op`. The PyMC example set includes [a more elaborate example of the usage of as_op](https://github.com/pymc-devs/pymc3/blob/master/pymc3/examples/disaster_model_theano_op.py). Arbitrary distributionsSimilarly, the library of statistical distributions in PyMC3 is not exhaustive, but PyMC3 allows for the creation of user-defined functions for an arbitrary probability distribution. For simple statistical distributions, the `DensityDist` function takes as an argument any function that calculates a log-probability $log(p(x))$. This function may employ other random variables in its calculation. Here is an example inspired by a blog post by Jake Vanderplas on which priors to use for a linear regression (Vanderplas, 2014).

```python
import theano.tensor as tt

with pm.Model() as model:
    alpha = pm.Uniform('intercept', -100, 100)

    # Create custom densities
    beta = pm.DensityDist('beta', lambda value: -1.5 * tt.log(1 + value**2), testval=0)
    eps = pm.DensityDist('eps', lambda value: -tt.log(tt.abs_(value)), testval=1)

    # Create likelihood
    like = pm.Normal('y_est', mu=alpha + beta * X, sd=eps, observed=Y)
```

For more complex distributions, one can create a subclass of `Continuous` or `Discrete` and provide the custom `logp` function, as required. This is how the built-in distributions in PyMC are specified. As an example, fields like psychology and astrophysics have complex likelihood functions for a particular process that may require numerical approximation. In these cases, it is impossible to write the function in terms of predefined theano operators and we must use a custom theano operator using `as_op` or inheriting from `theano.Op`. Implementing the `beta` variable above as a `Continuous` subclass is shown below, along with a sub-function.
###Code
class Beta(pm.Continuous):
def __init__(self, mu, *args, **kwargs):
super(Beta, self).__init__(*args, **kwargs)
self.mu = mu
self.mode = mu
def logp(self, value):
mu = self.mu
return beta_logp(value - mu)
def beta_logp(value):
return -1.5 * np.log(1 + (value)**2)
with pm.Model() as model:
beta = Beta('slope', mu=0, testval=0)
###Output
_____no_output_____
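For the `as_op` route mentioned earlier, here is a hedged sketch (hypothetical function and model names) of a black-box log-probability used with `DensityDist`; note that gradient-based samplers such as NUTS cannot be used with it:

```python
import numpy as np
import theano.tensor as tt
from theano.compile.ops import as_op

# Hedged sketch: a log-probability computed in plain NumPy, wrapped as a Theano op.
@as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])
def blackbox_logp(value):
    return np.asarray(-1.5 * np.log(1 + value**2))

with pm.Model() as blackbox_model:
    b = pm.DensityDist('b', blackbox_logp, testval=0.)
```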
###Markdown
If your logp cannot be expressed in Theano, you can decorate the function with `as_op` as follows: `@as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])`. Note that this will create a black-box Python function that will be much slower and will not provide the gradients necessary for, e.g., NUTS. Generalized Linear ModelsGeneralized Linear Models (GLMs) are a class of flexible models that are widely used to estimate regression relationships between a single outcome variable and one or multiple predictors. Because these models are so common, `PyMC3` offers a `glm` submodule that allows flexible creation of various GLMs with an intuitive `R`-like syntax that is implemented via the `patsy` module.The `glm` submodule requires data to be included as a `pandas` `DataFrame`. Hence, for our linear regression example:
###Code
# Convert X and Y to a pandas DataFrame
import pandas
df = pandas.DataFrame({'x1': X1, 'x2': X2, 'y': Y})
###Output
_____no_output_____
###Markdown
The model can then be very concisely specified in one line of code.
###Code
from pymc3.glm import GLM
with pm.Model() as model_glm:
GLM.from_formula('y ~ x1 + x2', df)
trace = pm.sample()
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [sd, x2, x1, Intercept]
100%|██████████| 1000/1000 [00:01<00:00, 976.25it/s]
###Markdown
The error distribution, if not specified via the `family` argument, is assumed to be normal. In the case of logistic regression, this can be modified by passing in a `Binomial` family object.
###Code
from pymc3.glm.families import Binomial
df_logistic = pandas.DataFrame({'x1': X1, 'y': Y > np.median(Y)})
with pm.Model() as model_glm_logistic:
GLM.from_formula('y ~ x1', df_logistic, family=Binomial())
###Output
_____no_output_____
###Markdown
Getting started with PyMC3Authors: John Salvatier, Thomas V. Wiecki, Christopher FonnesbeckNote: This text is based on the [PeerJ CS publication on PyMC3](https://peerj.com/articles/cs-55/). AbstractProbabilistic Programming allows for automatic Bayesian inference on user-defined probabilistic models. Recent advances in Markov chain Monte Carlo (MCMC) sampling allow inference on increasingly complex models. This class of MCMC, known as Hamiltonian Monte Carlo, requires gradient information which is often not readily available. PyMC3 is a new open source Probabilistic Programming framework written in Python that uses Theano to compute gradients via automatic differentiation as well as compile probabilistic programs on-the-fly to C for increased speed. Contrary to other Probabilistic Programming languages, PyMC3 allows model specification directly in Python code. The lack of a domain specific language allows for great flexibility and direct interaction with the model. This paper is a tutorial-style introduction to this software package. IntroductionProbabilistic programming (PP) allows flexible specification of Bayesian statistical models in code. PyMC3 is a new, open-source PP framework with an intuitive and readable, yet powerful, syntax that is close to the natural syntax statisticians use to describe models. It features next-generation Markov chain Monte Carlo (MCMC) sampling algorithms such as the No-U-Turn Sampler (NUTS; Hoffman, 2014), a self-tuning variant of Hamiltonian Monte Carlo (HMC; Duane, 1987). This class of samplers works well on high dimensional and complex posterior distributions and allows many complex models to be fit without specialized knowledge about fitting algorithms. HMC and NUTS take advantage of gradient information from the likelihood to achieve much faster convergence than traditional sampling methods, especially for larger models. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo, which means you usually don't need to have specialized knowledge about how the algorithms work. PyMC3, Stan (Stan Development Team, 2014), and the LaplacesDemon package for R are currently the only PP packages to offer HMC.Probabilistic programming in Python confers a number of advantages including multi-platform compatibility, an expressive yet clean and readable syntax, easy integration with other scientific libraries, and extensibility via C, C++, Fortran or Cython. These features make it relatively straightforward to write and use custom statistical distributions, samplers and transformation functions, as required by Bayesian analysis.While most of PyMC3's user-facing features are written in pure Python, it leverages Theano (Bergstra et al., 2010) to transparently transcode models to C and compile them to machine code, thereby boosting performance. Theano is a library that allows expressions to be defined using generalized vector data structures called *tensors*, which are tightly integrated with the popular NumPy `ndarray` data structure, and similarly allow for broadcasting and advanced indexing, just as NumPy arrays do. Theano also automatically optimizes the likelihood's computational graph for speed and provides simple GPU integration.Here, we present a primer on the use of PyMC3 for solving general Bayesian statistical inference and prediction problems. 
We will first see the basics of how to use PyMC3, motivated by a simple example: installation, data creation, model definition, model fitting and posterior analysis. Then we will cover two case studies and use them to show how to define and fit more sophisticated models. Finally we will show how to extend PyMC3 and discuss other useful features: the Generalized Linear Models subpackage, custom distributions, custom transformations and alternative storage backends. InstallationRunning PyMC3 requires a working Python interpreter, either version 2.7 (or more recent) or 3.5 (or more recent); we recommend that new users install version 3.5. A complete Python installation for Mac OSX, Linux and Windows can most easily be obtained by downloading and installing the free [`Anaconda Python Distribution`](https://store.continuum.io/cshop/anaconda/) by ContinuumIO. `PyMC3` can be installed using `pip` (https://pip.pypa.io/en/latest/installing.html):```pip install pymc3```Or via conda:```conda install pymc3```The current development branch of PyMC3 can be installed from GitHub, also using pip:```pip install git+https://github.com/pymc-devs/pymc3```The source code for PyMC3 is hosted on GitHub at https://github.com/pymc-devs/pymc3 and is distributed under the liberal [Apache License 2.0](https://github.com/pymc-devs/pymc3/blob/master/LICENSE). On the GitHub site, users may also report bugs and other issues, as well as contribute documentation or code to the project, which we actively encourage. A Motivating Example: Linear RegressionTo introduce model definition, fitting and posterior analysis, we first consider a simple Bayesian linear regression model with normal priors for the parameters. We are interested in predicting outcomes $Y$ as normally-distributed observations with an expected value $\mu$ that is a linear function of two predictor variables, $X_1$ and $X_2$.$$\begin{aligned} Y &\sim \mathcal{N}(\mu, \sigma^2) \\\mu &= \alpha + \beta_1 X_1 + \beta_2 X_2\end{aligned}$$where $\alpha$ is the intercept, and $\beta_i$ is the coefficient for covariate $X_i$, while $\sigma$ represents the observation error. Since we are constructing a Bayesian model, we must assign a prior distribution to the unknown variables in the model. We choose zero-mean normal priors with variance of 100 for both regression coefficients, which corresponds to *weak* information regarding the true parameter values. We choose a half-normal distribution (normal distribution bounded at zero) as the prior for $\sigma$.$$\begin{aligned} \alpha &\sim \mathcal{N}(0, 100) \\\beta_i &\sim \mathcal{N}(0, 100) \\\sigma &\sim \lvert\mathcal{N}(0, 1){\rvert}\end{aligned}$$ Generating dataWe can simulate some artificial data from this model using only NumPy's `random` module, and then use PyMC3 to try to recover the corresponding parameters. We are intentionally generating the data to closely correspond the PyMC3 model structure.
###Code
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
# Initialize random number generator
np.random.seed(123)
# True parameter values
alpha, sigma = 1, 1
beta = [1, 2.5]
# Size of dataset
size = 100
# Predictor variable
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
# Simulate outcome variable
Y = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma
###Output
_____no_output_____
###Markdown
Here is what the simulated data look like. We use the `pyplot` module from the plotting library matplotlib.
###Code
%matplotlib inline
fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,4))
axes[0].scatter(X1, Y)
axes[1].scatter(X2, Y)
axes[0].set_ylabel('Y'); axes[0].set_xlabel('X1'); axes[1].set_xlabel('X2');
###Output
_____no_output_____
###Markdown
Model SpecificationSpecifying this model in PyMC3 is straightforward because the syntax is very close to the statistical notation. For the most part, each line of Python code corresponds to a line in the model notation above. First, we import PyMC3. We use the convention of importing it as `pm`.
###Code
import pymc3 as pm
print('Running on PyMC3 v{}'.format(pm.__version__))
###Output
Running on PyMC3 v3.5
###Markdown
Now we build our model, which we will present in full first, then explain each part line-by-line.
###Code
basic_model = pm.Model()
with basic_model:
# Priors for unknown model parameters
alpha = pm.Normal('alpha', mu=0, sigma=10)
beta = pm.Normal('beta', mu=0, sigma=10, shape=2)
sigma = pm.HalfNormal('sigma', sigma=1)
# Expected value of outcome
mu = alpha + beta[0]*X1 + beta[1]*X2
# Likelihood (sampling distribution) of observations
Y_obs = pm.Normal('Y_obs', mu=mu, sigma=sigma, observed=Y)
###Output
_____no_output_____
###Markdown
The first line,```pythonbasic_model = Model()```creates a new `Model` object which is a container for the model random variables.Following instantiation of the model, the subsequent specification of the model components is performed inside a `with` statement:```pythonwith basic_model:```This creates a *context manager*, with our `basic_model` as the context, that includes all statements until the indented block ends. This means all PyMC3 objects introduced in the indented code block below the `with` statement are added to the model behind the scenes. Absent this context manager idiom, we would be forced to manually associate each of the variables with `basic_model` right after we create them. If you try to create a new random variable without a `with model:` statement, it will raise an error since there is no obvious model for the variable to be added to.The first three statements in the context manager:```pythonalpha = Normal('alpha', mu=0, sigma=10)beta = Normal('beta', mu=0, sigma=10, shape=2)sigma = HalfNormal('sigma', sigma=1)```create **stochastic** random variables with Normal prior distributions for the regression coefficients with a mean of 0 and standard deviation of 10, and a half-normal distribution for the standard deviation of the observations, $\sigma$. These are stochastic because their values are partly determined by its parents in the dependency graph of random variables, which for priors are simple constants, and partly random (or stochastic). We call the `Normal` constructor to create a random variable to use as a normal prior. The first argument is always the *name* of the random variable, which should almost always match the name of the Python variable being assigned to, since it is sometimes used to retrieve the variable from the model for summarizing output. The remaining required arguments for a stochastic object are the parameters, in this case `mu`, the mean, and `sd`, the standard deviation, which we assign hyperparameter values for the model. In general, a distribution's parameters are values that determine the location, shape or scale of the random variable, depending on the parameterization of the distribution. Most commonly used distributions, such as `Beta`, `Exponential`, `Categorical`, `Gamma`, `Binomial` and many others, are available in PyMC3.The `beta` variable has an additional `shape` argument to denote it as a vector-valued parameter of size 2. The `shape` argument is available for all distributions and specifies the length or shape of the random variable, but is optional for scalar variables, since it defaults to a value of one. It can be an integer, to specify an array, or a tuple, to specify a multidimensional array (*e.g.* `shape=(5,7)` makes random variable that takes on 5 by 7 matrix values). Detailed notes about distributions, sampling methods and other PyMC3 functions are available via the `help` function.
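To make the `shape` argument concrete, here is a minimal hedged sketch (hypothetical variable names, kept in a throwaway model rather than `basic_model`):

```python
# Hedged sketch: shape controls whether a variable is scalar, vector or matrix valued.
with pm.Model() as shape_demo:
    coefs = pm.Normal('coefs', mu=0, sigma=10, shape=5)           # vector of 5 coefficients
    weights = pm.Normal('weights', mu=0, sigma=1, shape=(5, 7))   # 5x7 matrix-valued variable
```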
###Code
help(pm.Normal) #try help(Model), help(Uniform) or help(basic_model)
###Output
Help on class Normal in module pymc3.distributions.continuous:
class Normal(pymc3.distributions.distribution.Continuous)
| Normal(name, *args, **kwargs)
|
| Univariate normal log-likelihood.
|
| The pdf of this distribution is
|
| .. math::
|
| f(x \mid \mu, \tau) =
| \sqrt{\frac{\tau}{2\pi}}
| \exp\left\{ -\frac{\tau}{2} (x-\mu)^2 \right\}
|
| Normal distribution can be parameterized either in terms of precision
| or standard deviation. The link between the two parametrizations is
| given by
|
| .. math::
|
| \tau = \dfrac{1}{\sigma^2}
|
| .. plot::
|
| import matplotlib.pyplot as plt
| import numpy as np
| import scipy.stats as st
| plt.style.use('seaborn-darkgrid')
| x = np.linspace(-5, 5, 1000)
| mus = [0., 0., 0., -2.]
| sds = [0.4, 1., 2., 0.4]
| for mu, sd in zip(mus, sds):
| pdf = st.norm.pdf(x, mu, sd)
| plt.plot(x, pdf, label=r'$\mu$ = {}, $\sigma$ = {}'.format(mu, sd))
| plt.xlabel('x', fontsize=12)
| plt.ylabel('f(x)', fontsize=12)
| plt.legend(loc=1)
| plt.show()
|
| ======== ==========================================
| Support :math:`x \in \mathbb{R}`
| Mean :math:`\mu`
| Variance :math:`\dfrac{1}{\tau}` or :math:`\sigma^2`
| ======== ==========================================
|
| Parameters
| ----------
| mu : float
| Mean.
| sd : float
| Standard deviation (sd > 0) (only required if tau is not specified).
| tau : float
| Precision (tau > 0) (only required if sd is not specified).
|
| Examples
| --------
| .. code-block:: python
|
| with pm.Model():
| x = pm.Normal('x', mu=0, sigma=10)
|
| with pm.Model():
| x = pm.Normal('x', mu=0, tau=1/23)
|
| Method resolution order:
| Normal
| pymc3.distributions.distribution.Continuous
| pymc3.distributions.distribution.Distribution
| builtins.object
|
| Methods defined here:
|
| __init__(self, mu=0, sigma=None, tau=None, **kwargs)
| Initialize self. See help(type(self)) for accurate signature.
|
| logcdf(self, value)
|
| logp(self, value)
| Calculate log-probability of Normal distribution at specified value.
|
| Parameters
| ----------
| value : numeric
| Value(s) for which log-probability is calculated. If the log probabilities for multiple
| values are desired the values must be provided in a numpy array or theano tensor
|
| Returns
| -------
| TensorVariable
|
| random(self, point=None, size=None)
| Draw random values from Normal distribution.
|
| Parameters
| ----------
| point : dict, optional
| Dict of variable values on which random values are to be
| conditioned (uses default point if not specified).
| size : int, optional
| Desired size of random sample (returns one sample if not
| specified).
|
| Returns
| -------
| array
|
| ----------------------------------------------------------------------
| Methods inherited from pymc3.distributions.distribution.Distribution:
|
| __getnewargs__(self)
|
| __latex__ = _repr_latex_(self, name=None, dist=None)
| Magic method name for IPython to use for LaTeX formatting.
|
| default(self)
|
| get_test_val(self, val, defaults)
|
| getattr_value(self, val)
|
| logp_nojac(self, *args, **kwargs)
| Return the logp, but do not include a jacobian term for transforms.
|
| If we use different parametrizations for the same distribution, we
| need to add the determinant of the jacobian of the transformation
| to make sure the densities still describe the same distribution.
| However, MAP estimates are not invariant with respect to the
| parametrization, we need to exclude the jacobian terms in this case.
|
| This function should be overwritten in base classes for transformed
| distributions.
|
| logp_sum(self, *args, **kwargs)
| Return the sum of the logp values for the given observations.
|
| Subclasses can use this to improve the speed of logp evaluations
| if only the sum of the logp values is needed.
|
| ----------------------------------------------------------------------
| Class methods inherited from pymc3.distributions.distribution.Distribution:
|
| dist(*args, **kwargs) from builtins.type
|
| ----------------------------------------------------------------------
| Static methods inherited from pymc3.distributions.distribution.Distribution:
|
| __new__(cls, name, *args, **kwargs)
| Create and return a new object. See help(type) for accurate signature.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from pymc3.distributions.distribution.Distribution:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
Having defined the priors, the next statement creates the expected value `mu` of the outcomes, specifying the linear relationship:```pythonmu = alpha + beta[0]*X1 + beta[1]*X2```This creates a **deterministic** random variable, which implies that its value is *completely* determined by its parents' values. That is, there is no uncertainty beyond that which is inherent in the parents' values. Here, `mu` is just the sum of the intercept `alpha` and the two products of the coefficients in `beta` and the predictor variables, whatever their values may be. PyMC3 random variables and data can be arbitrarily added, subtracted, divided, multiplied together and indexed-into to create new random variables. This allows for great model expressivity. Many common mathematical functions like `sum`, `sin`, `exp` and linear algebra functions like `dot` (for inner product) and `inv` (for inverse) are also provided. The final line of the model, defines `Y_obs`, the sampling distribution of the outcomes in the dataset.```pythonY_obs = Normal('Y_obs', mu=mu, sigma=sigma, observed=Y)```This is a special case of a stochastic variable that we call an **observed stochastic**, and represents the data likelihood of the model. It is identical to a standard stochastic, except that its `observed` argument, which passes the data to the variable, indicates that the values for this variable were observed, and should not be changed by any fitting algorithm applied to the model. The data can be passed in the form of either a `numpy.ndarray` or `pandas.DataFrame` object.Notice that, unlike for the priors of the model, the parameters for the normal distribution of `Y_obs` are not fixed values, but rather are the deterministic object `mu` and the stochastic `sigma`. This creates parent-child relationships between the likelihood and these two variables. Model fittingHaving completely specified our model, the next step is to obtain posterior estimates for the unknown variables in the model. Ideally, we could calculate the posterior estimates analytically, but for most non-trivial models, this is not feasible. We will consider two approaches, whose appropriateness depends on the structure of the model and the goals of the analysis: finding the *maximum a posteriori* (MAP) point using optimization methods, and computing summaries based on samples drawn from the posterior distribution using Markov Chain Monte Carlo (MCMC) sampling methods. Maximum a posteriori methodsThe **maximum a posteriori (MAP)** estimate for a model, is the mode of the posterior distribution and is generally found using numerical optimization methods. This is often fast and easy to do, but only gives a point estimate for the parameters and can be biased if the mode isn't representative of the distribution. PyMC3 provides this functionality with the `find_MAP` function.Below we find the MAP for our original model. The MAP is returned as a parameter **point**, which is always represented by a Python dictionary of variable names to NumPy arrays of parameter values.
###Code
map_estimate = pm.find_MAP(model=basic_model)
map_estimate
###Output
/Users/ckrapu/miniconda3/envs/pymc3-dev/lib/python3.7/site-packages/pymc3-3.5-py3.7.egg/pymc3/tuning/starting.py:61: UserWarning: find_MAP should not be used to initialize the NUTS sampler, simply call pymc3.sample() and it will automatically initialize NUTS in a better way.
logp = -149.58, ||grad|| = 12.242: 100%|██████████| 19/19 [00:00<00:00, 651.28it/s]
###Markdown
By default, `find_MAP` uses the Broyden–Fletcher–Goldfarb–Shanno (BFGS) optimization algorithm to find the maximum of the log-posterior but also allows selection of other optimization algorithms from the `scipy.optimize` module. For example, below we use Powell's method to find the MAP.
###Code
map_estimate = pm.find_MAP(model=basic_model, method='powell')
map_estimate
###Output
/Users/ckrapu/miniconda3/envs/pymc3-dev/lib/python3.7/site-packages/pymc3-3.5-py3.7.egg/pymc3/tuning/starting.py:61: UserWarning: find_MAP should not be used to initialize the NUTS sampler, simply call pymc3.sample() and it will automatically initialize NUTS in a better way.
warnings.warn('find_MAP should not be used to initialize the NUTS sampler, simply call pymc3.sample() and it will automatically initialize NUTS in a better way.')
0%| | 0/5000 [00:00<?, ?it/s]/Users/ckrapu/miniconda3/envs/pymc3-dev/lib/python3.7/site-packages/scipy/optimize/_minimize.py:502: RuntimeWarning: Method powell does not use gradient information (jac).
RuntimeWarning)
logp = -149.47, ||grad|| = 13.248: 100%|██████████| 177/177 [00:00<00:00, 591.98it/s]
###Markdown
It is important to note that the MAP estimate is not always reasonable, especially if the mode is at an extreme. This can be a subtle issue; with high dimensional posteriors, one can have areas of extremely high density but low total probability because the volume is very small. This will often occur in hierarchical models with the variance parameter for the random effect. If the individual group means are all the same, the posterior will have near infinite density if the scale parameter for the group means is almost zero, even though the probability of such a small scale parameter will be small since the group means must be extremely close together. Most techniques for finding the MAP estimate also only find a *local* optimum (which is often good enough), but can fail badly for multimodal posteriors if the different modes are meaningfully different.In summary, while PyMC3 provides the function `find_MAP()`, at this point mostly for historical reasons, this function is of little use in most scenarios. If you want a point estimate you should get it from the posterior. In the next section we will see how to get a posterior using sampling methods. Sampling methodsThough finding the MAP is a fast and easy way of obtaining estimates of the unknown model parameters, it is limited because there is no associated estimate of uncertainty produced with the MAP estimates. Instead, a simulation-based approach such as Markov chain Monte Carlo (MCMC) can be used to obtain a Markov chain of values that, given the satisfaction of certain conditions, are indistinguishable from samples from the _true_ posterior distribution. To conduct MCMC sampling to generate posterior samples in PyMC3, we specify a **step method** object that corresponds to a particular MCMC algorithm, such as Metropolis, Slice sampling, or the No-U-Turn Sampler (NUTS). PyMC3's `step_methods` submodule contains the following samplers: `NUTS`, `Metropolis`, `Slice`, `HamiltonianMC`, and `BinaryMetropolis`. These step methods can be assigned manually, or assigned automatically by PyMC3. Auto-assignment is based on the attributes of each variable in the model. In general:* Binary variables will be assigned to `BinaryMetropolis`* Discrete variables will be assigned to `Metropolis`* Continuous variables will be assigned to `NUTS`Auto-assignment can be overriden for any subset of variables by specifying them manually prior to sampling. Gradient-based sampling methodsPyMC3 has the standard sampling algorithms like adaptive Metropolis-Hastings and adaptive slice sampling, but PyMC3's most capable step method is the No-U-Turn Sampler. NUTS is especially useful on models that have many continuous parameters, a situation where other MCMC algorithms work very slowly. It takes advantage of information about where regions of higher probability are, based on the gradient of the log posterior-density. This helps it achieve dramatically faster convergence on large problems than traditional sampling methods achieve. PyMC3 relies on Theano to analytically compute model gradients via automatic differentiation of the posterior density. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo. For random variables that are undifferentiable (namely, discrete variables) NUTS cannot be used, but it may still be used on the differentiable variables in a model that contains undifferentiable variables. 
NUTS requires a scaling matrix parameter, which is analogous to the variance parameter for the jump proposal distribution in Metropolis-Hastings, although NUTS uses it somewhat differently. The matrix gives the rough shape of the distribution so that NUTS does not make jumps that are too large in some directions and too small in other directions. It is important to set this scaling parameter to a reasonable value to facilitate efficient sampling. This is especially true for models that have many unobserved stochastic random variables or models with highly non-normal posterior distributions. Poor scaling parameters will slow down NUTS significantly, sometimes almost stopping it completely. A reasonable starting point for sampling can also be important for efficient sampling, but not as often.`PyMC3` automatically initializes NUTS to reasonable values based on the variance of the samples obtained during a tuning phase. A little bit of noise is added to ensure different, parallel, chains start from different points. Also, `PyMC3` will automatically assign an appropriate sampler if we don't supply it via the `step` keyword argument (see below for an example of how to explicitly assign step methods).
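In addition to the `Slice` example shown further below, a hedged sketch of what an explicit assignment can look like (hypothetical trace name):

```python
# Hedged sketch: assign a Metropolis step to `sigma` manually; the remaining
# continuous variables still receive an auto-assigned NUTS step.
with basic_model:
    step_sigma = pm.Metropolis(vars=[sigma])
    trace_manual = pm.sample(1000, step=[step_sigma])
```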
###Code
with basic_model:
# draw 500 posterior samples
trace = pm.sample(500)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [sigma, beta, alpha]
Sampling 2 chains: 100%|██████████| 2000/2000 [00:02<00:00, 989.75draws/s]
###Markdown
The `sample` function runs the step method(s) assigned (or passed) to it for the given number of iterations and returns a `Trace` object containing the samples collected, in the order they were collected. The `trace` object can be queried in a similar way to a `dict` containing a map from variable names to `numpy.array`s. The first dimension of the array is the sampling index and the later dimensions match the shape of the variable. We can see the last 5 values for the `alpha` variable as follows:
###Code
trace['alpha'][-5:]
###Output
_____no_output_____
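Beyond dict-style indexing, the whole trace can be converted to a tabular form. A hedged sketch (the exact column names, e.g. `beta__0`, depend on the PyMC3 version):

```python
# Hedged sketch: one column per scalar parameter, one row per posterior draw.
df_trace = pm.trace_to_dataframe(trace)
print(df_trace.head())
```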
###Markdown
If we wanted to use the slice sampling algorithm to sample `sigma` instead of NUTS (which was assigned automatically), we could have specified this as the `step` argument for `sample`.
###Code
with basic_model:
# instantiate sampler
step = pm.Slice()
# draw 5000 posterior samples
trace = pm.sample(5000, step=step)
###Output
Multiprocess sampling (2 chains in 2 jobs)
CompoundStep
>Slice: [sigma]
>Slice: [beta]
>Slice: [alpha]
Sampling 2 chains: 100%|██████████| 11000/11000 [00:17<00:00, 637.97draws/s]
###Markdown
Posterior analysis`PyMC3` provides plotting and summarization functions for inspecting the sampling output. A simple posterior plot can be created using `traceplot`.
###Code
pm.traceplot(trace);
###Output
_____no_output_____
###Markdown
The left column consists of a smoothed histogram (using kernel density estimation) of the marginal posteriors of each stochastic random variable while the right column contains the samples of the Markov chain plotted in sequential order. The `beta` variable, being vector-valued, produces two histograms and two sample traces, corresponding to both predictor coefficients.In addition, the `summary` function provides a text-based output of common posterior statistics:
###Code
pm.summary(trace).round(2)
###Output
_____no_output_____
###Markdown
Case study 1: Stochastic volatilityWe present a case study of stochastic volatility, time varying stock market volatility, to illustrate PyMC3's use in addressing a more realistic problem. The distribution of market returns is highly non-normal, which makes sampling the volatilities significantly more difficult. This example has 400+ parameters so using common sampling algorithms like Metropolis-Hastings would get bogged down, generating highly autocorrelated samples. Instead, we use NUTS, which is dramatically more efficient. The ModelAsset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others they are very stable. Stochastic volatility models address this with a latent volatility variable, which changes over time. The following model is similar to the one described in the NUTS paper (Hoffman 2014, p. 21).$$\begin{aligned} \nu &\sim exp(0.1) \\ \sigma &\sim exp(50) \\ s_i &\sim \mathcal{N}(s_{i-1}, \sigma^2) \\ log(r_i) &\sim t(\nu, 0, exp(-2 s_i))\end{aligned}$$Here, $r$ is the daily return series which is modeled with a Student-t distribution with an unknown degrees of freedom parameter, and a scale parameter determined by a latent process $s$. The individual $s_i$ are the individual daily log volatilities in the latent log volatility process. The DataOur data consist of 401 daily returns of the S&P 500 stock market index during the 2008 financial crisis.
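As an aside, percent-change returns like these can be derived from raw closing prices. A hedged sketch, assuming a hypothetical local CSV of prices called `sp500_close.csv`:

```python
import pandas as pd

# Hedged sketch: day-over-day percent changes from a price series (hypothetical file).
prices = pd.read_csv('sp500_close.csv', parse_dates=True, index_col=0)['Close']
returns_from_prices = prices.pct_change().dropna()
```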
###Code
import pandas as pd
returns = pd.read_csv(pm.get_data('SP500.csv'), parse_dates=True, index_col=0)
len(returns)
returns.plot(figsize=(10, 6))
plt.ylabel('daily returns in %');
###Output
_____no_output_____
###Markdown
Model SpecificationAs with the linear regression example, specifying the model in PyMC3 mirrors its statistical specification. This model employs several new distributions: the `Exponential` distribution for the $\nu$ and $\sigma$ priors, the Student-T (`StudentT`) distribution for distribution of returns, and the `GaussianRandomWalk` for the prior for the latent volatilities. In PyMC3, variables with purely positive priors like `Exponential` are transformed with a log transform. This makes sampling more robust. Behind the scenes, a variable in the unconstrained space (named "variableName_log") is added to the model for sampling. In this model this happens behind the scenes for both the degrees of freedom, `nu`, and the scale parameter for the volatility process, `sigma`, since they both have exponential priors. Variables with priors that constrain them on two sides, like `Beta` or `Uniform`, are also transformed to be unconstrained but with a log odds transform. Although, unlike model specification in PyMC2, we do not typically provide starting points for variables at the model specification stage, we can also provide an initial value for any distribution (called a "test value") using the `testval` argument. This overrides the default test value for the distribution (usually the mean, median or mode of the distribution), and is most often useful if some values are illegal and we want to ensure we select a legal one. The test values for the distributions are also used as a starting point for sampling and optimization by default, though this is easily overriden. The vector of latent volatilities `s` is given a prior distribution by `GaussianRandomWalk`. As its name suggests GaussianRandomWalk is a vector valued distribution where the values of the vector form a random normal walk of length n, as specified by the `shape` argument. The scale of the innovations of the random walk, `sigma`, is specified in terms of the standard deviation of the normally distributed innovations and can be a scalar or vector.
###Code
with pm.Model() as sp500_model:
nu = pm.Exponential('nu', 1/10., testval=5.)
sigma = pm.Exponential('sigma', 1/0.02, testval=.1)
s = pm.GaussianRandomWalk('s', sigma=sigma, shape=len(returns))
volatility_process = pm.Deterministic('volatility_process', pm.math.exp(-2*s)**0.5)
r = pm.StudentT('r', nu=nu, sigma=volatility_process, observed=returns['change'])
###Output
_____no_output_____
###Markdown
Notice that we transform the log volatility process `s` into the volatility process by `exp(-2*s)`. Here, `exp` is a Theano function, rather than the corresponding function in NumPy; Theano provides a large subset of the mathematical functions that NumPy does.Also note that we have declared the `Model` name `sp500_model` in the first occurrence of the context manager, rather than splitting it into two lines, as we did for the first example. Fitting
###Code
with sp500_model:
trace = pm.sample(2000)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [s, sigma, nu]
Sampling 2 chains: 100%|██████████| 5000/5000 [03:14<00:00, 25.70draws/s]
The acceptance probability does not match the target. It is 0.6657586191001563, but should be close to 0.8. Try to increase the number of tuning steps.
The estimated number of effective samples is smaller than 200 for some parameters.
###Markdown
We can check our samples by looking at the traceplot for `nu` and `sigma`.
###Code
pm.traceplot(trace, varnames=['nu', 'sigma']);
###Output
_____no_output_____
###Markdown
Finally we plot the distribution of volatility paths by plotting many of our sampled volatility paths on the same graph. Each is rendered partially transparent (via the `alpha` argument in Matplotlib's `plot` function) so the regions where many paths overlap are shaded more darkly.
###Code
fig, ax = plt.subplots(figsize=(15, 8))
returns.plot(ax=ax)
ax.plot(returns.index, 1/np.exp(trace['s',::5].T), 'C3', alpha=.03);
ax.set(title='volatility_process', xlabel='time', ylabel='volatility');
ax.legend(['S&P500', 'stochastic volatility process']);
###Output
_____no_output_____
###Markdown
As you can see, the model correctly infers the increase in volatility during the 2008 financial crash. Moreover, note that this model is quite complex because of its high dimensionality and dependency structure in the random walk distribution. NUTS, as implemented in PyMC3, however, correctly infers the posterior distribution with ease. Case study 2: Coal mining disastersConsider the following time series of recorded coal mining disasters in the UK from 1851 to 1962 (Jarrett, 1979). The number of disasters is thought to have been affected by changes in safety regulations during this period. Unfortunately, we also have a pair of years with missing data, identified as missing by a NumPy MaskedArray using -999 as the marker value. Next we will build a model for this series and attempt to estimate when the change occurred. At the same time, we will see how to handle missing data, use multiple samplers and sample from discrete random variables.
###Code
disaster_data = np.ma.masked_values([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, -999, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, -999, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1], value=-999)
years = np.arange(1851, 1962)
plt.plot(years, disaster_data, 'o', markersize=8);
plt.ylabel("Disaster count")
plt.xlabel("Year");
###Output
_____no_output_____
###Markdown
Occurrences of disasters in the time series are thought to follow a Poisson process with a large rate parameter in the early part of the time series, and one with a smaller rate in the later part. We are interested in locating the change point in the series, which perhaps is related to changes in mining safety regulations.In our model, $$ \begin{aligned} D_t &\sim \text{Pois}(r_t), r_t= \begin{cases} e, & \text{if } t \le s \\ l, & \text{if } t \gt s \end{cases} \\ s &\sim \text{Unif}(t_l, t_h)\\ e &\sim \text{exp}(1)\\ l &\sim \text{exp}(1) \end{aligned}$$ The parameters are defined as follows: * $D_t$: The number of disasters in year $t$ * $r_t$: The rate parameter of the Poisson distribution of disasters in year $t$. * $s$: The year in which the rate parameter changes (the switchpoint). * $e$: The rate parameter before the switchpoint $s$. * $l$: The rate parameter after the switchpoint $s$. * $t_l$, $t_h$: The lower and upper boundaries of year $t$. This model is built much like our previous models. The major differences are the introduction of discrete variables with the Poisson and discrete-uniform priors and the novel form of the deterministic random variable `rate`.
###Code
with pm.Model() as disaster_model:
switchpoint = pm.DiscreteUniform('switchpoint', lower=years.min(), upper=years.max(), testval=1900)
# Priors for pre- and post-switch rates number of disasters
early_rate = pm.Exponential('early_rate', 1)
late_rate = pm.Exponential('late_rate', 1)
# Allocate appropriate Poisson rates to years before and after current
rate = pm.math.switch(switchpoint >= years, early_rate, late_rate)
disasters = pm.Poisson('disasters', rate, observed=disaster_data)
###Output
/Users/ckrapu/miniconda3/envs/pymc3-dev/lib/python3.7/site-packages/pymc3-3.5-py3.7.egg/pymc3/model.py:1266: UserWarning: Data in disasters contains missing values and will be automatically imputed from the sampling distribution.
warnings.warn(impute_message, UserWarning)
###Markdown
The logic for the rate random variable,```pythonrate = switch(switchpoint >= year, early_rate, late_rate)```is implemented using `switch`, a Theano function that works like an if statement. It uses the first argument to switch between the next two arguments.Missing values are handled transparently by passing a `MaskedArray` or a `pandas.DataFrame` with NaN values to the `observed` argument when creating an observed stochastic random variable. Behind the scenes, another random variable, `disasters.missing_values` is created to model the missing values. All we need to do to handle the missing values is ensure we sample this random variable as well. Unfortunately, because they are discrete variables and thus have no meaningful gradient, we cannot use NUTS for sampling `switchpoint` or the missing disaster observations. Instead, we will sample using a `Metropolis` step method, which implements adaptive Metropolis-Hastings, because it is designed to handle discrete values. `PyMC3` automatically assigns the correct sampling algorithms.
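As a rough, hedged sketch (reusing the `disaster_model` context and variable names defined above), the step methods could also be constructed explicitly and passed to `sample`; any variables left out of the lists are still assigned automatically:

```python
# Hedged sketch: assigning step methods explicitly instead of relying on
# PyMC3's auto-assignment (names refer to disaster_model defined above).
with disaster_model:
    step_cont = pm.NUTS([early_rate, late_rate])   # gradient-based sampler for the continuous rates
    step_disc = pm.Metropolis([switchpoint])       # gradient-free sampler for the discrete switchpoint
    # Variables not listed (e.g. the imputed missing values) are still auto-assigned.
    trace = pm.sample(10000, step=[step_cont, step_disc])
```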
###Code
with disaster_model:
trace = pm.sample(10000)
###Output
Multiprocess sampling (2 chains in 2 jobs)
CompoundStep
>CompoundStep
>>Metropolis: [disasters_missing]
>>Metropolis: [switchpoint]
>NUTS: [late_rate, early_rate]
Sampling 2 chains: 100%|██████████| 21000/21000 [00:30<00:00, 691.21draws/s]
The number of effective samples is smaller than 10% for some parameters.
###Markdown
In the trace plot below we can see that there's about a 10 year span that's plausible for a significant change in safety, but a 5 year span that contains most of the probability mass. The distribution is jagged because of the jumpy relationship between the year switchpoint and the likelihood and not due to sampling error.
###Code
pm.traceplot(trace);
###Output
_____no_output_____
###Markdown
The following plot shows the switch point as an orange vertical line, together with its HPD as a semitransparent band. The dashed black line shows the accident rate.
###Code
plt.figure(figsize=(10, 8))
plt.plot(years, disaster_data, '.')
plt.ylabel("Number of accidents", fontsize=16)
plt.xlabel("Year", fontsize=16)
plt.vlines(trace['switchpoint'].mean(), disaster_data.min(), disaster_data.max(), color='C1')
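# Posterior-average accident rate for each year: average the early/late rates over the switchpoint posterior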
average_disasters = np.zeros_like(disaster_data, dtype='float')
for i, year in enumerate(years):
idx = year < trace['switchpoint']
average_disasters[i] = (trace['early_rate'][idx].sum() + trace['late_rate'][~idx].sum()) / (len(trace) * trace.nchains)
sp_hpd = pm.hpd(trace['switchpoint'])
plt.fill_betweenx(y=[disaster_data.min(), disaster_data.max()],
x1=sp_hpd[0], x2=sp_hpd[1], alpha=0.5, color='C1');
plt.plot(years, average_disasters, 'k--', lw=2);
###Output
_____no_output_____
###Markdown
Arbitrary deterministicsDue to its reliance on Theano, PyMC3 provides many mathematical functions and operators for transforming random variables into new random variables. However, the library of functions in Theano is not exhaustive; therefore, Theano and PyMC3 provide functionality for creating arbitrary Theano functions in pure Python, and including these functions in PyMC models. This is supported with the `as_op` function decorator.Theano needs to know the types of the inputs and outputs of a function, which are specified for `as_op` by `itypes` for inputs and `otypes` for outputs. The Theano documentation includes [an overview of the available types](http://deeplearning.net/software/theano/library/tensor/basic.html#all-fully-typed-constructors).
###Code
import theano.tensor as tt
from theano.compile.ops import as_op
@as_op(itypes=[tt.lscalar], otypes=[tt.lscalar])
def crazy_modulo3(value):
if value > 0:
return value % 3
else :
return (-value + 1) % 3
with pm.Model() as model_deterministic:
a = pm.Poisson('a', 1)
b = crazy_modulo3(a)
###Output
_____no_output_____
###Markdown
An important drawback of this approach is that it is not possible for `theano` to inspect these functions in order to compute the gradient required for the Hamiltonian-based samplers. Therefore, it is not possible to use the HMC or NUTS samplers for a model that uses such an operator. However, it is possible to add a gradient if we inherit from `theano.Op` instead of using `as_op`. The PyMC example set includes [a more elaborate example of the usage of as_op](https://github.com/pymc-devs/pymc3/blob/master/pymc3/examples/disaster_model_theano_op.py). Arbitrary distributionsSimilarly, the library of statistical distributions in PyMC3 is not exhaustive, but PyMC3 allows for the creation of user-defined functions for an arbitrary probability distribution. For simple statistical distributions, the `DensityDist` function takes as an argument any function that calculates a log-probability $log(p(x))$. This function may employ other random variables in its calculation. Here is an example inspired by a blog post by Jake Vanderplas on which priors to use for a linear regression (Vanderplas, 2014). ```pythonimport theano.tensor as ttwith pm.Model() as model: alpha = pm.Uniform('intercept', -100, 100) Create custom densities beta = pm.DensityDist('beta', lambda value: -1.5 * tt.log(1 + value**2), testval=0) eps = pm.DensityDist('eps', lambda value: -tt.log(tt.abs_(value)), testval=1) Create likelihood like = pm.Normal('y_est', mu=alpha + beta * X, sigma=eps, observed=Y)``` For more complex distributions, one can create a subclass of `Continuous` or `Discrete` and provide the custom `logp` function, as required. This is how the built-in distributions in PyMC are specified. As an example, fields like psychology and astrophysics have complex likelihood functions for a particular process that may require numerical approximation. In these cases, it is impossible to write the function in terms of predefined theano operators and we must use a custom theano operator using `as_op` or inheriting from `theano.Op`. Implementing the `beta` variable above as a `Continuous` subclass is shown below, along with a sub-function.
###Code
class Beta(pm.Continuous):
def __init__(self, mu, *args, **kwargs):
super(Beta, self).__init__(*args, **kwargs)
self.mu = mu
self.mode = mu
def logp(self, value):
mu = self.mu
return beta_logp(value - mu)
def beta_logp(value):
return -1.5 * np.log(1 + (value)**2)
with pm.Model() as model:
beta = Beta('slope', mu=0, testval=0)
###Output
_____no_output_____
###Markdown
If your logp cannot be expressed in Theano, you can decorate the function with `as_op` as follows: `@as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])`. Note that this will create a blackbox Python function that will be much slower and not provide the gradients necessary for e.g. NUTS. Generalized Linear ModelsGeneralized Linear Models (GLMs) are a class of flexible models that are widely used to estimate regression relationships between a single outcome variable and one or multiple predictors. Because these models are so common, `PyMC3` offers a `glm` submodule that allows flexible creation of various GLMs with an intuitive `R`-like syntax that is implemented via the `patsy` module.The `glm` submodule requires data to be included as a `pandas` `DataFrame`. Hence, for our linear regression example:
###Code
# Convert X and Y to a pandas DataFrame
import pandas
df = pandas.DataFrame({'x1': X1, 'x2': X2, 'y': Y})
###Output
_____no_output_____
###Markdown
The model can then be very concisely specified in one line of code.
###Code
from pymc3.glm import GLM
with pm.Model() as model_glm:
GLM.from_formula('y ~ x1 + x2', df)
trace = pm.sample()
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [sd, x2, x1, Intercept]
Sampling 2 chains: 100%|██████████| 2000/2000 [00:02<00:00, 980.21draws/s]
###Markdown
The error distribution, if not specified via the `family` argument, is assumed to be normal. In the case of logistic regression, this can be modified by passing in a `Binomial` family object.
###Code
from pymc3.glm.families import Binomial
df_logistic = pandas.DataFrame({'x1': X1, 'y': Y > np.median(Y)})
with pm.Model() as model_glm_logistic:
GLM.from_formula('y ~ x1', df_logistic, family=Binomial())
###Output
_____no_output_____
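As a brief, hedged sketch, the logistic model defined above could then be sampled and summarized in the usual way (the trace name is illustrative):

```python
# Hedged sketch: sampling and summarizing the logistic GLM defined above.
with model_glm_logistic:
    trace_logistic = pm.sample()
pm.summary(trace_logistic)
```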
###Markdown
Getting started with PyMC3Authors: John Salvatier, Thomas V. Wiecki, Christopher FonnesbeckNote: This text is taken from the [PeerJ CS publication on PyMC3](https://peerj.com/articles/cs-55/). AbstractProbabilistic Programming allows for automatic Bayesian inference on user-defined probabilistic models. Recent advances in Markov chain Monte Carlo (MCMC) sampling allow inference on increasingly complex models. This class of MCMC, known as Hamiltonian Monte Carlo, requires gradient information which is often not readily available. PyMC3 is a new open source Probabilistic Programming framework written in Python that uses Theano to compute gradients via automatic differentiation as well as compile probabilistic programs on-the-fly to C for increased speed. Contrary to other Probabilistic Programming languages, PyMC3 allows model specification directly in Python code. The lack of a domain specific language allows for great flexibility and direct interaction with the model. This paper is a tutorial-style introduction to this software package. IntroductionProbabilistic programming (PP) allows flexible specification of Bayesian statistical models in code. PyMC3 is a new, open-source PP framework with an intuitive and readable, yet powerful, syntax that is close to the natural syntax statisticians use to describe models. It features next-generation Markov chain Monte Carlo (MCMC) sampling algorithms such as the No-U-Turn Sampler (NUTS; Hoffman, 2014), a self-tuning variant of Hamiltonian Monte Carlo (HMC; Duane, 1987). This class of samplers works well on high dimensional and complex posterior distributions and allows many complex models to be fit without specialized knowledge about fitting algorithms. HMC and NUTS take advantage of gradient information from the likelihood to achieve much faster convergence than traditional sampling methods, especially for larger models. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo, which means you usually don't need to have specialized knowledge about how the algorithms work. PyMC3, Stan (Stan Development Team, 2014), and the LaplacesDemon package for R are currently the only PP packages to offer HMC.Probabilistic programming in Python confers a number of advantages including multi-platform compatibility, an expressive yet clean and readable syntax, easy integration with other scientific libraries, and extensibility via C, C++, Fortran or Cython. These features make it relatively straightforward to write and use custom statistical distributions, samplers and transformation functions, as required by Bayesian analysis.While most of PyMC3's user-facing features are written in pure Python, it leverages Theano (Bergstra et al., 2010) to transparently transcode models to C and compile them to machine code, thereby boosting performance. Theano is a library that allows expressions to be defined using generalized vector data structures called *tensors*, which are tightly integrated with the popular NumPy `ndarray` data structure, and similarly allow for broadcasting and advanced indexing, just as NumPy arrays do. Theano also automatically optimizes the likelihood's computational graph for speed and provides simple GPU integration.Here, we present a primer on the use of PyMC3 for solving general Bayesian statistical inference and prediction problems. 
We will first see the basics of how to use PyMC3, motivated by a simple example: installation, data creation, model definition, model fitting and posterior analysis. Then we will cover two case studies and use them to show how to define and fit more sophisticated models. Finally we will show how to extend PyMC3 and discuss other useful features: the Generalized Linear Models subpackage, custom distributions, custom transformations and alternative storage backends. InstallationRunning PyMC3 requires a working Python interpreter, either version 2.7 (or more recent) or 3.4 (or more recent); we recommend that new users install version 3.4. A complete Python installation for Mac OSX, Linux and Windows can most easily be obtained by downloading and installing the free [`Anaconda Python Distribution`](https://store.continuum.io/cshop/anaconda/) by ContinuumIO. `PyMC3` can be installed using `pip` (https://pip.pypa.io/en/latest/installing.html):```pip install git+https://github.com/pymc-devs/pymc3```PyMC3 depends on several third-party Python packages which will be automatically installed when installing via pip. The four required dependencies are: `Theano`, `NumPy`, `SciPy`, and `Matplotlib`. To take full advantage of PyMC3, the optional dependencies `Pandas` and `Patsy` should also be installed. These are *not* automatically installed, but can be installed by:```pip install patsy pandas```The source code for PyMC3 is hosted on GitHub at https://github.com/pymc-devs/pymc3 and is distributed under the liberal [Apache License 2.0](https://github.com/pymc-devs/pymc3/blob/master/LICENSE). On the GitHub site, users may also report bugs and other issues, as well as contribute code to the project, which we actively encourage. A Motivating Example: Linear RegressionTo introduce model definition, fitting and posterior analysis, we first consider a simple Bayesian linear regression model with normal priors for the parameters. We are interested in predicting outcomes $Y$ as normally-distributed observations with an expected value $\mu$ that is a linear function of two predictor variables, $X_1$ and $X_2$.$$\begin{aligned} Y &\sim \mathcal{N}(\mu, \sigma^2) \\\mu &= \alpha + \beta_1 X_1 + \beta_2 X_2\end{aligned}$$where $\alpha$ is the intercept, and $\beta_i$ is the coefficient for covariate $X_i$, while $\sigma$ represents the observation error. Since we are constructing a Bayesian model, the unknown variables in the model must be assigned a prior distribution. We choose zero-mean normal priors with variance of 100 for both regression coefficients, which corresponds to *weak* information regarding the true parameter values. We choose a half-normal distribution (normal distribution bounded at zero) as the prior for $\sigma$.$$\begin{aligned} \alpha &\sim \mathcal{N}(0, 100) \\\beta_i &\sim \mathcal{N}(0, 100) \\\sigma &\sim \lvert\mathcal{N}(0, 1){\rvert}\end{aligned}$$ Generating dataWe can simulate some artificial data from this model using only NumPy's `random` module, and then use PyMC3 to try to recover the corresponding parameters. We are intentionally generating the data to closely correspond the PyMC3 model structure.
###Code
import numpy as np
import matplotlib.pyplot as plt
# Initialize random number generator
np.random.seed(123)
# True parameter values
alpha, sigma = 1, 1
beta = [1, 2.5]
# Size of dataset
size = 100
# Predictor variable
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
# Simulate outcome variable
Y = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma
###Output
_____no_output_____
###Markdown
Here is what the simulated data look like. We use the `pyplot` module from the plotting library matplotlib.
###Code
%matplotlib inline
fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,4))
axes[0].scatter(X1, Y)
axes[1].scatter(X2, Y)
axes[0].set_ylabel('Y'); axes[0].set_xlabel('X1'); axes[1].set_xlabel('X2');
###Output
_____no_output_____
###Markdown
Model SpecificationSpecifying this model in PyMC3 is straightforward because the syntax is very close to the statistical notation. For the most part, each line of Python code corresponds to a line in the model notation above. First, we import PyMC. We use the convention of importing it as `pm`.
###Code
import pymc3 as pm
###Output
_____no_output_____
###Markdown
Now we build our model, which we will present in full first, then explain each part line-by-line.
###Code
basic_model = pm.Model()
with basic_model:
# Priors for unknown model parameters
alpha = pm.Normal('alpha', mu=0, sd=10)
beta = pm.Normal('beta', mu=0, sd=10, shape=2)
sigma = pm.HalfNormal('sigma', sd=1)
# Expected value of outcome
mu = alpha + beta[0]*X1 + beta[1]*X2
# Likelihood (sampling distribution) of observations
Y_obs = pm.Normal('Y_obs', mu=mu, sd=sigma, observed=Y)
###Output
_____no_output_____
###Markdown
The first line,```pythonbasic_model = Model()```creates a new `Model` object which is a container for the model random variables.Following instantiation of the model, the subsequent specification of the model components is performed inside a `with` statement:```pythonwith basic_model:```This creates a *context manager*, with our `basic_model` as the context, that includes all statements until the indented block ends. This means all PyMC3 objects introduced in the indented code block below the `with` statement are added to the model behind the scenes. Absent this context manager idiom, we would be forced to manually associate each of the variables with `basic_model` right after we create them. If you try to create a new random variable without a `with model:` statement, it will raise an error since there is no obvious model for the variable to be added to.The first three statements in the context manager:```pythonalpha = Normal('alpha', mu=0, sd=10)beta = Normal('beta', mu=0, sd=10, shape=2)sigma = HalfNormal('sigma', sd=1)```create **stochastic** random variables with Normal prior distributions for the regression coefficients, with a mean of 0 and standard deviation of 10, and a half-normal distribution for the standard deviation of the observations, $\sigma$. These are stochastic because their values are partly determined by their parents in the dependency graph of random variables, which for priors are simple constants, and partly random (or stochastic). We call the `Normal` constructor to create a random variable to use as a normal prior. The first argument is always the *name* of the random variable, which should almost always match the name of the Python variable being assigned to, since it is sometimes used to retrieve the variable from the model for summarizing output. The remaining required arguments for a stochastic object are the parameters, in this case `mu`, the mean, and `sd`, the standard deviation, which we assign hyperparameter values for the model. In general, a distribution's parameters are values that determine the location, shape or scale of the random variable, depending on the parameterization of the distribution. Most commonly used distributions, such as `Beta`, `Exponential`, `Categorical`, `Gamma`, `Binomial` and many others, are available in PyMC3.The `beta` variable has an additional `shape` argument to denote it as a vector-valued parameter of size 2. The `shape` argument is available for all distributions and specifies the length or shape of the random variable, but is optional for scalar variables, since it defaults to a value of one. It can be an integer, to specify an array, or a tuple, to specify a multidimensional array (*e.g.* `shape=(5,7)` makes a random variable that takes on 5 by 7 matrix values). Detailed notes about distributions, sampling methods and other PyMC3 functions are available via the `help` function.
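As a small, hedged illustration of the `shape` argument (the model and variable names below are only for demonstration):

```python
# Hedged sketch: vector- and matrix-valued random variables via `shape`.
with pm.Model() as shape_demo:
    coefs = pm.Normal('coefs', mu=0, sd=10, shape=3)            # vector of length 3
    weights = pm.Normal('weights', mu=0, sd=10, shape=(5, 7))   # 5 x 7 matrix of values
```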
###Code
help(pm.Normal) #try help(Model), help(Uniform) or help(basic_model)
###Output
Help on class Normal in module pymc3.distributions.continuous:
class Normal(pymc3.distributions.distribution.Continuous)
| Univariate normal log-likelihood.
|
| .. math::
|
| f(x \mid \mu, \tau) =
| \sqrt{\frac{\tau}{2\pi}}
| \exp\left\{ -\frac{\tau}{2} (x-\mu)^2 \right\}
|
| ======== ==========================================
| Support :math:`x \in \mathbb{R}`
| Mean :math:`\mu`
| Variance :math:`\dfrac{1}{\tau}` or :math:`\sigma^2`
| ======== ==========================================
|
| Normal distribution can be parameterized either in terms of precision
| or standard deviation. The link between the two parametrizations is
| given by
|
| .. math::
|
| \tau = \dfrac{1}{\sigma^2}
|
| Parameters
| ----------
| mu : float
| Mean.
| sd : float
| Standard deviation (sd > 0).
| tau : float
| Precision (tau > 0).
|
| Method resolution order:
| Normal
| pymc3.distributions.distribution.Continuous
| pymc3.distributions.distribution.Distribution
| builtins.object
|
| Methods defined here:
|
| __init__(self, mu=0, sd=None, tau=None, **kwargs)
| Initialize self. See help(type(self)) for accurate signature.
|
| logp(self, value)
|
| random(self, point=None, size=None, repeat=None)
|
| ----------------------------------------------------------------------
| Methods inherited from pymc3.distributions.distribution.Distribution:
|
| __getnewargs__(self)
|
| default(self)
|
| get_test_val(self, val, defaults)
|
| getattr_value(self, val)
|
| ----------------------------------------------------------------------
| Class methods inherited from pymc3.distributions.distribution.Distribution:
|
| dist(*args, **kwargs) from builtins.type
|
| ----------------------------------------------------------------------
| Static methods inherited from pymc3.distributions.distribution.Distribution:
|
| __new__(cls, name, *args, **kwargs)
| Create and return a new object. See help(type) for accurate signature.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from pymc3.distributions.distribution.Distribution:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
Having defined the priors, the next statement creates the expected value `mu` of the outcomes, specifying the linear relationship:```pythonmu = alpha + beta[0]*X1 + beta[1]*X2```This creates a **deterministic** random variable, which implies that its value is *completely* determined by its parents' values. That is, there is no uncertainty beyond that which is inherent in the parents' values. Here, `mu` is just the sum of the intercept `alpha` and the two products of the coefficients in `beta` and the predictor variables, whatever their values may be. PyMC3 random variables and data can be arbitrarily added, subtracted, divided, multiplied together and indexed-into to create new random variables. This allows for great model expressivity. Many common mathematical functions like `sum`, `sin`, `exp` and linear algebra functions like `dot` (for inner product) and `inv` (for inverse) are also provided. The final line of the model, defines `Y_obs`, the sampling distribution of the outcomes in the dataset.```pythonY_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)```This is a special case of a stochastic variable that we call an **observed stochastic**, and represents the data likelihood of the model. It is identical to a standard stochastic, except that its `observed` argument, which passes the data to the variable, indicates that the values for this variable were observed, and should not be changed by any fitting algorithm applied to the model. The data can be passed in the form of either a `numpy.ndarray` or `pandas.DataFrame` object.Notice that, unlike for the priors of the model, the parameters for the normal distribution of `Y_obs` are not fixed values, but rather are the deterministic object `mu` and the stochastic `sigma`. This creates parent-child relationships between the likelihood and these two variables. Model fittingHaving completely specified our model, the next step is to obtain posterior estimates for the unknown variables in the model. Ideally, we could calculate the posterior estimates analytically, but for most non-trivial models, this is not feasible. We will consider two approaches, whose appropriateness depends on the structure of the model and the goals of the analysis: finding the *maximum a posteriori* (MAP) point using optimization methods, and computing summaries based on samples drawn from the posterior distribution using Markov Chain Monte Carlo (MCMC) sampling methods. Maximum a posteriori methodsThe **maximum a posteriori (MAP)** estimate for a model, is the mode of the posterior distribution and is generally found using numerical optimization methods. This is often fast and easy to do, but only gives a point estimate for the parameters and can be biased if the mode isn't representative of the distribution. PyMC3 provides this functionality with the `find_MAP` function.Below we find the MAP for our original model. The MAP is returned as a parameter **point**, which is always represented by a Python dictionary of variable names to NumPy arrays of parameter values.
###Code
map_estimate = pm.find_MAP(model=basic_model)
map_estimate
###Output
Optimization terminated successfully.
Current function value: 149.017982
Iterations: 16
Function evaluations: 21
Gradient evaluations: 21
###Markdown
By default, `find_MAP` uses the Broyden–Fletcher–Goldfarb–Shanno (BFGS) optimization algorithm to find the maximum of the log-posterior but also allows selection of other optimization algorithms from the `scipy.optimize` module. For example, below we use Powell's method to find the MAP.
###Code
from scipy import optimize
map_estimate = pm.find_MAP(model=basic_model, fmin=optimize.fmin_powell)
map_estimate
###Output
Optimization terminated successfully.
Current function value: 149.019762
Iterations: 4
Function evaluations: 176
###Markdown
It is important to note that the MAP estimate is not always reasonable, especially if the mode is at an extreme. This can be a subtle issue; with high dimensional posteriors, one can have areas of extremely high density but low total probability because the volume is very small. This will often occur in hierarchical models with the variance parameter for the random effect. If the individual group means are all the same, the posterior will have near infinite density if the scale parameter for the group means is almost zero, even though the probability of such a small scale parameter will be small since the group means must be extremely close together. Most techniques for finding the MAP estimate also only find a *local* optimum (which is often good enough), but can fail badly for multimodal posteriors if the different modes are meaningfully different. Sampling methodsThough finding the MAP is a fast and easy way of obtaining estimates of the unknown model parameters, it is limited because there is no associated estimate of uncertainty produced with the MAP estimates. Instead, a simulation-based approach such as Markov chain Monte Carlo (MCMC) can be used to obtain a Markov chain of values that, given the satisfaction of certain conditions, are indistinguishable from samples from the posterior distribution. To conduct MCMC sampling to generate posterior samples in PyMC3, we specify a **step method** object that corresponds to a particular MCMC algorithm, such as Metropolis, Slice sampling, or the No-U-Turn Sampler (NUTS). PyMC3's `step_methods` submodule contains the following samplers: `NUTS`, `Metropolis`, `Slice`, `HamiltonianMC`, and `BinaryMetropolis`. These step methods can be assigned manually, or assigned automatically by PyMC3. Auto-assignment is based on the attributes of each variable in the model. In general:* Binary variables will be assigned to `BinaryMetropolis`* Discrete variables will be assigned to `Metropolis`* Continuous variables will be assigned to `NUTS`Auto-assignment can be overriden for any subset of variables by specifying them manually prior to sampling. Gradient-based sampling methodsPyMC3 has the standard sampling algorithms like adaptive Metropolis-Hastings and adaptive slice sampling, but PyMC3's most capable step method is the No-U-Turn Sampler. NUTS is especially useful on models that have many continuous parameters, a situation where other MCMC algorithms work very slowly. It takes advantage of information about where regions of higher probability are, based on the gradient of the log posterior-density. This helps it achieve dramatically faster convergence on large problems than traditional sampling methods achieve. PyMC3 relies on Theano to analytically compute model gradients via automatic differentiation of the posterior density. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo. For random variables that are undifferentiable (namely, discrete variables) NUTS cannot be used, but it may still be used on the differentiable variables in a model that contains undifferentiable variables. NUTS requires a scaling matrix parameter, which is analogous to the variance parameter for the jump proposal distribution in Metropolis-Hastings, although NUTS uses it somewhat differently. The matrix gives the rough shape of the distribution so that NUTS does not make jumps that are too large in some directions and too small in other directions. 
It is important to set this scaling parameter to a reasonable value to facilitate efficient sampling. This is especially true for models that have many unobserved stochastic random variables or models with highly non-normal posterior distributions. Poor scaling parameters will slow down NUTS significantly, sometimes almost stopping it completely. A reasonable starting point for sampling can also be important for efficient sampling, but not as often.Fortunately `PyMC3` automatically initializes NUTS using another inference algorithm called ADVI (auto-diff variational inference). Moreover, `PyMC3` will automatically assign an appropriate sampler if we don't supply it via the `step` keyword argument (see below for an example of how to explicitly assign step methods).
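As a hedged sketch (argument values are purely illustrative), the number of draws, tuning steps, chains and the initialization routine can also be specified explicitly; the next cell simply uses the defaults:

```python
# Hedged sketch: pm.sample with explicit options instead of the defaults.
with basic_model:
    trace_explicit = pm.sample(draws=1000, tune=1000, chains=2, init='advi')
```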
###Code
from scipy import optimize
with basic_model:
# draw 500 posterior samples
trace = pm.sample()
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using ADVI...
Average Loss = 156.08: 5%|▌ | 10932/200000 [00:01<00:31, 6082.63it/s]
Convergence archived at 11100
Interrupted at 11,100 [5%]: Average Loss = 237.04
100%|██████████| 1000/1000 [00:01<00:00, 710.94it/s]
###Markdown
The `sample` function runs the step method(s) assigned (or passed) to it for the given number of iterations and returns a `Trace` object containing the samples collected, in the order they were collected. The `trace` object can be queried in a similar way to a `dict` containing a map from variable names to `numpy.array`s. The first dimension of the array is the sampling index and the later dimensions match the shape of the variable. We can see the last 5 values for the `alpha` variable as follows:
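A couple of other common access patterns, shown as a hedged sketch:

```python
# Hedged sketch: other ways of pulling samples out of the trace object.
print(trace.varnames)                             # names of all sampled variables
alpha_post = trace.get_values('alpha', burn=50)   # discard the first 50 draws of each chain
print(alpha_post.shape)
```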
###Code
trace['alpha'][-5:]
###Output
_____no_output_____
###Markdown
If we wanted to use the slice sampling algorithm for `sigma` instead of NUTS (which was assigned automatically), we could have specified this as the `step` argument for `sample`.
###Code
with basic_model:
# obtain starting values via MAP
start = pm.find_MAP(fmin=optimize.fmin_powell)
# instantiate sampler
step = pm.Slice()
# draw 5000 posterior samples
trace = pm.sample(5000, step=step, start=start)
###Output
1%| | 38/5500 [00:00<00:14, 376.26it/s]
###Markdown
Posterior analysis`PyMC3` provides plotting and summarization functions for inspecting the sampling output. A simple posterior plot can be created using `traceplot`.
###Code
_ = pm.traceplot(trace)
###Output
_____no_output_____
###Markdown
The left column consists of a smoothed histogram (using kernel density estimation) of the marginal posteriors of each stochastic random variable while the right column contains the samples of the Markov chain plotted in sequential order. The `beta` variable, being vector-valued, produces two histograms and two sample traces, corresponding to both predictor coefficients.In addition, the `summary` function provides a text-based output of common posterior statistics:
###Code
pm.summary(trace)
###Output
alpha:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.905 0.099 0.001 [0.715, 1.103]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.711 0.838 0.904 0.971 1.101
beta:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.949 0.086 0.002 [0.786, 1.118]
2.599 0.507 0.014 [1.590, 3.591]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.784 0.889 0.948 1.007 1.117
1.593 2.256 2.605 2.940 3.599
sigma:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.991 0.073 0.001 [0.852, 1.134]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.859 0.941 0.986 1.037 1.147
###Markdown
Case study 1: Stochastic volatilityWe present a case study of stochastic volatility, time varying stock market volatility, to illustrate PyMC3's use in addressing a more realistic problem. The distribution of market returns is highly non-normal, which makes sampling the volatilities significantly more difficult. This example has 400+ parameters so using common sampling algorithms like Metropolis-Hastings would get bogged down, generating highly autocorrelated samples. Instead, we use NUTS, which is dramatically more efficient. The ModelAsset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others they are very stable. Stochastic volatility models address this with a latent volatility variable, which changes over time. The following model is similar to the one described in the NUTS paper (Hoffman 2014, p. 21).$$\begin{aligned} \sigma &\sim exp(50) \\ \nu &\sim exp(.1) \\ s_i &\sim \mathcal{N}(s_{i-1}, \sigma^{-2}) \\ log(y_i) &\sim t(\nu, 0, exp(-2 s_i))\end{aligned}$$Here, $y$ is the daily return series which is modeled with a Student-t distribution with an unknown degrees of freedom parameter, and a scale parameter determined by a latent process $s$. The individual $s_i$ are the individual daily log volatilities in the latent log volatility process. The DataOur data consist of daily returns of the S&P 500 during the 2008 financial crisis. Here, we use `pandas-datareader` to obtain the price data from Yahoo!-Finance; it can be installed with `pip install pandas-datareader`.
###Code
from pandas_datareader import data
import pandas as pd
returns = data.get_data_google('SPY', start='2008-5-1', end='2009-12-1')['Close'].pct_change()
len(returns)
returns.plot(figsize=(10, 6))
plt.ylabel('daily returns in %');
###Output
_____no_output_____
###Markdown
Model SpecificationAs with the linear regression example, specifying the model in PyMC3 mirrors its statistical specification. This model employs several new distributions: the `Exponential` distribution for the $\nu$ and $\sigma$ priors, the Student-T (`StudentT`) distribution for distribution of returns, and the `GaussianRandomWalk` for the prior for the latent volatilities. In PyMC3, variables with purely positive priors like `Exponential` are transformed with a log transform. This makes sampling more robust. Behind the scenes, a variable in the unconstrained space (named "variableName_log") is added to the model for sampling. In this model this happens behind the scenes for both the degrees of freedom, `nu`, and the scale parameter for the volatility process, `sigma`, since they both have exponential priors. Variables with priors that constrain them on two sides, like `Beta` or `Uniform`, are also transformed to be unconstrained but with a log odds transform. Although, unlike model specification in PyMC2, we do not typically provide starting points for variables at the model specification stage, we can also provide an initial value for any distribution (called a "test value") using the `testval` argument. This overrides the default test value for the distribution (usually the mean, median or mode of the distribution), and is most often useful if some values are illegal and we want to ensure we select a legal one. The test values for the distributions are also used as a starting point for sampling and optimization by default, though this is easily overriden. The vector of latent volatilities `s` is given a prior distribution by `GaussianRandomWalk`. As its name suggests GaussianRandomWalk is a vector valued distribution where the values of the vector form a random normal walk of length n, as specified by the `shape` argument. The scale of the innovations of the random walk, `sigma`, is specified in terms of the precision of the normally distributed innovations and can be a scalar or vector.
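As a small, hedged aside illustrating the automatic transformation described above (the model and variable names are only for demonstration):

```python
# Hedged sketch: a positive-support prior is sampled on the log scale;
# the transformed variable appears among the model's free variables.
with pm.Model() as transform_demo:
    rate = pm.Exponential('rate', 1.0)
print(transform_demo.vars)   # expect something like [rate_log__]
```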
###Code
with pm.Model() as sp500_model:
nu = pm.Exponential('nu', 1./10, testval=5.)
sigma = pm.Exponential('sigma', 1./.02, testval=.1)
s = pm.GaussianRandomWalk('s', sigma**-2, shape=len(returns))
volatility_process = pm.Deterministic('volatility_process', pm.math.exp(-2*s))
r = pm.StudentT('r', nu, lam=1/volatility_process, observed=returns)
###Output
_____no_output_____
###Markdown
Notice that we transform the log volatility process `s` into the volatility process by `exp(-2*s)`. Here, `exp` is a Theano function, rather than the corresponding function in NumPy; Theano provides a large subset of the mathematical functions that NumPy does.Also note that we have declared the `Model` name `sp500_model` in the first occurrence of the context manager, rather than splitting it into two lines, as we did for the first example. Fitting
###Code
with sp500_model:
trace = pm.sample(2000)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using ADVI...
Average Loss = -868.62: 32%|███▏ | 63550/200000 [00:23<00:50, 2717.77it/s]
Convergence archived at 63600
Interrupted at 63,600 [31%]: Average Loss = 559.54
100%|██████████| 2500/2500 [02:37<00:00, 15.91it/s]
###Markdown
We can check our samples by looking at the traceplot for `nu` and `sigma`.
###Code
pm.traceplot(trace, [nu, sigma]);
###Output
_____no_output_____
###Markdown
Finally we plot the distribution of volatility paths by plotting many of our sampled volatility paths on the same graph. Each is rendered partially transparent (via the `alpha` argument in Matplotlib's `plot` function) so the regions where many paths overlap are shaded more darkly.
###Code
fig, ax = plt.subplots(figsize=(15, 8))
returns.plot(ax=ax)
ax.plot(returns.index, 1/np.exp(trace['s',::5].T), 'r', alpha=.03);
ax.set(title='volatility_process', xlabel='time', ylabel='volatility');
ax.legend(['S&P500', 'stochastic volatility process'])
###Output
_____no_output_____
###Markdown
As you can see, the model correctly infers the increase in volatility during the 2008 financial crash. Moreover, note that this model is quite complex because of its high dimensionality and dependency-structure in the random walk distribution. NUTS as implemented in PyMC3, however, correctly infers the posterior distribution with ease. Case study 2: Coal mining disastersConsider the following time series of recorded coal mining disasters in the UK from 1851 to 1962 (Jarrett, 1979). The number of disasters is thought to have been affected by changes in safety regulations during this period. Unfortunately, we also have a pair of years with missing data, identified as missing by a NumPy MaskedArray using -999 as the marker value. Next we will build a model for this series and attempt to estimate when the change occurred. At the same time, we will see how to handle missing data, use multiple samplers and sample from discrete random variables.
###Code
disaster_data = np.ma.masked_values([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, -999, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, -999, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1], value=-999)
year = np.arange(1851, 1962)
plt.plot(year, disaster_data, 'o', markersize=8);
plt.ylabel("Disaster count")
plt.xlabel("Year")
###Output
_____no_output_____
###Markdown
Occurrences of disasters in the time series are thought to follow a Poisson process with a large rate parameter in the early part of the time series, and one with a smaller rate in the later part. We are interested in locating the change point in the series, which perhaps is related to changes in mining safety regulations.In our model, $$ \begin{aligned} D_t &\sim \text{Pois}(r_t), r_t= \begin{cases} e, & \text{if } t \le s \\ l, & \text{if } t \gt s \end{cases} \\ s &\sim \text{Unif}(t_l, t_h)\\ e &\sim \text{exp}(1)\\ l &\sim \text{exp}(1) \end{aligned}$$the parameters are defined as follows: * $D_t$: The number of disasters in year $t$ * $r_t$: The rate parameter of the Poisson distribution of disasters in year $t$. * $s$: The year in which the rate parameter changes (the switchpoint). * $e$: The rate parameter before the switchpoint $s$. * $l$: The rate parameter after the switchpoint $s$. * $t_l$, $t_h$: The lower and upper boundaries of year $t$. This model is built much like our previous models. The major differences are the introduction of discrete variables with the Poisson and discrete-uniform priors and the novel form of the deterministic random variable `rate`.
###Code
with pm.Model() as disaster_model:
switchpoint = pm.DiscreteUniform('switchpoint', lower=year.min(), upper=year.max(), testval=1900)
# Priors for pre- and post-switch rates number of disasters
early_rate = pm.Exponential('early_rate', 1)
late_rate = pm.Exponential('late_rate', 1)
# Allocate appropriate Poisson rates to years before and after current
rate = pm.math.switch(switchpoint >= year, early_rate, late_rate)
disasters = pm.Poisson('disasters', rate, observed=disaster_data)
###Output
_____no_output_____
###Markdown
The logic for the rate random variable,```pythonrate = switch(switchpoint >= year, early_rate, late_rate)```is implemented using `switch`, a Theano function that works like an if statement. It uses the first argument to switch between the next two arguments.Missing values are handled transparently by passing a `MaskedArray` or a `pandas.DataFrame` with NaN values to the `observed` argument when creating an observed stochastic random variable. Behind the scenes, another random variable, `disasters.missing_values` is created to model the missing values. All we need to do to handle the missing values is ensure we sample this random variable as well. Unfortunately, because they are discrete variables and thus have no meaningful gradient, we cannot use NUTS for sampling `switchpoint` or the missing disaster observations. Instead, we will sample using a `Metropolis` step method, which implements adaptive Metropolis-Hastings, because it is designed to handle discrete values. `PyMC3` automatically assigns the correct sampling algorithms.
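As a hedged sketch, the automatically created missing-value variable can be seen by listing the model's free variables:

```python
# Hedged sketch: the imputed-missing-values variable shows up among the
# model's free variables (name follows the `<name>_missing` convention).
print([v.name for v in disaster_model.vars])   # expect something like [..., 'disasters_missing']
```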
###Code
with disaster_model:
trace = pm.sample(10000)
###Output
Assigned Metropolis to switchpoint
Assigned NUTS to early_rate_log__
Assigned NUTS to late_rate_log__
Assigned Metropolis to disasters_missing
100%|██████████| 10500/10500 [01:11<00:00, 146.76it/s]
###Markdown
In the trace plot below we can see that there's about a 10 year span that's plausible for a significant change in safety, but a 5 year span that contains most of the probability mass. The distribution is jagged because of the jumpy relationship between the year switchpoint and the likelihood and not due to sampling error.
###Code
pm.traceplot(trace);
###Output
_____no_output_____
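A hedged sketch of summarizing the switchpoint posterior numerically, to complement the trace plot above:

```python
# Hedged sketch: numeric summary of the switchpoint posterior.
print('Posterior mean switchpoint: {:.1f}'.format(trace['switchpoint'].mean()))
print('HPD interval:', pm.hpd(trace['switchpoint']))
```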
###Markdown
Arbitrary deterministicsDue to its reliance on Theano, PyMC3 provides many mathematical functions and operators for transforming random variables into new random variables. However, the library of functions in Theano is not exhaustive; therefore, Theano and PyMC3 provide functionality for creating arbitrary Theano functions in pure Python, and including these functions in PyMC models. This is supported with the `as_op` function decorator.Theano needs to know the types of the inputs and outputs of a function, which are specified for `as_op` by `itypes` for inputs and `otypes` for outputs. The Theano documentation includes [an overview of the available types](http://deeplearning.net/software/theano/library/tensor/basic.html#all-fully-typed-constructors).
###Code
import theano.tensor as tt
from theano.compile.ops import as_op
@as_op(itypes=[tt.lscalar], otypes=[tt.lscalar])
def crazy_modulo3(value):
if value > 0:
return value % 3
else :
return (-value + 1) % 3
with pm.Model() as model_deterministic:
a = pm.Poisson('a', 1)
b = crazy_modulo3(a)
###Output
_____no_output_____
###Markdown
An important drawback of this approach is that it is not possible for `theano` to inspect these functions in order to compute the gradient required for the Hamiltonian-based samplers. Therefore, it is not possible to use the HMC or NUTS samplers for a model that uses such an operator. However, it is possible to add a gradient if we inherit from `theano.Op` instead of using `as_op`. The PyMC example set includes [a more elaborate example of the usage of as_op](https://github.com/pymc-devs/pymc3/blob/master/pymc3/examples/disaster_model_theano_op.py). Arbitrary distributionsSimilarly, the library of statistical distributions in PyMC3 is not exhaustive, but PyMC3 allows for the creation of user-defined functions for an arbitrary probability distribution. For simple statistical distributions, the `DensityDist` function takes as an argument any function that calculates a log-probability $log(p(x))$. This function may employ other random variables in its calculation. Here is an example inspired by a blog post by Jake Vanderplas on which priors to use for a linear regression (Vanderplas, 2014). ```pythonimport theano.tensor as ttwith pm.Model() as model: alpha = pm.Uniform('intercept', -100, 100) Create custom densities beta = pm.DensityDist('beta', lambda value: -1.5 * tt.log(1 + value**2), testval=0) eps = pm.DensityDist('eps', lambda value: -tt.log(tt.abs_(value)), testval=1) Create likelihood like = pm.Normal('y_est', mu=alpha + beta * X, sd=eps, observed=Y)``` For more complex distributions, one can create a subclass of `Continuous` or `Discrete` and provide the custom `logp` function, as required. This is how the built-in distributions in PyMC are specified. As an example, fields like psychology and astrophysics have complex likelihood functions for a particular process that may require numerical approximation. In these cases, it is impossible to write the function in terms of predefined theano operators and we must use a custom theano operator using `as_op` or inheriting from `theano.Op`. Implementing the `beta` variable above as a `Continuous` subclass is shown below, along with a sub-function.
###Code
class Beta(pm.Continuous):
def __init__(self, mu, *args, **kwargs):
super(Beta, self).__init__(*args, **kwargs)
self.mu = mu
self.mode = mu
def logp(self, value):
mu = self.mu
return beta_logp(value - mu)
def beta_logp(value):
return -1.5 * np.log(1 + (value)**2)
with pm.Model() as model:
beta = Beta('slope', mu=0, testval=0)
###Output
_____no_output_____
###Markdown
If your logp cannot be expressed in Theano, you can decorate the function with `as_op` as follows: `@as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])`. Note that this will create a blackbox Python function that will be much slower and not provide the gradients necessary for e.g. NUTS. Generalized Linear ModelsGeneralized Linear Models (GLMs) are a class of flexible models that are widely used to estimate regression relationships between a single outcome variable and one or multiple predictors. Because these models are so common, `PyMC3` offers a `glm` submodule that allows flexible creation of various GLMs with an intuitive `R`-like syntax that is implemented via the `patsy` module.The `glm` submodule requires data to be included as a `pandas` `DataFrame`. Hence, for our linear regression example:
###Code
# Convert X and Y to a pandas DataFrame
import pandas
df = pandas.DataFrame({'x1': X1, 'x2': X2, 'y': Y})
###Output
_____no_output_____
###Markdown
The model can then be very concisely specified in one line of code.
###Code
from pymc3.glm import GLM
with pm.Model() as model_glm:
GLM.from_formula('y ~ x1 + x2', df)
trace = pm.sample()
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using ADVI...
Average Loss = 164.12: 5%|▌ | 10845/200000 [00:01<00:21, 8809.19it/s]
Convergence archived at 11100
Interrupted at 11,100 [5%]: Average Loss = 220.44
100%|██████████| 1000/1000 [00:01<00:00, 831.50it/s]
###Markdown
The error distribution, if not specified via the `family` argument, is assumed to be normal. In the case of logistic regression, this can be modified by passing in a `Binomial` family object.
###Code
from pymc3.glm.families import Binomial
df_logistic = pandas.DataFrame({'x1': X1, 'y': Y > np.median(Y)})
with pm.Model() as model_glm_logistic:
GLM.from_formula('y ~ x1', df_logistic, family=Binomial())
###Output
_____no_output_____
###Markdown
Getting started with PyMC3Authors: John Salvatier, Thomas V. Wiecki, Christopher FonnesbeckNote: This text is based on the [PeerJ CS publication on PyMC3](https://peerj.com/articles/cs-55/). AbstractProbabilistic Programming allows for automatic Bayesian inference on user-defined probabilistic models. Recent advances in Markov chain Monte Carlo (MCMC) sampling allow inference on increasingly complex models. This class of MCMC, known as Hamiltonian Monte Carlo, requires gradient information which is often not readily available. PyMC3 is a new open source Probabilistic Programming framework written in Python that uses Theano to compute gradients via automatic differentiation as well as compile probabilistic programs on-the-fly to C for increased speed. Contrary to other Probabilistic Programming languages, PyMC3 allows model specification directly in Python code. The lack of a domain specific language allows for great flexibility and direct interaction with the model. This paper is a tutorial-style introduction to this software package. IntroductionProbabilistic programming (PP) allows flexible specification of Bayesian statistical models in code. PyMC3 is a new, open-source PP framework with an intuitive and readable, yet powerful, syntax that is close to the natural syntax statisticians use to describe models. It features next-generation Markov chain Monte Carlo (MCMC) sampling algorithms such as the No-U-Turn Sampler (NUTS; Hoffman, 2014), a self-tuning variant of Hamiltonian Monte Carlo (HMC; Duane, 1987). This class of samplers works well on high dimensional and complex posterior distributions and allows many complex models to be fit without specialized knowledge about fitting algorithms. HMC and NUTS take advantage of gradient information from the likelihood to achieve much faster convergence than traditional sampling methods, especially for larger models. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo, which means you usually don't need to have specialized knowledge about how the algorithms work. PyMC3, Stan (Stan Development Team, 2014), and the LaplacesDemon package for R are currently the only PP packages to offer HMC.Probabilistic programming in Python confers a number of advantages including multi-platform compatibility, an expressive yet clean and readable syntax, easy integration with other scientific libraries, and extensibility via C, C++, Fortran or Cython. These features make it relatively straightforward to write and use custom statistical distributions, samplers and transformation functions, as required by Bayesian analysis.While most of PyMC3's user-facing features are written in pure Python, it leverages Theano (Bergstra et al., 2010) to transparently transcode models to C and compile them to machine code, thereby boosting performance. Theano is a library that allows expressions to be defined using generalized vector data structures called *tensors*, which are tightly integrated with the popular NumPy `ndarray` data structure, and similarly allow for broadcasting and advanced indexing, just as NumPy arrays do. Theano also automatically optimizes the likelihood's computational graph for speed and provides simple GPU integration.Here, we present a primer on the use of PyMC3 for solving general Bayesian statistical inference and prediction problems. 
We will first see the basics of how to use PyMC3, motivated by a simple example: installation, data creation, model definition, model fitting and posterior analysis. Then we will cover two case studies and use them to show how to define and fit more sophisticated models. Finally we will show how to extend PyMC3 and discuss other useful features: the Generalized Linear Models subpackage, custom distributions, custom transformations and alternative storage backends. InstallationRunning PyMC3 requires a working Python interpreter, either version 2.7 (or more recent) or 3.5 (or more recent); we recommend that new users install version 3.5. A complete Python installation for Mac OSX, Linux and Windows can most easily be obtained by downloading and installing the free [`Anaconda Python Distribution`](https://store.continuum.io/cshop/anaconda/) by ContinuumIO. `PyMC3` can be installed using `pip` (https://pip.pypa.io/en/latest/installing.html):```pip install pymc3```Or via conda:```conda install pymc3```The current development branch of PyMC3 can be installed from GitHub, also using pip:```pip install git+https://github.com/pymc-devs/pymc3```The source code for PyMC3 is hosted on GitHub at https://github.com/pymc-devs/pymc3 and is distributed under the liberal [Apache License 2.0](https://github.com/pymc-devs/pymc3/blob/master/LICENSE). On the GitHub site, users may also report bugs and other issues, as well as contribute documentation or code to the project, which we actively encourage. A Motivating Example: Linear RegressionTo introduce model definition, fitting and posterior analysis, we first consider a simple Bayesian linear regression model with normal priors for the parameters. We are interested in predicting outcomes $Y$ as normally-distributed observations with an expected value $\mu$ that is a linear function of two predictor variables, $X_1$ and $X_2$.$$\begin{aligned} Y &\sim \mathcal{N}(\mu, \sigma^2) \\\mu &= \alpha + \beta_1 X_1 + \beta_2 X_2\end{aligned}$$where $\alpha$ is the intercept, and $\beta_i$ is the coefficient for covariate $X_i$, while $\sigma$ represents the observation error. Since we are constructing a Bayesian model, we must assign a prior distribution to the unknown variables in the model. We choose zero-mean normal priors with variance of 100 for both regression coefficients, which corresponds to *weak* information regarding the true parameter values. We choose a half-normal distribution (normal distribution bounded at zero) as the prior for $\sigma$.$$\begin{aligned} \alpha &\sim \mathcal{N}(0, 100) \\\beta_i &\sim \mathcal{N}(0, 100) \\\sigma &\sim \lvert\mathcal{N}(0, 1){\rvert}\end{aligned}$$ Generating dataWe can simulate some artificial data from this model using only NumPy's `random` module, and then use PyMC3 to try to recover the corresponding parameters. We are intentionally generating the data to closely correspond the PyMC3 model structure.
###Code
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
# Initialize random number generator
np.random.seed(123)
# True parameter values
alpha, sigma = 1, 1
beta = [1, 2.5]
# Size of dataset
size = 100
# Predictor variable
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
# Simulate outcome variable
Y = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma
###Output
_____no_output_____
###Markdown
Here is what the simulated data look like. We use the `pyplot` module from the plotting library matplotlib.
###Code
%matplotlib inline
fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,4))
axes[0].scatter(X1, Y)
axes[1].scatter(X2, Y)
axes[0].set_ylabel('Y'); axes[0].set_xlabel('X1'); axes[1].set_xlabel('X2');
###Output
_____no_output_____
###Markdown
Model SpecificationSpecifying this model in PyMC3 is straightforward because the syntax is very close to the statistical notation. For the most part, each line of Python code corresponds to a line in the model notation above. First, we import PyMC. We use the convention of importing it as `pm`.
###Code
import pymc3 as pm
print('Running on PyMC3 v{}'.format(pm.__version__))
###Output
Running on PyMC3 v3.6
###Markdown
Now we build our model, which we will present in full first, then explain each part line-by-line.
###Code
basic_model = pm.Model()
with basic_model:
# Priors for unknown model parameters
alpha = pm.Normal('alpha', mu=0, sigma=10)
beta = pm.Normal('beta', mu=0, sigma=10, shape=2)
sigma = pm.HalfNormal('sigma', sigma=1)
# Expected value of outcome
mu = alpha + beta[0]*X1 + beta[1]*X2
# Likelihood (sampling distribution) of observations
Y_obs = pm.Normal('Y_obs', mu=mu, sigma=sigma, observed=Y)
###Output
_____no_output_____
###Markdown
The first line,```pythonbasic_model = Model()```creates a new `Model` object which is a container for the model random variables.Following instantiation of the model, the subsequent specification of the model components is performed inside a `with` statement:```pythonwith basic_model:```This creates a *context manager*, with our `basic_model` as the context, that includes all statements until the indented block ends. This means all PyMC3 objects introduced in the indented code block below the `with` statement are added to the model behind the scenes. Absent this context manager idiom, we would be forced to manually associate each of the variables with `basic_model` right after we create them. If you try to create a new random variable without a `with model:` statement, it will raise an error since there is no obvious model for the variable to be added to.The first three statements in the context manager:```pythonalpha = Normal('alpha', mu=0, sigma=10)beta = Normal('beta', mu=0, sigma=10, shape=2)sigma = HalfNormal('sigma', sigma=1)```create **stochastic** random variables with Normal prior distributions for the regression coefficients with a mean of 0 and standard deviation of 10, and a half-normal distribution for the standard deviation of the observations, $\sigma$. These are stochastic because their values are partly determined by its parents in the dependency graph of random variables, which for priors are simple constants, and partly random (or stochastic). We call the `Normal` constructor to create a random variable to use as a normal prior. The first argument is always the *name* of the random variable, which should almost always match the name of the Python variable being assigned to, since it is sometimes used to retrieve the variable from the model for summarizing output. The remaining required arguments for a stochastic object are the parameters, in this case `mu`, the mean, and `sd`, the standard deviation, which we assign hyperparameter values for the model. In general, a distribution's parameters are values that determine the location, shape or scale of the random variable, depending on the parameterization of the distribution. Most commonly used distributions, such as `Beta`, `Exponential`, `Categorical`, `Gamma`, `Binomial` and many others, are available in PyMC3.The `beta` variable has an additional `shape` argument to denote it as a vector-valued parameter of size 2. The `shape` argument is available for all distributions and specifies the length or shape of the random variable, but is optional for scalar variables, since it defaults to a value of one. It can be an integer, to specify an array, or a tuple, to specify a multidimensional array (*e.g.* `shape=(5,7)` makes random variable that takes on 5 by 7 matrix values). Detailed notes about distributions, sampling methods and other PyMC3 functions are available in the [API documentation](https://docs.pymc.io/api.html). Having defined the priors, the next statement creates the expected value `mu` of the outcomes, specifying the linear relationship:```pythonmu = alpha + beta[0]*X1 + beta[1]*X2```This creates a **deterministic** random variable, which implies that its value is *completely* determined by its parents' values. That is, there is no uncertainty beyond that which is inherent in the parents' values. Here, `mu` is just the sum of the intercept `alpha` and the two products of the coefficients in `beta` and the predictor variables, whatever their values may be. 
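For illustration, here is a minimal sketch of the `shape` argument discussed above, in a throwaway model (the names `shape_demo`, `vec` and `mat` are made up for this example and are not part of `basic_model`):
```python
with pm.Model() as shape_demo:
    vec = pm.Normal('vec', mu=0, sigma=10, shape=2)      # vector of two random variables
    mat = pm.Normal('mat', mu=0, sigma=1, shape=(5, 7))  # 5 x 7 matrix-valued random variable
    total = vec.sum() + mat.sum()                        # arithmetic on random variables yields a new random variable
```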
PyMC3 random variables and data can be arbitrarily added, subtracted, divided, multiplied together and indexed-into to create new random variables. This allows for great model expressivity. Many common mathematical functions like `sum`, `sin`, `exp` and linear algebra functions like `dot` (for inner product) and `inv` (for inverse) are also provided. The final line of the model, defines `Y_obs`, the sampling distribution of the outcomes in the dataset.```pythonY_obs = Normal('Y_obs', mu=mu, sigma=sigma, observed=Y)```This is a special case of a stochastic variable that we call an **observed stochastic**, and represents the data likelihood of the model. It is identical to a standard stochastic, except that its `observed` argument, which passes the data to the variable, indicates that the values for this variable were observed, and should not be changed by any fitting algorithm applied to the model. The data can be passed in the form of either a `numpy.ndarray` or `pandas.DataFrame` object.Notice that, unlike for the priors of the model, the parameters for the normal distribution of `Y_obs` are not fixed values, but rather are the deterministic object `mu` and the stochastic `sigma`. This creates parent-child relationships between the likelihood and these two variables. Model fittingHaving completely specified our model, the next step is to obtain posterior estimates for the unknown variables in the model. Ideally, we could calculate the posterior estimates analytically, but for most non-trivial models, this is not feasible. We will consider two approaches, whose appropriateness depends on the structure of the model and the goals of the analysis: finding the *maximum a posteriori* (MAP) point using optimization methods, and computing summaries based on samples drawn from the posterior distribution using Markov Chain Monte Carlo (MCMC) sampling methods. Maximum a posteriori methodsThe **maximum a posteriori (MAP)** estimate for a model, is the mode of the posterior distribution and is generally found using numerical optimization methods. This is often fast and easy to do, but only gives a point estimate for the parameters and can be biased if the mode isn't representative of the distribution. PyMC3 provides this functionality with the `find_MAP` function.Below we find the MAP for our original model. The MAP is returned as a parameter **point**, which is always represented by a Python dictionary of variable names to NumPy arrays of parameter values.
###Code
map_estimate = pm.find_MAP(model=basic_model)
map_estimate
###Output
/Users/twiecki/working/projects/pymc/pymc3/tuning/starting.py:61: UserWarning: find_MAP should not be used to initialize the NUTS sampler, simply call pymc3.sample() and it will automatically initialize NUTS in a better way.
warnings.warn('find_MAP should not be used to initialize the NUTS sampler, simply call pymc3.sample() and it will automatically initialize NUTS in a better way.')
logp = -149.58, ||grad|| = 12.242: 100%|██████████| 19/19 [00:00<00:00, 1478.46it/s]
###Markdown
By default, `find_MAP` uses the Broyden–Fletcher–Goldfarb–Shanno (BFGS) optimization algorithm to find the maximum of the log-posterior but also allows selection of other optimization algorithms from the `scipy.optimize` module. For example, below we use Powell's method to find the MAP.
###Code
map_estimate = pm.find_MAP(model=basic_model, method='powell')
map_estimate
###Output
/Users/twiecki/working/projects/pymc/pymc3/tuning/starting.py:61: UserWarning: find_MAP should not be used to initialize the NUTS sampler, simply call pymc3.sample() and it will automatically initialize NUTS in a better way.
warnings.warn('find_MAP should not be used to initialize the NUTS sampler, simply call pymc3.sample() and it will automatically initialize NUTS in a better way.')
0%| | 0/5000 [00:00<?, ?it/s]/Users/twiecki/anaconda3/lib/python3.6/site-packages/scipy/optimize/_minimize.py:502: RuntimeWarning: Method powell does not use gradient information (jac).
RuntimeWarning)
logp = -149.47, ||grad|| = 13.248: 100%|██████████| 177/177 [00:00<00:00, 1276.05it/s]
###Markdown
It is important to note that the MAP estimate is not always reasonable, especially if the mode is at an extreme. This can be a subtle issue; with high dimensional posteriors, one can have areas of extremely high density but low total probability because the volume is very small. This will often occur in hierarchical models with the variance parameter for the random effect. If the individual group means are all the same, the posterior will have near infinite density if the scale parameter for the group means is almost zero, even though the probability of such a small scale parameter will be small since the group means must be extremely close together. Most techniques for finding the MAP estimate also only find a *local* optimum (which is often good enough), but can fail badly for multimodal posteriors if the different modes are meaningfully different.In summary, while PyMC3 provides the function `find_MAP()`, at this point mostly for historical reasons, this function is of little use in most scenarios. If you want a point estimate you should get it from the posterior. In the next section we will see how to get a posterior using sampling methods. Sampling methodsThough finding the MAP is a fast and easy way of obtaining estimates of the unknown model parameters, it is limited because there is no associated estimate of uncertainty produced with the MAP estimates. Instead, a simulation-based approach such as Markov chain Monte Carlo (MCMC) can be used to obtain a Markov chain of values that, given the satisfaction of certain conditions, are indistinguishable from samples from the _true_ posterior distribution. To conduct MCMC sampling to generate posterior samples in PyMC3, we specify a **step method** object that corresponds to a particular MCMC algorithm, such as Metropolis, Slice sampling, or the No-U-Turn Sampler (NUTS). PyMC3's `step_methods` submodule contains the following samplers: `NUTS`, `Metropolis`, `Slice`, `HamiltonianMC`, and `BinaryMetropolis`. These step methods can be assigned manually, or assigned automatically by PyMC3. Auto-assignment is based on the attributes of each variable in the model. In general:* Binary variables will be assigned to `BinaryMetropolis`* Discrete variables will be assigned to `Metropolis`* Continuous variables will be assigned to `NUTS`Auto-assignment can be overriden for any subset of variables by specifying them manually prior to sampling. Gradient-based sampling methodsPyMC3 has the standard sampling algorithms like adaptive Metropolis-Hastings and adaptive slice sampling, but PyMC3's most capable step method is the No-U-Turn Sampler. NUTS is especially useful on models that have many continuous parameters, a situation where other MCMC algorithms work very slowly. It takes advantage of information about where regions of higher probability are, based on the gradient of the log posterior-density. This helps it achieve dramatically faster convergence on large problems than traditional sampling methods achieve. PyMC3 relies on Theano to analytically compute model gradients via automatic differentiation of the posterior density. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo. For random variables that are undifferentiable (namely, discrete variables) NUTS cannot be used, but it may still be used on the differentiable variables in a model that contains undifferentiable variables. 
NUTS requires a scaling matrix parameter, which is analogous to the variance parameter for the jump proposal distribution in Metropolis-Hastings, although NUTS uses it somewhat differently. The matrix gives the rough shape of the distribution so that NUTS does not make jumps that are too large in some directions and too small in other directions. It is important to set this scaling parameter to a reasonable value to facilitate efficient sampling. This is especially true for models that have many unobserved stochastic random variables or models with highly non-normal posterior distributions. Poor scaling parameters will slow down NUTS significantly, sometimes almost stopping it completely. A reasonable starting point for sampling can also be important for efficient sampling, but not as often.`PyMC3` automatically initializes NUTS to reasonable values based on the variance of the samples obtained during a tuning phase. A little bit of noise is added to ensure different, parallel, chains start from different points. Also, `PyMC3` will automatically assign an appropriate sampler if we don't supply it via the `step` keyword argument (see below for an example of how to explicitly assign step methods).
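The `sample` call in the next cell relies on these defaults. For reference, a hedged sketch of some commonly used keyword arguments of `pm.sample` (the values shown are purely illustrative, not recommendations):
```python
with basic_model:
    trace = pm.sample(draws=500,        # posterior samples to keep per chain
                      tune=1000,        # tuning (warm-up) iterations, discarded afterwards
                      chains=2,         # number of independent chains
                      cores=2,          # chains run in parallel processes
                      random_seed=123)  # for reproducibility
```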
###Code
with basic_model:
# draw 500 posterior samples
trace = pm.sample(500)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [sigma, beta, alpha]
Sampling 2 chains: 100%|██████████| 2000/2000 [00:01<00:00, 1479.97draws/s]
/Users/twiecki/anaconda3/lib/python3.6/site-packages/mkl_fft/_numpy_fft.py:1044: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
output = mkl_fft.rfftn_numpy(a, s, axes)
###Markdown
The `sample` function runs the step method(s) assigned (or passed) to it for the given number of iterations and returns a `Trace` object containing the samples collected, in the order they were collected. The `trace` object can be queried in a similar way to a `dict` containing a map from variable names to `numpy.array`s. The first dimension of the array is the sampling index and the later dimensions match the shape of the variable. We can see the last 5 values for the `alpha` variable as follows:
###Code
trace['alpha'][-5:]
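# A couple of other hedged ways to inspect the trace (attribute names as of this PyMC3 version):
# trace.varnames lists the variables stored in the trace, and
# trace['beta'].shape is (number of kept samples, 2) for the vector-valued beta.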
###Output
_____no_output_____
###Markdown
If we wanted to use the slice sampling algorithm to sample `sigma` instead of NUTS (which was assigned automatically), we could have specified this as the `step` argument for `sample`.
###Code
with basic_model:
# instantiate sampler
step = pm.Slice()
# draw 5000 posterior samples
trace = pm.sample(5000, step=step)
###Output
Multiprocess sampling (2 chains in 2 jobs)
CompoundStep
>Slice: [sigma]
>Slice: [beta]
>Slice: [alpha]
Sampling 2 chains: 100%|██████████| 11000/11000 [00:09<00:00, 1156.62draws/s]
/Users/twiecki/anaconda3/lib/python3.6/site-packages/mkl_fft/_numpy_fft.py:1044: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
output = mkl_fft.rfftn_numpy(a, s, axes)
###Markdown
Posterior analysis`PyMC3` provides plotting and summarization functions for inspecting the sampling output. A simple posterior plot can be created using `traceplot`.
###Code
pm.traceplot(trace);
###Output
_____no_output_____
###Markdown
The left column consists of a smoothed histogram (using kernel density estimation) of the marginal posteriors of each stochastic random variable while the right column contains the samples of the Markov chain plotted in sequential order. The `beta` variable, being vector-valued, produces two histograms and two sample traces, corresponding to both predictor coefficients.In addition, the `summary` function provides a text-based output of common posterior statistics:
###Code
pm.summary(trace).round(2)
###Output
/Users/twiecki/anaconda3/lib/python3.6/site-packages/mkl_fft/_numpy_fft.py:1044: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
output = mkl_fft.rfftn_numpy(a, s, axes)
###Markdown
Case study 1: Stochastic volatilityWe present a case study of stochastic volatility, time varying stock market volatility, to illustrate PyMC3's use in addressing a more realistic problem. The distribution of market returns is highly non-normal, which makes sampling the volatilities significantly more difficult. This example has 400+ parameters so using common sampling algorithms like Metropolis-Hastings would get bogged down, generating highly autocorrelated samples. Instead, we use NUTS, which is dramatically more efficient. The ModelAsset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others they are very stable. Stochastic volatility models address this with a latent volatility variable, which changes over time. The following model is similar to the one described in the NUTS paper (Hoffman 2014, p. 21).$$\begin{aligned} \nu &\sim exp(0.1) \\ \sigma &\sim exp(50) \\ s_i &\sim \mathcal{N}(s_{i-1}, \sigma^2) \\ log(r_i) &\sim t(\nu, 0, exp(-2 s_i))\end{aligned}$$Here, $r$ is the daily return series which is modeled with a Student-t distribution with an unknown degrees of freedom parameter, and a scale parameter determined by a latent process $s$. The individual $s_i$ are the individual daily log volatilities in the latent log volatility process. The DataOur data consist of 401 daily returns of the S&P 500 stock market index during the 2008 financial crisis.
###Code
import pandas as pd
returns = pd.read_csv(pm.get_data('SP500.csv'), parse_dates=True, index_col=0)
len(returns)
returns.plot(figsize=(10, 6))
plt.ylabel('daily returns in %');
###Output
_____no_output_____
###Markdown
Model SpecificationAs with the linear regression example, specifying the model in PyMC3 mirrors its statistical specification. This model employs several new distributions: the `Exponential` distribution for the $\nu$ and $\sigma$ priors, the Student-T (`StudentT`) distribution for distribution of returns, and the `GaussianRandomWalk` for the prior for the latent volatilities. In PyMC3, variables with purely positive priors like `Exponential` are transformed with a log transform. This makes sampling more robust. Behind the scenes, a variable in the unconstrained space (named "variableName_log") is added to the model for sampling. In this model this happens behind the scenes for both the degrees of freedom, `nu`, and the scale parameter for the volatility process, `sigma`, since they both have exponential priors. Variables with priors that constrain them on two sides, like `Beta` or `Uniform`, are also transformed to be unconstrained but with a log odds transform. Although, unlike model specification in PyMC2, we do not typically provide starting points for variables at the model specification stage, we can also provide an initial value for any distribution (called a "test value") using the `testval` argument. This overrides the default test value for the distribution (usually the mean, median or mode of the distribution), and is most often useful if some values are illegal and we want to ensure we select a legal one. The test values for the distributions are also used as a starting point for sampling and optimization by default, though this is easily overriden. The vector of latent volatilities `s` is given a prior distribution by `GaussianRandomWalk`. As its name suggests GaussianRandomWalk is a vector valued distribution where the values of the vector form a random normal walk of length n, as specified by the `shape` argument. The scale of the innovations of the random walk, `sigma`, is specified in terms of the standard deviation of the normally distributed innovations and can be a scalar or vector.
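As a quick, hedged illustration of the automatic transformation described above (the model and variable names are made up, and the exact suffix of the transformed variable may differ between PyMC3 versions):
```python
with pm.Model() as transform_demo:
    x = pm.Exponential('x', 1.0)
print(transform_demo.free_RVs)  # e.g. [x_log__] -- sampling happens on the unconstrained log scale
```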
###Code
with pm.Model() as sp500_model:
nu = pm.Exponential('nu', 1/10., testval=5.)
sigma = pm.Exponential('sigma', 1/0.02, testval=.1)
s = pm.GaussianRandomWalk('s', sigma=sigma, shape=len(returns))
volatility_process = pm.Deterministic('volatility_process', pm.math.exp(-2*s)**0.5)
r = pm.StudentT('r', nu=nu, sigma=volatility_process, observed=returns['change'])
###Output
_____no_output_____
###Markdown
Notice that we transform the log volatility process `s` into the volatility process by `exp(-2*s)`. Here, `exp` is a Theano function, rather than the corresponding function in NumPy; Theano provides a large subset of the mathematical functions that NumPy does.Also note that we have declared the `Model` name `sp500_model` in the first occurrence of the context manager, rather than splitting it into two lines, as we did for the first example. Fitting
###Code
with sp500_model:
trace = pm.sample(2000)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [s, sigma, nu]
Sampling 2 chains: 100%|██████████| 5000/5000 [02:04<00:00, 40.14draws/s]
/Users/twiecki/anaconda3/lib/python3.6/site-packages/mkl_fft/_numpy_fft.py:1044: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
output = mkl_fft.rfftn_numpy(a, s, axes)
The estimated number of effective samples is smaller than 200 for some parameters.
###Markdown
We can check our samples by looking at the traceplot for `nu` and `sigma`.
###Code
pm.traceplot(trace, varnames=['nu', 'sigma']);
###Output
/Users/twiecki/working/projects/pymc/pymc3/plots/__init__.py:40: UserWarning: Keyword argument `varnames` renamed to `var_names`, and will be removed in pymc3 3.8
warnings.warn('Keyword argument `{old}` renamed to `{new}`, and will be removed in pymc3 3.8'.format(old=old, new=new))
###Markdown
Finally we plot the distribution of volatility paths by plotting many of our sampled volatility paths on the same graph. Each is rendered partially transparent (via the `alpha` argument in Matplotlib's `plot` function) so the regions where many paths overlap are shaded more darkly.
###Code
fig, ax = plt.subplots(figsize=(15, 8))
returns.plot(ax=ax)
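# each sampled latent path s corresponds to a volatility path exp(-s); every 5th sample is drawn with high transparency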
ax.plot(returns.index, 1/np.exp(trace['s',::5].T), 'C3', alpha=.03);
ax.set(title='volatility_process', xlabel='time', ylabel='volatility');
ax.legend(['S&P500', 'stochastic volatility process']);
###Output
_____no_output_____
###Markdown
As you can see, the model correctly infers the increase in volatility during the 2008 financial crash. Moreover, note that this model is quite complex because of its high dimensionality and dependency-structure in the random walk distribution. NUTS as implemented in PyMC3, however, correctly infers the posterior distribution with ease. Case study 2: Coal mining disasters Consider the following time series of recorded coal mining disasters in the UK from 1851 to 1962 (Jarrett, 1979). The number of disasters is thought to have been affected by changes in safety regulations during this period. Unfortunately, we also have a pair of years with missing data, identified as missing by a `nan` in the pandas `Series`. These missing values will be automatically imputed by `PyMC3`. Next we will build a model for this series and attempt to estimate when the change occurred. At the same time, we will see how to handle missing data, use multiple samplers and sample from discrete random variables.
###Code
import pandas as pd
disaster_data = pd.Series([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, np.nan, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, np.nan, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])
years = np.arange(1851, 1962)
plt.plot(years, disaster_data, 'o', markersize=8);
plt.ylabel("Disaster count")
plt.xlabel("Year");
###Output
_____no_output_____
###Markdown
Occurrences of disasters in the time series are thought to follow a Poisson process with a large rate parameter in the early part of the time series, and one with a smaller rate in the later part. We are interested in locating the change point in the series, which perhaps is related to changes in mining safety regulations. In our model, $$ \begin{aligned} D_t &\sim \text{Pois}(r_t), r_t= \begin{cases} e, & \text{if } t \le s \\ l, & \text{if } t \gt s \end{cases} \\ s &\sim \text{Unif}(t_l, t_h)\\ e &\sim \text{exp}(1)\\ l &\sim \text{exp}(1) \end{aligned}$$ the parameters are defined as follows:
* $D_t$: The number of disasters in year $t$
* $r_t$: The rate parameter of the Poisson distribution of disasters in year $t$.
* $s$: The year in which the rate parameter changes (the switchpoint).
* $e$: The rate parameter before the switchpoint $s$.
* $l$: The rate parameter after the switchpoint $s$.
* $t_l$, $t_h$: The lower and upper boundaries of year $t$.

This model is built much like our previous models. The major differences are the introduction of discrete variables with the Poisson and discrete-uniform priors and the novel form of the deterministic random variable `rate`.
###Code
with pm.Model() as disaster_model:
switchpoint = pm.DiscreteUniform('switchpoint', lower=years.min(), upper=years.max(), testval=1900)
# Priors for pre- and post-switch rates number of disasters
early_rate = pm.Exponential('early_rate', 1)
late_rate = pm.Exponential('late_rate', 1)
# Allocate appropriate Poisson rates to years before and after current
rate = pm.math.switch(switchpoint >= years, early_rate, late_rate)
disasters = pm.Poisson('disasters', rate, observed=disaster_data)
###Output
/Users/twiecki/working/projects/pymc/pymc3/model.py:1277: UserWarning: Data in disasters contains missing values and will be automatically imputed from the sampling distribution.
warnings.warn(impute_message, UserWarning)
###Markdown
The logic for the rate random variable,
```python
rate = switch(switchpoint >= years, early_rate, late_rate)
```
is implemented using `switch`, a Theano function that works like an if statement. It uses the first argument to switch between the next two arguments. Missing values are handled transparently by passing a `MaskedArray` or a `pandas.DataFrame` with NaN values to the `observed` argument when creating an observed stochastic random variable. Behind the scenes, another random variable, `disasters.missing_values`, is created to model the missing values. Unfortunately, because they are discrete variables and thus have no meaningful gradient, we cannot use NUTS for sampling `switchpoint` or the missing disaster observations. Instead, we will sample using a `Metropolis` step method, which implements adaptive Metropolis-Hastings and is designed to handle discrete values. `PyMC3` automatically assigns the correct sampling algorithms.
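If we preferred to choose the step methods ourselves rather than rely on auto-assignment, a hedged sketch would look like the following (the imputed values are reachable through the `missing_values` attribute of `disasters`; attribute names may vary between versions):
```python
with disaster_model:
    step1 = pm.NUTS([early_rate, late_rate])
    step2 = pm.Metropolis([switchpoint, disasters.missing_values[0]])
    trace = pm.sample(10000, step=[step1, step2])
```
Below we simply let `PyMC3` make the assignment for us.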
###Code
with disaster_model:
trace = pm.sample(10000)
###Output
Multiprocess sampling (2 chains in 2 jobs)
CompoundStep
>CompoundStep
>>Metropolis: [disasters_missing]
>>Metropolis: [switchpoint]
>NUTS: [late_rate, early_rate]
Sampling 2 chains: 100%|██████████| 21000/21000 [00:14<00:00, 1400.63draws/s]
/Users/twiecki/anaconda3/lib/python3.6/site-packages/mkl_fft/_numpy_fft.py:1044: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
output = mkl_fft.rfftn_numpy(a, s, axes)
The number of effective samples is smaller than 10% for some parameters.
###Markdown
In the trace plot below we can see that there's about a 10 year span that's plausible for a significant change in safety, but a 5 year span that contains most of the probability mass. The distribution is jagged because of the jumpy relationship between the year switchpoint and the likelihood and not due to sampling error.
###Code
pm.traceplot(trace);
###Output
_____no_output_____
###Markdown
The following plot shows the switch point as an orange vertical line, together with its HPD as a semitransparent band. The dashed black line shows the accident rate.
###Code
plt.figure(figsize=(10, 8))
plt.plot(years, disaster_data, '.')
plt.ylabel("Number of accidents", fontsize=16)
plt.xlabel("Year", fontsize=16)
plt.vlines(trace['switchpoint'].mean(), disaster_data.min(), disaster_data.max(), color='C1')
average_disasters = np.zeros_like(disaster_data, dtype='float')
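# for each year, average the sampled rate: early_rate for posterior draws whose switchpoint lies after that year, late_rate otherwise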
for i, year in enumerate(years):
idx = year < trace['switchpoint']
average_disasters[i] = (trace['early_rate'][idx].sum() + trace['late_rate'][~idx].sum()) / (len(trace) * trace.nchains)
sp_hpd = pm.hpd(trace['switchpoint'])
plt.fill_betweenx(y=[disaster_data.min(), disaster_data.max()],
x1=sp_hpd[0], x2=sp_hpd[1], alpha=0.5, color='C1');
plt.plot(years, average_disasters, 'k--', lw=2);
###Output
_____no_output_____
###Markdown
Arbitrary deterministicsDue to its reliance on Theano, PyMC3 provides many mathematical functions and operators for transforming random variables into new random variables. However, the library of functions in Theano is not exhaustive, therefore Theano and PyMC3 provide functionality for creating arbitrary Theano functions in pure Python, and including these functions in PyMC models. This is supported with the `as_op` function decorator.Theano needs to know the types of the inputs and outputs of a function, which are specified for `as_op` by `itypes` for inputs and `otypes` for outputs. The Theano documentation includes [an overview of the available types](http://deeplearning.net/software/theano/library/tensor/basic.htmlall-fully-typed-constructors).
###Code
import theano.tensor as tt
from theano.compile.ops import as_op
@as_op(itypes=[tt.lscalar], otypes=[tt.lscalar])
def crazy_modulo3(value):
if value > 0:
return value % 3
else :
return (-value + 1) % 3
with pm.Model() as model_deterministic:
a = pm.Poisson('a', 1)
b = crazy_modulo3(a)
###Output
_____no_output_____
###Markdown
An important drawback of this approach is that it is not possible for `theano` to inspect these functions in order to compute the gradient required for the Hamiltonian-based samplers. Therefore, it is not possible to use the HMC or NUTS samplers for a model that uses such an operator. However, it is possible to add a gradient if we inherit from `theano.Op` instead of using `as_op`. The PyMC example set includes [a more elaborate example of the usage of as_op](https://github.com/pymc-devs/pymc3/blob/master/pymc3/examples/disaster_model_theano_op.py). Arbitrary distributions Similarly, the library of statistical distributions in PyMC3 is not exhaustive, but PyMC3 allows for the creation of user-defined functions for an arbitrary probability distribution. For simple statistical distributions, the `DensityDist` function takes as an argument any function that calculates a log-probability $log(p(x))$. This function may employ other random variables in its calculation. Here is an example inspired by a blog post by Jake Vanderplas on which priors to use for a linear regression (Vanderplas, 2014).
```python
import theano.tensor as tt

with pm.Model() as model:
    alpha = pm.Uniform('intercept', -100, 100)

    # Create custom densities
    beta = pm.DensityDist('beta', lambda value: -1.5 * tt.log(1 + value**2), testval=0)
    eps = pm.DensityDist('eps', lambda value: -tt.log(tt.abs_(value)), testval=1)

    # Create likelihood
    like = pm.Normal('y_est', mu=alpha + beta * X, sigma=eps, observed=Y)
```
For more complex distributions, one can create a subclass of `Continuous` or `Discrete` and provide the custom `logp` function, as required. This is how the built-in distributions in PyMC are specified. As an example, fields like psychology and astrophysics have complex likelihood functions for a particular process that may require numerical approximation. In these cases, it is impossible to write the function in terms of predefined theano operators and we must use a custom theano operator using `as_op` or inheriting from `theano.Op`. Implementing the `beta` variable above as a `Continuous` subclass is shown below, along with a sub-function.
###Code
class Beta(pm.Continuous):
def __init__(self, mu, *args, **kwargs):
super(Beta, self).__init__(*args, **kwargs)
self.mu = mu
self.mode = mu
def logp(self, value):
mu = self.mu
return beta_logp(value - mu)
def beta_logp(value):
return -1.5 * np.log(1 + (value)**2)
with pm.Model() as model:
beta = Beta('slope', mu=0, testval=0)
###Output
_____no_output_____
###Markdown
If your logp can not be expressed in Theano, you can decorate the function with `as_op` as follows: `@as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])`. Note, that this will create a blackbox Python function that will be much slower and not provide the gradients necessary for e.g. NUTS. Generalized Linear ModelsGeneralized Linear Models (GLMs) are a class of flexible models that are widely used to estimate regression relationships between a single outcome variable and one or multiple predictors. Because these models are so common, `PyMC3` offers a `glm` submodule that allows flexible creation of various GLMs with an intuitive `R`-like syntax that is implemented via the `patsy` module.The `glm` submodule requires data to be included as a `pandas` `DataFrame`. Hence, for our linear regression example:
###Code
# Convert X and Y to a pandas DataFrame
df = pd.DataFrame({'x1': X1, 'x2': X2, 'y': Y})
###Output
_____no_output_____
###Markdown
The model can then be very concisely specified in one line of code.
###Code
from pymc3.glm import GLM
with pm.Model() as model_glm:
GLM.from_formula('y ~ x1 + x2', df)
trace = pm.sample()
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [sd, x2, x1, Intercept]
Sampling 2 chains: 100%|██████████| 2000/2000 [00:01<00:00, 1402.82draws/s]
/Users/twiecki/anaconda3/lib/python3.6/site-packages/mkl_fft/_numpy_fft.py:1044: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
output = mkl_fft.rfftn_numpy(a, s, axes)
###Markdown
The error distribution, if not specified via the `family` argument, is assumed to be normal. In the case of logistic regression, this can be modified by passing in a `Binomial` family object.
###Code
from pymc3.glm.families import Binomial
df_logistic = pd.DataFrame({'x1': X1, 'y': Y > np.median(Y)})
with pm.Model() as model_glm_logistic:
GLM.from_formula('y ~ x1', df_logistic, family=Binomial())
###Output
_____no_output_____
###Markdown
Getting started with PyMC3Authors: John Salvatier, Thomas V. Wiecki, Christopher FonnesbeckNote: This text is taken from the [PeerJ CS publication on PyMC3](https://peerj.com/articles/cs-55/). AbstractProbabilistic Programming allows for automatic Bayesian inference on user-defined probabilistic models. Recent advances in Markov chain Monte Carlo (MCMC) sampling allow inference on increasingly complex models. This class of MCMC, known as Hamliltonian Monte Carlo, requires gradient information which is often not readily available. PyMC3 is a new open source Probabilistic Programming framework written in Python that uses Theano to compute gradients via automatic differentiation as well as compile probabilistic programs on-the-fly to C for increased speed. Contrary to other Probabilistic Programming languages, PyMC3 allows model specification directly in Python code. The lack of a domain specific language allows for great flexibility and direct interaction with the model. This paper is a tutorial-style introduction to this software package. IntroductionProbabilistic programming (PP) allows flexible specification of Bayesian statistical models in code. PyMC3 is a new, open-source PP framework with an intuitive and readable, yet powerful, syntax that is close to the natural syntax statisticians use to describe models. It features next-generation Markov chain Monte Carlo (MCMC) sampling algorithms such as the No-U-Turn Sampler (NUTS; Hoffman, 2014), a self-tuning variant of Hamiltonian Monte Carlo (HMC; Duane, 1987). This class of samplers works well on high dimensional and complex posterior distributions and allows many complex models to be fit without specialized knowledge about fitting algorithms. HMC and NUTS take advantage of gradient information from the likelihood to achieve much faster convergence than traditional sampling methods, especially for larger models. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo, which means you usually don't need to have specialized knowledge about how the algorithms work. PyMC3, Stan (Stan Development Team, 2014), and the LaplacesDemon package for R are currently the only PP packages to offer HMC.Probabilistic programming in Python confers a number of advantages including multi-platform compatibility, an expressive yet clean and readable syntax, easy integration with other scientific libraries, and extensibility via C, C++, Fortran or Cython. These features make it relatively straightforward to write and use custom statistical distributions, samplers and transformation functions, as required by Bayesian analysis.While most of PyMC3's user-facing features are written in pure Python, it leverages Theano (Bergstra et al., 2010) to transparently transcode models to C and compile them to machine code, thereby boosting performance. Theano is a library that allows expressions to be defined using generalized vector data structures called *tensors*, which are tightly integrated with the popular NumPy `ndarray` data structure, and similarly allow for broadcasting and advanced indexing, just as NumPy arrays do. Theano also automatically optimizes the likelihood's computational graph for speed and provides simple GPU integration.Here, we present a primer on the use of PyMC3 for solving general Bayesian statistical inference and prediction problems. 
We will first see the basics of how to use PyMC3, motivated by a simple example: installation, data creation, model definition, model fitting and posterior analysis. Then we will cover two case studies and use them to show how to define and fit more sophisticated models. Finally we will show how to extend PyMC3 and discuss other useful features: the Generalized Linear Models subpackage, custom distributions, custom transformations and alternative storage backends. InstallationRunning PyMC3 requires a working Python interpreter, either version 2.7 (or more recent) or 3.4 (or more recent); we recommend that new users install version 3.4. A complete Python installation for Mac OSX, Linux and Windows can most easily be obtained by downloading and installing the free [`Anaconda Python Distribution`](https://store.continuum.io/cshop/anaconda/) by ContinuumIO. `PyMC3` can be installed using `pip` (https://pip.pypa.io/en/latest/installing.html):```pip install git+https://github.com/pymc-devs/pymc3```PyMC3 depends on several third-party Python packages which will be automatically installed when installing via pip. The four required dependencies are: `Theano`, `NumPy`, `SciPy`, and `Matplotlib`. To take full advantage of PyMC3, the optional dependencies `Pandas` and `Patsy` should also be installed. These are *not* automatically installed, but can be installed by:```pip install patsy pandas```The source code for PyMC3 is hosted on GitHub at https://github.com/pymc-devs/pymc3 and is distributed under the liberal [Apache License 2.0](https://github.com/pymc-devs/pymc3/blob/master/LICENSE). On the GitHub site, users may also report bugs and other issues, as well as contribute code to the project, which we actively encourage. A Motivating Example: Linear RegressionTo introduce model definition, fitting and posterior analysis, we first consider a simple Bayesian linear regression model with normal priors for the parameters. We are interested in predicting outcomes $Y$ as normally-distributed observations with an expected value $\mu$ that is a linear function of two predictor variables, $X_1$ and $X_2$.$$\begin{aligned} Y &\sim \mathcal{N}(\mu, \sigma^2) \\\mu &= \alpha + \beta_1 X_1 + \beta_2 X_2\end{aligned}$$where $\alpha$ is the intercept, and $\beta_i$ is the coefficient for covariate $X_i$, while $\sigma$ represents the observation error. Since we are constructing a Bayesian model, the unknown variables in the model must be assigned a prior distribution. We choose zero-mean normal priors with variance of 100 for both regression coefficients, which corresponds to *weak* information regarding the true parameter values. We choose a half-normal distribution (normal distribution bounded at zero) as the prior for $\sigma$.$$\begin{aligned} \alpha &\sim \mathcal{N}(0, 100) \\\beta_i &\sim \mathcal{N}(0, 100) \\\sigma &\sim \lvert\mathcal{N}(0, 1){\rvert}\end{aligned}$$ Generating dataWe can simulate some artificial data from this model using only NumPy's `random` module, and then use PyMC3 to try to recover the corresponding parameters. We are intentionally generating the data to closely correspond the PyMC3 model structure.
###Code
import numpy as np
import matplotlib.pyplot as plt
# Initialize random number generator
np.random.seed(123)
# True parameter values
alpha, sigma = 1, 1
beta = [1, 2.5]
# Size of dataset
size = 100
# Predictor variable
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
# Simulate outcome variable
Y = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma
###Output
_____no_output_____
###Markdown
Here is what the simulated data look like. We use matplotlib's `pyplot` module (imported above as `plt`) to plot them.
###Code
%matplotlib inline
fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,4))
axes[0].scatter(X1, Y)
axes[1].scatter(X2, Y)
axes[0].set_ylabel('Y'); axes[0].set_xlabel('X1'); axes[1].set_xlabel('X2');
###Output
_____no_output_____
###Markdown
Model Specification Specifying this model in PyMC3 is straightforward because the syntax closely mirrors the statistical notation. For the most part, each line of Python code corresponds to a line in the model notation above. First, we import PyMC3. We use the convention of importing it as `pm`.
###Code
import pymc3 as pm
###Output
_____no_output_____
###Markdown
Now we build our model, which we will present in full first, then explain each part line-by-line.
###Code
basic_model = pm.Model()
with basic_model:
# Priors for unknown model parameters
alpha = pm.Normal('alpha', mu=0, sd=10)
beta = pm.Normal('beta', mu=0, sd=10, shape=2)
sigma = pm.HalfNormal('sigma', sd=1)
# Expected value of outcome
mu = alpha + beta[0]*X1 + beta[1]*X2
# Likelihood (sampling distribution) of observations
Y_obs = pm.Normal('Y_obs', mu=mu, sd=sigma, observed=Y)
###Output
_____no_output_____
###Markdown
The first line,```pythonbasic_model = Model()```creates a new `Model` object which is a container for the model random variables.Following instantiation of the model, the subsequent specification of the model components is performed inside a `with` statement:```pythonwith basic_model:```This creates a *context manager*, with our `basic_model` as the context, that includes all statements until the indented block ends. This means all PyMC3 objects introduced in the indented code block below the `with` statement are added to the model behind the scenes. Absent this context manager idiom, we would be forced to manually associate each of the variables with `basic_model` right after we create them. If you try to create a new random variable without a `with model:` statement, it will raise an error since there is no obvious model for the variable to be added to.The first three statements in the context manager:```pythonalpha = Normal('alpha', mu=0, sd=10)beta = Normal('beta', mu=0, sd=10, shape=2)sigma = HalfNormal('sigma', sd=1)```create a **stochastic** random variables with a Normal prior distributions for the regression coefficients with a mean of 0 and standard deviation of 10 for the regression coefficients, and a half-normal distribution for the standard deviation of the observations, $\sigma$. These are stochastic because their values are partly determined by its parents in the dependency graph of random variables, which for priors are simple constants, and partly random (or stochastic). We call the `Normal` constructor to create a random variable to use as a normal prior. The first argument is always the *name* of the random variable, which should almost always match the name of the Python variable being assigned to, since it sometimes used to retrieve the variable from the model for summarizing output. The remaining required arguments for a stochastic object are the parameters, in this case `mu`, the mean, and `sd`, the standard deviation, which we assign hyperparameter values for the model. In general, a distribution's parameters are values that determine the location, shape or scale of the random variable, depending on the parameterization of the distribution. Most commonly used distributions, such as `Beta`, `Exponential`, `Categorical`, `Gamma`, `Binomial` and many others, are available in PyMC3.The `beta` variable has an additional `shape` argument to denote it as a vector-valued parameter of size 2. The `shape` argument is available for all distributions and specifies the length or shape of the random variable, but is optional for scalar variables, since it defaults to a value of one. It can be an integer, to specify an array, or a tuple, to specify a multidimensional array (*e.g.* `shape=(5,7)` makes random variable that takes on 5 by 7 matrix values). Detailed notes about distributions, sampling methods and other PyMC3 functions are available via the `help` function.
###Code
help(pm.Normal) #try help(Model), help(Uniform) or help(basic_model)
###Output
Help on class Normal in module pymc3.distributions.continuous:
class Normal(pymc3.distributions.distribution.Continuous)
| Univariate normal log-likelihood.
|
| .. math::
|
| f(x \mid \mu, \tau) =
| \sqrt{\frac{\tau}{2\pi}}
| \exp\left\{ -\frac{\tau}{2} (x-\mu)^2 \right\}
|
| ======== ==========================================
| Support :math:`x \in \mathbb{R}`
| Mean :math:`\mu`
| Variance :math:`\dfrac{1}{\tau}` or :math:`\sigma^2`
| ======== ==========================================
|
| Normal distribution can be parameterized either in terms of precision
| or standard deviation. The link between the two parametrizations is
| given by
|
| .. math::
|
| \tau = \dfrac{1}{\sigma^2}
|
| Parameters
| ----------
| mu : float
| Mean.
| sd : float
| Standard deviation (sd > 0).
| tau : float
| Precision (tau > 0).
|
| Method resolution order:
| Normal
| pymc3.distributions.distribution.Continuous
| pymc3.distributions.distribution.Distribution
| builtins.object
|
| Methods defined here:
|
| __init__(self, mu=0, sd=None, tau=None, **kwargs)
| Initialize self. See help(type(self)) for accurate signature.
|
| logp(self, value)
|
| random(self, point=None, size=None, repeat=None)
|
| ----------------------------------------------------------------------
| Methods inherited from pymc3.distributions.distribution.Distribution:
|
| __getnewargs__(self)
|
| default(self)
|
| get_test_val(self, val, defaults)
|
| getattr_value(self, val)
|
| ----------------------------------------------------------------------
| Class methods inherited from pymc3.distributions.distribution.Distribution:
|
| dist(*args, **kwargs) from builtins.type
|
| ----------------------------------------------------------------------
| Static methods inherited from pymc3.distributions.distribution.Distribution:
|
| __new__(cls, name, *args, **kwargs)
| Create and return a new object. See help(type) for accurate signature.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from pymc3.distributions.distribution.Distribution:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
Having defined the priors, the next statement creates the expected value `mu` of the outcomes, specifying the linear relationship:```pythonmu = alpha + beta[0]*X1 + beta[1]*X2```This creates a **deterministic** random variable, which implies that its value is *completely* determined by its parents' values. That is, there is no uncertainty beyond that which is inherent in the parents' values. Here, `mu` is just the sum of the intercept `alpha` and the two products of the coefficients in `beta` and the predictor variables, whatever their values may be. PyMC3 random variables and data can be arbitrarily added, subtracted, divided, multiplied together and indexed-into to create new random variables. This allows for great model expressivity. Many common mathematical functions like `sum`, `sin`, `exp` and linear algebra functions like `dot` (for inner product) and `inv` (for inverse) are also provided. The final line of the model, defines `Y_obs`, the sampling distribution of the outcomes in the dataset.```pythonY_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)```This is a special case of a stochastic variable that we call an **observed stochastic**, and represents the data likelihood of the model. It is identical to a standard stochastic, except that its `observed` argument, which passes the data to the variable, indicates that the values for this variable were observed, and should not be changed by any fitting algorithm applied to the model. The data can be passed in the form of either a `numpy.ndarray` or `pandas.DataFrame` object.Notice that, unlike for the priors of the model, the parameters for the normal distribution of `Y_obs` are not fixed values, but rather are the deterministic object `mu` and the stochastic `sigma`. This creates parent-child relationships between the likelihood and these two variables. Model fittingHaving completely specified our model, the next step is to obtain posterior estimates for the unknown variables in the model. Ideally, we could calculate the posterior estimates analytically, but for most non-trivial models, this is not feasible. We will consider two approaches, whose appropriateness depends on the structure of the model and the goals of the analysis: finding the *maximum a posteriori* (MAP) point using optimization methods, and computing summaries based on samples drawn from the posterior distribution using Markov Chain Monte Carlo (MCMC) sampling methods. Maximum a posteriori methodsThe **maximum a posteriori (MAP)** estimate for a model, is the mode of the posterior distribution and is generally found using numerical optimization methods. This is often fast and easy to do, but only gives a point estimate for the parameters and can be biased if the mode isn't representative of the distribution. PyMC3 provides this functionality with the `find_MAP` function.Below we find the MAP for our original model. The MAP is returned as a parameter **point**, which is always represented by a Python dictionary of variable names to NumPy arrays of parameter values.
###Code
map_estimate = pm.find_MAP(model=basic_model)
map_estimate
###Output
Optimization terminated successfully.
Current function value: 149.017982
Iterations: 16
Function evaluations: 21
Gradient evaluations: 21
###Markdown
By default, `find_MAP` uses the Broyden–Fletcher–Goldfarb–Shanno (BFGS) optimization algorithm to find the maximum of the log-posterior but also allows selection of other optimization algorithms from the `scipy.optimize` module. For example, below we use Powell's method to find the MAP.
###Code
from scipy import optimize
map_estimate = pm.find_MAP(model=basic_model, fmin=optimize.fmin_powell)
map_estimate
###Output
Optimization terminated successfully.
Current function value: 149.019762
Iterations: 4
Function evaluations: 176
###Markdown
It is important to note that the MAP estimate is not always reasonable, especially if the mode is at an extreme. This can be a subtle issue; with high dimensional posteriors, one can have areas of extremely high density but low total probability because the volume is very small. This will often occur in hierarchical models with the variance parameter for the random effect. If the individual group means are all the same, the posterior will have near infinite density if the scale parameter for the group means is almost zero, even though the probability of such a small scale parameter will be small since the group means must be extremely close together. Most techniques for finding the MAP estimate also only find a *local* optimum (which is often good enough), but can fail badly for multimodal posteriors if the different modes are meaningfully different. Sampling methodsThough finding the MAP is a fast and easy way of obtaining estimates of the unknown model parameters, it is limited because there is no associated estimate of uncertainty produced with the MAP estimates. Instead, a simulation-based approach such as Markov chain Monte Carlo (MCMC) can be used to obtain a Markov chain of values that, given the satisfaction of certain conditions, are indistinguishable from samples from the posterior distribution. To conduct MCMC sampling to generate posterior samples in PyMC3, we specify a **step method** object that corresponds to a particular MCMC algorithm, such as Metropolis, Slice sampling, or the No-U-Turn Sampler (NUTS). PyMC3's `step_methods` submodule contains the following samplers: `NUTS`, `Metropolis`, `Slice`, `HamiltonianMC`, and `BinaryMetropolis`. These step methods can be assigned manually, or assigned automatically by PyMC3. Auto-assignment is based on the attributes of each variable in the model. In general:* Binary variables will be assigned to `BinaryMetropolis`* Discrete variables will be assigned to `Metropolis`* Continuous variables will be assigned to `NUTS`Auto-assignment can be overriden for any subset of variables by specifying them manually prior to sampling. Gradient-based sampling methodsPyMC3 has the standard sampling algorithms like adaptive Metropolis-Hastings and adaptive slice sampling, but PyMC3's most capable step method is the No-U-Turn Sampler. NUTS is especially useful on models that have many continuous parameters, a situation where other MCMC algorithms work very slowly. It takes advantage of information about where regions of higher probability are, based on the gradient of the log posterior-density. This helps it achieve dramatically faster convergence on large problems than traditional sampling methods achieve. PyMC3 relies on Theano to analytically compute model gradients via automatic differentiation of the posterior density. NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo. For random variables that are undifferentiable (namely, discrete variables) NUTS cannot be used, but it may still be used on the differentiable variables in a model that contains undifferentiable variables. NUTS requires a scaling matrix parameter, which is analogous to the variance parameter for the jump proposal distribution in Metropolis-Hastings, although NUTS uses it somewhat differently. The matrix gives the rough shape of the distribution so that NUTS does not make jumps that are too large in some directions and too small in other directions. 
It is important to set this scaling parameter to a reasonable value to facilitate efficient sampling. This is especially true for models that have many unobserved stochastic random variables or models with highly non-normal posterior distributions. Poor scaling parameters will slow down NUTS significantly, sometimes almost stopping it completely. A reasonable starting point for sampling can also be important for efficient sampling, but not as often.Fortunately `PyMC3` automatically initializes NUTS using another inference algorithm called ADVI (auto-diff variational inference). Moreover, `PyMC3` will automatically assign an appropriate sampler if we don't supply it via the `step` keyword argument (see below for an example of how to explicitly assign step methods).
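For reference, a hedged sketch of requesting the ADVI initialization and tuning length explicitly (the accepted argument names and initialization strings depend on the PyMC3 version in use):
```python
with basic_model:
    trace = pm.sample(500, init='advi', n_init=50000, tune=500)
```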
###Code
from scipy import optimize
with basic_model:
# draw 500 posterior samples
trace = pm.sample()
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using ADVI...
Average Loss = 156.13: 5%|▌ | 10884/200000 [00:01<00:19, 9870.68it/s]
Convergence archived at 11100
Interrupted at 11,100 [5%]: Average Loss = 237.04
100%|██████████| 1000/1000 [00:00<00:00, 1156.78it/s]
###Markdown
The `sample` function runs the step method(s) assigned (or passed) to it for the given number of iterations and returns a `Trace` object containing the samples collected, in the order they were collected. The `trace` object can be queried in a similar way to a `dict` containing a map from variable names to `numpy.array`s. The first dimension of the array is the sampling index and the later dimensions match the shape of the variable. We can see the last 5 values for the `alpha` variable as follows:
###Code
trace['alpha'][-5:]
###Output
_____no_output_____
###Markdown
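The `MultiTrace` object also provides a `get_values` method for retrieving samples with optional burn-in and thinning; the exact keyword names below are an assumption about this PyMC3 version:
```python
# Sketch only: retrieve the samples of beta, discarding the first 100 draws
beta_samples = trace.get_values('beta', burn=100)
beta_samples.shape
```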
If we wanted to use the slice sampling algorithm for `sigma` instead of NUTS (which was assigned automatically), we could have specified this via the `step` argument of `sample`.
###Code
with basic_model:
# obtain starting values via MAP
start = pm.find_MAP(fmin=optimize.fmin_powell)
# instantiate sampler
step = pm.Slice(vars=[sigma])
# draw 5000 posterior samples
trace = pm.sample(5000, step=step, start=start)
###Output
Assigned NUTS to alpha
Assigned NUTS to beta
###Markdown
Posterior analysis`PyMC3` provides plotting and summarization functions for inspecting the sampling output. A simple posterior plot can be created using `traceplot`.
###Code
_ = pm.traceplot(trace)
###Output
_____no_output_____
###Markdown
The left column consists of a smoothed histogram (using kernel density estimation) of the marginal posteriors of each stochastic random variable while the right column contains the samples of the Markov chain plotted in sequential order. The `beta` variable, being vector-valued, produces two histograms and two sample traces, corresponding to both predictor coefficients.In addition, the `summary` function provides a text-based output of common posterior statistics:
###Code
pm.summary(trace)
###Output
alpha:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.907 0.099 0.001 [0.708, 1.096]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.711 0.840 0.907 0.973 1.101
beta:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.949 0.087 0.001 [0.789, 1.129]
2.619 0.509 0.016 [1.613, 3.601]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.777 0.891 0.948 1.006 1.119
1.620 2.280 2.621 2.970 3.611
sigma:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.990 0.070 0.001 [0.858, 1.130]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.863 0.941 0.985 1.034 1.136
###Markdown
Case study 1: Stochastic volatilityWe present a case study of stochastic volatility, time-varying stock market volatility, to illustrate PyMC3's use in addressing a more realistic problem. The distribution of market returns is highly non-normal, which makes sampling the volatilities significantly more difficult. This example has 400+ parameters, so common sampling algorithms like Metropolis-Hastings would get bogged down, generating highly autocorrelated samples. Instead, we use NUTS, which is dramatically more efficient. The ModelAsset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others they are very stable. Stochastic volatility models address this with a latent volatility variable, which changes over time. The following model is similar to the one described in the NUTS paper (Hoffman 2014, p. 21).$$\begin{aligned} \sigma &\sim \text{Exp}(50) \\ \nu &\sim \text{Exp}(.1) \\ s_i &\sim \mathcal{N}(s_{i-1}, \sigma^{-2}) \\ \log(y_i) &\sim t(\nu, 0, \exp(-2 s_i))\end{aligned}$$Here, $y$ is the daily return series, which is modeled with a Student-t distribution with an unknown degrees-of-freedom parameter and a scale parameter determined by a latent process $s$. The individual $s_i$ are the daily log volatilities in the latent log volatility process. The DataOur data consist of daily returns of the S&P 500 during the 2008 financial crisis. Here, we use `pandas-datareader` to obtain the price data from Google Finance (matching the `get_data_google` call below); it can be installed with `pip install pandas-datareader`.
###Code
from pandas_datareader import data
import pandas as pd
returns = data.get_data_google('SPY', start='2008-5-1', end='2009-12-1')['Close'].pct_change()
len(returns)
returns.plot(figsize=(10, 6))
plt.ylabel('daily returns in %');
###Output
_____no_output_____
###Markdown
Model SpecificationAs with the linear regression example, specifying the model in PyMC3 mirrors its statistical specification. This model employs several new distributions: the `Exponential` distribution for the $\nu$ and $\sigma$ priors, the Student-T (`StudentT`) distribution for the distribution of returns, and the `GaussianRandomWalk` for the prior for the latent volatilities. In PyMC3, variables with purely positive priors like `Exponential` are transformed with a log transform. This makes sampling more robust. Behind the scenes, a variable in the unconstrained space (named "variableName_log") is added to the model for sampling. In this model this happens for both the degrees of freedom, `nu`, and the scale parameter for the volatility process, `sigma`, since they both have exponential priors. Variables with priors that constrain them on two sides, like `Beta` or `Uniform`, are also transformed to be unconstrained, but with a log-odds transform. Unlike model specification in PyMC2, we do not typically provide starting points for variables at the model specification stage; however, we can provide an initial value for any distribution (called a "test value") using the `testval` argument. This overrides the default test value for the distribution (usually the mean, median or mode of the distribution), and is most often useful if some values are illegal and we want to ensure we select a legal one. The test values for the distributions are also used as a starting point for sampling and optimization by default, though this is easily overridden. The vector of latent volatilities `s` is given a prior distribution by `GaussianRandomWalk`. As its name suggests, `GaussianRandomWalk` is a vector-valued distribution where the values of the vector form a random normal walk of length n, as specified by the `shape` argument. The scale of the innovations of the random walk, `sigma`, is specified in terms of the precision of the normally distributed innovations and can be a scalar or vector.
###Code
with pm.Model() as sp500_model:
nu = pm.Exponential('nu', 1./10, testval=5.)
sigma = pm.Exponential('sigma', 1./.02, testval=.1)
s = pm.GaussianRandomWalk('s', sigma**-2, shape=len(returns))
volatility_process = pm.Deterministic('volatility_process', pm.math.exp(-2*s))
r = pm.StudentT('r', nu, lam=1/volatility_process, observed=returns)
###Output
_____no_output_____
###Markdown
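Before fitting, one can verify which automatically transformed variables were added by inspecting the model's free random variables; the attribute name and the exact `_log__` suffix are assumptions based on this PyMC3 version's conventions:
```python
# Sketch only: transformed variables such as nu_log__ and sigma_log__
# should appear among the model's free random variables.
print(sp500_model.free_RVs)
```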
Notice that we transform the log volatility process `s` into the volatility process by `exp(-2*s)`. Here, `exp` is a Theano function, rather than the corresponding function in NumPy; Theano provides a large subset of the mathematical functions that NumPy does.Also note that we have declared the `Model` name `sp500_model` in the first occurrence of the context manager, rather than splitting it into two lines, as we did for the first example. Fitting
###Code
with sp500_model:
trace = pm.sample(2000)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using ADVI...
Average Loss = -868.31: 32%|███▏ | 63343/200000 [00:21<00:47, 2879.06it/s]
Convergence archived at 63600
Interrupted at 63,600 [31%]: Average Loss = 559.54
100%|██████████| 2500/2500 [02:31<00:00, 13.50it/s]
###Markdown
We can check our samples by looking at the traceplot for `nu` and `sigma`.
###Code
_ = pm.traceplot(trace, [nu, sigma])
###Output
_____no_output_____
###Markdown
Finally we plot the distribution of volatility paths by plotting many of our sampled volatility paths on the same graph. Each is rendered partially transparent (via the `alpha` argument in Matplotlib's `plot` function) so the regions where many paths overlap are shaded more darkly.
###Code
fig, ax = plt.subplots(figsize=(15, 8))
returns.plot(ax=ax)
ax.plot(returns.index, 1/np.exp(trace['s',::5].T), 'r', alpha=.03);
ax.set(title='volatility_process', xlabel='time', ylabel='volatility');
ax.legend(['S&P500', 'stochastic volatility process'])
###Output
_____no_output_____
###Markdown
As you can see, the model correctly infers the increase in volatility during the 2008 financial crash. Moreover, note that this model is quite complex because of its high dimensionality and the dependency structure in the random walk distribution. NUTS as implemented in PyMC3, however, correctly infers the posterior distribution with ease. Case study 2: Coal mining disastersConsider the following time series of recorded coal mining disasters in the UK from 1851 to 1962 (Jarrett, 1979). The number of disasters is thought to have been affected by changes in safety regulations during this period. Unfortunately, we also have a pair of years with missing data, identified as missing by a NumPy MaskedArray using -999 as the marker value. Next we will build a model for this series and attempt to estimate when the change occurred. At the same time, we will see how to handle missing data, use multiple samplers and sample from discrete random variables.
###Code
disaster_data = np.ma.masked_values([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, -999, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, -999, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1], value=-999)
year = np.arange(1851, 1962)
plt.plot(year, disaster_data, 'o', markersize=8);
plt.ylabel("Disaster count")
plt.xlabel("Year")
###Output
_____no_output_____
###Markdown
Occurrences of disasters in the time series are thought to follow a Poisson process with a larger rate parameter in the early part of the time series and a smaller rate in the later part. We are interested in locating the change point in the series, which perhaps is related to changes in mining safety regulations.In our model, $$ \begin{aligned} D_t &\sim \text{Pois}(r_t), r_t= \begin{cases} e, & \text{if } t \le s \\ l, & \text{if } t \gt s \end{cases} \\ s &\sim \text{Unif}(t_l, t_h)\\ e &\sim \text{exp}(1)\\ l &\sim \text{exp}(1) \end{aligned}$$the parameters are defined as follows: * $D_t$: The number of disasters in year $t$ * $r_t$: The rate parameter of the Poisson distribution of disasters in year $t$. * $s$: The year in which the rate parameter changes (the switchpoint). * $e$: The rate parameter before the switchpoint $s$. * $l$: The rate parameter after the switchpoint $s$. * $t_l$, $t_h$: The lower and upper boundaries of year $t$. This model is built much like our previous models. The major differences are the introduction of discrete variables with the Poisson and discrete-uniform priors and the novel form of the deterministic random variable `rate`.
###Code
with pm.Model() as disaster_model:
switchpoint = pm.DiscreteUniform('switchpoint', lower=year.min(), upper=year.max(), testval=1900)
# Priors for pre- and post-switch rates number of disasters
early_rate = pm.Exponential('early_rate', 1)
late_rate = pm.Exponential('late_rate', 1)
# Allocate appropriate Poisson rates to years before and after current
rate = pm.math.switch(switchpoint >= year, early_rate, late_rate)
disasters = pm.Poisson('disasters', rate, observed=disaster_data)
###Output
_____no_output_____
###Markdown
The logic for the rate random variable,```pythonrate = switch(switchpoint >= year, early_rate, late_rate)```is implemented using `switch`, a Theano function that works like an if statement. It uses the first argument to switch between the next two arguments.Missing values are handled transparently by passing a `MaskedArray` or a `pandas.DataFrame` with NaN values to the `observed` argument when creating an observed stochastic random variable. Behind the scenes, another random variable, `disasters.missing_values`, is created to model the missing values. All we need to do to handle the missing values is ensure we sample this random variable as well. Unfortunately, because they are discrete variables and thus have no meaningful gradient, we cannot use NUTS for sampling `switchpoint` or the missing disaster observations. Instead, we will sample using a `Metropolis` step method, which implements adaptive Metropolis-Hastings, because it is designed to handle discrete values. `PyMC3` automatically assigns the correct sampling algorithms.
###Code
with disaster_model:
trace = pm.sample(10000)
###Output
Assigned Metropolis to switchpoint
Assigned NUTS to early_rate_log__
Assigned NUTS to late_rate_log__
Assigned Metropolis to disasters_missing
100%|██████████| 10500/10500 [00:11<00:00, 929.53it/s]
###Markdown
In the trace plot below we can see that there's about a 10 year span that's plausible for a significant change in safety, but a 5 year span that contains most of the probability mass. The distribution is jagged because of the jumpy relationship between the year switchpoint and the likelihood and not due to sampling error.
###Code
_ = pm.traceplot(trace)
###Output
_____no_output_____
###Markdown
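A quick numerical check of that span can be obtained directly from the sampled switchpoint values (a sketch reusing the `trace` and the NumPy import already in scope):
```python
# Sketch only: posterior mean and central 95% interval of the switchpoint year
sp = trace['switchpoint']
print(sp.mean(), np.percentile(sp, [2.5, 97.5]))
```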
Arbitrary deterministicsDue to its reliance on Theano, PyMC3 provides many mathematical functions and operators for transforming random variables into new random variables. However, the library of functions in Theano is not exhaustive, therefore Theano and PyMC3 provide functionality for creating arbitrary Theano functions in pure Python, and including these functions in PyMC models. This is supported with the `as_op` function decorator.Theano needs to know the types of the inputs and outputs of a function, which are specified for `as_op` by `itypes` for inputs and `otypes` for outputs. The Theano documentation includes [an overview of the available types](http://deeplearning.net/software/theano/library/tensor/basic.htmlall-fully-typed-constructors).
###Code
import theano.tensor as tt
from theano.compile.ops import as_op
@as_op(itypes=[tt.lscalar], otypes=[tt.lscalar])
def crazy_modulo3(value):
if value > 0:
return value % 3
else :
return (-value + 1) % 3
with pm.Model() as model_deterministic:
a = pm.Poisson('a', 1)
b = crazy_modulo3(a)
###Output
_____no_output_____
###Markdown
An important drawback of this approach is that it is not possible for `theano` to inspect these functions in order to compute the gradient required for the Hamiltonian-based samplers. Therefore, it is not possible to use the HMC or NUTS samplers for a model that uses such an operator. However, it is possible to add a gradient if we inherit from `theano.Op` instead of using `as_op`. The PyMC example set includes [a more elaborate example of the usage of as_op](https://github.com/pymc-devs/pymc3/blob/master/pymc3/examples/disaster_model_arbitrary_deterministic.py). Arbitrary distributionsSimilarly, the library of statistical distributions in PyMC3 is not exhaustive, but PyMC allows for the creation of user-defined functions for an arbitrary probability distribution. For simple statistical distributions, the `DensityDist` function takes as an argument any function that calculates a log-probability $log(p(x))$. This function may employ other random variables in its calculation. Here is an example inspired by a blog post by Jake Vanderplas on which priors to use for a linear regression (Vanderplas, 2014). ```pythonimport theano.tensor as ttwith pm.Model() as model: alpha = pm.Uniform('intercept', -100, 100) Create custom densities beta = pm.DensityDist('beta', lambda value: -1.5 * tt.log(1 + value**2), testval=0) eps = pm.DensityDist('eps', lambda value: -tt.log(tt.abs_(value)), testval=1) Create likelihood like = pm.Normal('y_est', mu=alpha + beta * X, sd=eps, observed=Y)``` For more complex distributions, one can create a subclass of `Continuous` or `Discrete` and provide the custom `logp` function, as required. This is how the built-in distributions in PyMC are specified. As an example, fields like psychology and astrophysics have complex likelihood functions for a particular process that may require numerical approximation. In these cases, it is impossible to write the function in terms of predefined theano operators and we must use a custom theano operator using `as_op` or inheriting from `theano.Op`. Implementing the `beta` variable above as a `Continuous` subclass is shown below, along with a sub-function.
###Code
class Beta(pm.Continuous):
def __init__(self, mu, *args, **kwargs):
super(Beta, self).__init__(*args, **kwargs)
self.mu = mu
self.mode = mu
def logp(self, value):
mu = self.mu
return beta_logp(value - mu)
def beta_logp(value):
return -1.5 * np.log(1 + (value)**2)
with pm.Model() as model:
beta = Beta('slope', mu=0, testval=0)
###Output
_____no_output_____
###Markdown
If your logp cannot be expressed in Theano, you can decorate the function with `as_op` as follows: `@as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])`. Note that this will create a black-box Python function that will be much slower and will not provide the gradients necessary for, e.g., NUTS. Generalized Linear ModelsGeneralized Linear Models (GLMs) are a class of flexible models that are widely used to estimate regression relationships between a single outcome variable and one or multiple predictors. Because these models are so common, `PyMC3` offers a `glm` submodule that allows flexible creation of various GLMs with an intuitive `R`-like syntax that is implemented via the `patsy` module.The `glm` submodule requires data to be included as a `pandas` `DataFrame`. Hence, for our linear regression example:
###Code
# Convert X and Y to a pandas DataFrame
import pandas
df = pandas.DataFrame({'x1': X1, 'x2': X2, 'y': Y})
###Output
_____no_output_____
###Markdown
The model can then be very concisely specified in one line of code.
###Code
from pymc3.glm import GLM
with pm.Model() as model_glm:
GLM.from_formula('y ~ x1 + x2', df)
trace = pm.sample()
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using ADVI...
Average Loss = 164.07: 5%|▌ | 10879/200000 [00:01<00:18, 9972.08it/s]
Convergence archived at 11100
Interrupted at 11,100 [5%]: Average Loss = 220.44
100%|██████████| 1000/1000 [00:00<00:00, 1162.95it/s]
###Markdown
The error distribution, if not specified via the `family` argument, is assumed to be normal. In the case of logistic regression, this can be modified by passing in a `Binomial` family object.
###Code
from pymc3.glm.families import Binomial
df_logistic = pandas.DataFrame({'x1': X1, 'y': Y > np.median(Y)})
with pm.Model() as model_glm_logistic:
GLM.from_formula('y ~ x1', df_logistic, family=Binomial())
###Output
_____no_output_____
###Markdown
For a more complete and flexible formula interface, including hierarchical GLMs, see [Bambi](https://github.com/bambinos/bambi). Backends`PyMC3` has support for different ways to store samples during and after sampling, called backends, including in-memory (default), text file, and SQLite. These can be found in `pymc3.backends`. By default, an in-memory `ndarray` is used, but if the samples would get too large to be held in memory we could use the `hdf5` backend:
###Code
from pymc3.backends import HDF5
with pm.Model() as model_glm_logistic:
GLM.from_formula('y ~ x1', df_logistic, family=Binomial())
backend = HDF5('trace.h5')
trace = pm.sample(trace=backend)
backend.close()
pm.summary(trace, varnames=['x1'])
###Output
x1:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
-1.404 0.283 0.016 [-1.950, -0.862]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
-2.020 -1.582 -1.401 -1.208 -0.891
###Markdown
The stored trace can then later be loaded using the `load` command:
###Code
from pymc3.backends.hdf5 import load
with model_glm_logistic:
trace_loaded = load('trace.h5')
###Output
_____no_output_____ |
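###Markdown
The text-file and SQLite backends mentioned above follow the same pattern; here is a sketch with the text backend, assuming `Text` is importable from `pymc3.backends` in the same way as `HDF5`:
```python
# Sketch only: writing samples to a directory of text files instead of HDF5
from pymc3.backends import Text
with model_glm_logistic:
    trace_txt = pm.sample(trace=Text('glm_trace'))
```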
tutorials/pet_aws.ipynb | ###Markdown
Elastic training with Classy VisionThis tutorial will demonstrate how to use [PyTorch Elastic](https://github.com/pytorch/elastic) with Classy Vision. Prerequisites1. (recommended) cloud provider instance with GPUs;2. [Docker](https://docs.docker.com/get-docker/)3. [NVidia container toolkit](https://github.com/NVIDIA/nvidia-docker) 1. Setup Download the PyTorch Elastic repository and install it. Run in your terminal:
###Code
! git clone https://github.com/pytorch/elastic.git
! pip install torchelastic
###Output
_____no_output_____
###Markdown
Download and install Classy Vision:
###Code
! git clone https://github.com/facebookresearch/ClassyVision.git
! pip install classy_vision
###Output
_____no_output_____
###Markdown
If needed, install Docker Compose (used for the multi-container example later on):
###Code
! sudo apt install docker-compose
###Output
_____no_output_____
###Markdown
To run torchelastic manually you'll also need etcd:
###Code
! sudo apt install etcd-server
###Output
_____no_output_____
###Markdown
Set this environment variable to your current `torchelastic` version. This tutorial only works for version >= 0.2.0:
###Code
! export VERSION=<torchelastic version>
###Output
_____no_output_____
###Markdown
1. Single node, multi-GPU trainingThe easiest way to get started is to use our example docker image. Run the following in your shell:``` export NUM_CUDA_DEVICES=2```
###Code
$ docker run --shm-size=2g --gpus=all torchelastic/examples:$VERSION \
       --standalone \
       --nnodes=1 \
       --nproc_per_node=$NUM_CUDA_DEVICES \
       /workspace/classy_vision/classy_train.py \
       --device=gpu \
       --config_file /workspace/classy_vision/configs/template_config.json
###Output
_____no_output_____
###Markdown
If you don't have GPUs available, simply drop the `--gpus=all` flag. This will download and launch our example Docker container and start training on the current machine using torchelastic and Classy Vision. This is fine as a sanity check, but elasticity is really intended to help with training on multiple nodes. The next section will walk you through that. 2. Launching torchelastic manually Now let's replicate what the Docker example in the previous section did, to see how things work under the hood. torchelastic provides a drop-in replacement for `torch.distributed.launch` that is compatible with Classy Vision's `classy_train.py`. The main difference is that torchelastic requires launching an `etcd` server so that the workers know how to communicate with each other. In your shell, run this:
###Code
! classy-project my-project
%cd my-project
###Output
_____no_output_____
###Markdown
Launch the etcd server:
###Code
! etcd --enable-v2 --listen-client-urls http://0.0.0.0:2379,http://127.0.0.1:4001 --advertise-client-urls http://127.0.0.1:2379
###Output
_____no_output_____
###Markdown
This might fail if you already have an etcd server running. torchelastic requires etcd v2 in order to work properly, so make sure to kill any etcd instances that you have running already. Start training:
###Code
! python -m torchelastic.distributed.launch --nproc_per_node=$NUM_CUDA_DEVICES --rdzv_endpoint 127.0.0.1:2379 \
./classy_train.py --config configs/template_config.json --distributed_backend ddp
###Output
_____no_output_____
###Markdown
That's it! The training script should start running with torchelastic enabled. Take a look at this [link](http://pytorch.org/elastic/0.2.0/train_script.html) for the full documentation on how `torchelastic.distributed.launch` works. 3. Multi-container `torchelastic` is meant to help with distributed training on multiple machines. In this part, we will simulate a multi-machine setup by launching multiple containers on the same host. Set this environment variable for the location of your ClassyVision repository:```export CLASSY_VISION_HOME=~/ClassyVision```In your shell, run:
###Code
cd $CLASSY_VISION_HOME/examples/elastic
classy-project my_project
###Output
_____no_output_____
###Markdown
This will setup a Classy Vision project within the examples folder, which our containers will use as the training script. Now launch the containers:
###Code
docker-compose up
###Output
_____no_output_____
###Markdown
Elastic training with Classy VisionThis tutorial will demonstrate how to launch a training job on Amazon Web Services ([AWS](https://aws.amazon.com/)) using [PyTorch Elastic](https://github.com/pytorch/elastic) and Classy Vision. Prerequisites1. Familiarity with basic AWS (EC2, Auto Scaling Groups, S3, EFS).2. (suggested) install and set up [`awscli`](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html).3. Basic knowledge of containers (we use Docker in our examples). 1. Setup Download the PyTorch Elastic repository and install it. Run in your terminal:
###Code
! git clone https://github.com/pytorch/elastic.git
! pip install torchelastic
###Output
_____no_output_____
###Markdown
Install the required dependencies for AWS:
###Code
% cd elastic/aws
! pip install -r requirements.txt
###Output
_____no_output_____ |
T2_LT6A_DMW_MP3.ipynb | ###Markdown
Clustering the Tokyo, Japan Airbnb listings made by superhosts by room features, booking, and review features by Nika Espiritu and Daryll Tumambing Executive Summary What characterizes superhost listings? What are the types of superhost listings, such that a traveler can discern where they should stay? And what can these superhosts do to stand out amongst other superhosts? To answer this, the Airbnb `listings` dataset of Tokyo, Kantō, Japan was used. A pre-COVID scenario was considered; thus, the December 30, 2019 crawl date was used. The dataset was then trimmed down by only taking the listings posted by superhosts. From here, the researchers conducted clustering on the resulting superhosts dataset and searched for common characteristics among the listings through an exploratory data analysis on each cluster found. By conducting principal component analysis and k-medians clustering, the researchers were able to come up with 3 distinct clusters, fit for 3 customer types: high-paying guests, families, and budget travelers. Distinct features of each cluster were identified through exploratory data analysis on variables related to property type, price, ratings, and amenities. High-paying guests are faced with cost-inefficient Airbnb listings. Families are presented with listings that are more spacious and have more family-friendly amenities. Finally, budget travelers can choose from budget-friendly options, all rich with amenities similar to listings fit for families. IntroductionAirbnb has been known as a disruptor in the hospitality industry, with the numerous listings available on the platform rivaling the experience provided by typical hotels and hostels. However, a key distinction of one listing from another is the “superhost” badge placed on it. With this badge, superhosts are able to have prime spots on search results. They may also command a higher price point, given the extra visibility and trust placed on them. With this in mind, the researchers are interested in answering the following question: what characterizes superhost listings? What are their similarities and differences? For existing and incoming Airbnb hosts, this is an interesting problem, as this paper provides insights on getting ahead in the Airbnb market. Travelers, meanwhile, are given insights on the typical Airbnb in an area. Most importantly, this paper gives insights on the inherent customer segmentation done by Airbnb. The researchers used the Airbnb `listings` dataset of Tokyo, Kantō, Japan, found on Jojie. The latest pre-COVID crawl date, December 30, 2019, was considered in the analysis, covering all listings found on the Airbnb platform on that date. After data pre-processing, principal component analysis and k-means and k-medians clustering were conducted. Dataset The researchers utilized the `listings.csv.gz` Airbnb dataset of Tokyo, Kantō, Japan from the 30 December 2019 crawl date. The following libraries were used to aid in the analysis.
###Code
import pandas as pd
import seaborn as sns
import numpy as np
import sys
import os
import json
import re
import gzip
import seaborn as sns
from tqdm.autonotebook import tqdm
import matplotlib.pyplot as plt
from sklearn.impute import SimpleImputer
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler
from sklearn.metrics import calinski_harabasz_score, silhouette_score
from sklearn.cluster import KMeans
from pyclustering.cluster.kmedians import kmedians
import folium
import IPython
np.set_printoptions(threshold=sys.maxsize)
listings_df = pd.read_csv('/mnt/data/public/insideairbnb/data.insideairbnb.com/'
'japan/kantō/tokyo/2019-12-30/data/listings.csv.gz',
compression='gzip',
low_memory=False)
print(listings_df.shape)
listings_df.head(5)
###Output
(14550, 106)
###Markdown
Data pre-processing All listings made by superhosts were first isolated.
###Code
superhosts_listings_df = listings_df[listings_df['host_is_superhost']=='t']
###Output
_____no_output_____
###Markdown
The following columns were dropped because they contain no values at all: `thumbnail_url`, `medium_url`, `xl_picture_url`, `neighbourhood_group_cleansed`, `jurisdiction_names`, `host_acceptance_rate`, and `experiences_offered`. These fields may have been phased out as the Airbnb dataset evolved.
###Code
superhosts_listings_df = superhosts_listings_df.drop(['thumbnail_url',
'medium_url', 'xl_picture_url', 'neighbourhood_group_cleansed',
'jurisdiction_names', 'host_acceptance_rate',
'experiences_offered'], axis=1)
###Output
_____no_output_____
###Markdown
The following columns were dropped because all listings have similar values in these columns. 1. `is_business_travel_ready`: signifies if an Airbnb listing is ready for business travel, and interestingly all listings are *not* ready for business travel. This may also be traced back to the researchers' chosen crawl date: December 30, 2019, a date covered by the holiday season. 2. `requires_license`: signifies if a host requires a license from the guest to be able to stay. This will be discussed more on _[blank]_. All listings surprisingly require a license. 3. `license`: details out a host's license. All listings of superhosts have a registered license. 4. `has_availability`: All listings are available. 5. `country_code`: All listings follow the `JP` code. 6. `country`: All listings specify `Japan`. 7. `host_is_superhost`: All listings are owned by superhosts. 8. `state`: All listings are found in Tokyo.
###Code
superhosts_listings_df = superhosts_listings_df\
.drop(['is_business_travel_ready',
'requires_license', 'license',
'has_availability', 'country_code',
'country', 'host_is_superhost', 'state'],
axis=1)
###Output
_____no_output_____
###Markdown
With the various data types present in the dataset, the columns were sorted into groups for batch processing: `existence_columns`, `to_split_columns`, `boolean_columns`, `numerical_columns`, `process_to_numerical_columns`, `dummify_columns`, and `ordinal_columns`. `existence_columns` pertain to free-text fields that describe a particular listing. For ease of analysis, these were converted into Boolean variables: 1 if the field contains text and 0 if it is empty.
###Code
existence_columns = ['summary', 'space',
'description', 'neighborhood_overview',
'notes', 'transit', 'access', 'interaction',
'house_rules', 'host_location', 'host_about',
'host_neighbourhood', 'street', 'neighbourhood',
'smart_location']
superhosts_listings_df[existence_columns +
list(map(lambda x: f'contains_{x}',
existence_columns))] = \
superhosts_listings_df[existence_columns].\
assign(**{f'contains_{k}':
superhosts_listings_df[k].isna().\
apply(lambda x: 0 if x else 1)
for k in existence_columns})
superhosts_listings_df[superhosts_listings_df.columns[\
superhosts_listings_df.columns.str.contains('contains_')]].head(5)
###Output
_____no_output_____
###Markdown
`to_split_columns` pertain to `host_verifications` and `amenities`, both of which are lists encoded as strings. `host_verifications` details the various methods by which a superhost is verified. A listing with `[Facebook, Government ID, Email]` means the host of the listing is verified through these methods. The list can contain any of the following verification methods: `weibo`, `zhima_selfie`, `reviews`, `government_id`, `kba`, `facebook`, `offline_government_id`, `google`, `sesame_offline`, `selfie`, `jumio`, `work_email`, `manual_offline`, `manual_online`, `identity_manual`, `phone`, `sesame`, `email`, or `sent_id`. Host verifications were converted into Boolean variables, one column for each method, taking the value of 1 if the host is verified through that method and 0 if not. Each listing also has an `amenities` field, a list of all the amenities available in an Airbnb. A listing can have up to 154 possible amenities, covering items related to the kitchen, bathroom, parking, laundry, entertainment options, accessibility features for persons with disabilities, as well as child-friendly features (e.g., cribs, toys for babies), among others. This was likewise split into Boolean variables, one column for each amenity, taking the value of 1 if the amenity exists in the listing and 0 if not.
###Code
to_split_columns = ['host_verifications', 'amenities']
superhosts_listings_df.loc[\
superhosts_listings_df['host_verifications']=='None',
['host_verifications']] = '[]'
host_verifications = superhosts_listings_df[to_split_columns]\
['host_verifications']
set_host_verifications = set()
[[set_host_verifications.add(hv) for hv in y] for
y in list(map(lambda x: json.loads(x.replace("'", '"')),
host_verifications))]
set_host_verifications = list(set_host_verifications)
superhosts_listings_df = superhosts_listings_df.assign(\
**{f'host_verifications_{k}':0 for k in set_host_verifications})
for idx, hvs in tqdm(enumerate(list(map(lambda x:
json.loads(x.replace("'", '"')),
host_verifications)))):
for hv in hvs:
superhosts_listings_df[f'host_verifications_{hv}'].iloc[idx] = 1
amenities = superhosts_listings_df[to_split_columns]['amenities']
set_amenities = set()
amnty_pattern = re.compile(r'(,)?(\w+),')
[[set_amenities.add(amnty.replace('"', '')) for amnty in y.split(',')]
for y in list(map(lambda x:
amnty_pattern.sub(r'\1"\2",', x[1:-1]), amenities))]
set_amenities = list(filter(lambda x: x, (set_amenities)))
# set_amenities
superhosts_listings_df = superhosts_listings_df.assign(**{f'amenities_{k}':0
for k in
set_amenities})
for idx, amntis in tqdm(enumerate([[amnty.replace('"', '') for amnty in
y.split(',')] for y in
list(map(lambda x:
amnty_pattern.sub(r'\1"\2",',
x[1:-1]),
amenities))])):
for amnty in amntis:
if amnty:
superhosts_listings_df[f'amenities_{amnty}'].iloc[idx] = 1
###Output
_____no_output_____
###Markdown
The original Boolean columns listed below can take a value of `t` or `f`. These were changed to `1` and `0`, respectively.
###Code
boolean_columns = ['host_has_profile_pic', 'host_identity_verified',
'is_location_exact', 'instant_bookable',
'require_guest_profile_picture',
'require_guest_phone_verification']
superhosts_listings_df[boolean_columns] = \
superhosts_listings_df[boolean_columns].applymap(lambda x: 1 if x == 't' else 0)
###Output
_____no_output_____
###Markdown
Columns that refer to price were originally encoded as strings because they include currency symbols and thousands separators. These were transformed into float variables.
###Code
numerical_columns = ['host_listings_count', 'accommodates',
'bathrooms', 'bedrooms',
'beds', 'square_feet',
'cleaning_fee', 'guests_included',
'minimum_nights', 'maximum_nights',
'minimum_minimum_nights', 'maximum_minimum_nights',
'minimum_maximum_nights', 'maximum_maximum_nights',
'minimum_nights_avg_ntm', 'maximum_nights_avg_ntm',
'availability_30', 'availability_60', 'availability_90',
'availability_365', 'number_of_reviews',
'number_of_reviews_ltm', 'review_scores_rating',
'review_scores_accuracy', 'review_scores_cleanliness',
'review_scores_checkin', 'review_scores_communication',
'review_scores_location', 'review_scores_value',
'calculated_host_listings_count',
'calculated_host_listings_count_entire_homes',
'calculated_host_listings_count_private_rooms',
'calculated_host_listings_count_shared_rooms',
'reviews_per_month']
process_to_numerical_columns = ['price', 'weekly_price',
'monthly_price', 'security_deposit',
'cleaning_fee', 'extra_people']
numerical_patterns = re.compile(r'[,|\$]')
superhosts_listings_df[process_to_numerical_columns] = \
superhosts_listings_df[process_to_numerical_columns].fillna('-1').\
applymap(lambda x: numerical_patterns.sub('', str(x))).astype(float)
superhosts_listings_df['host_response_rate'] = \
superhosts_listings_df['host_response_rate'].str.replace('%',
'').astype(float)
###Output
_____no_output_____
###Markdown
The following categorical variables were set aside for dummy encoding; they are dummified along with the rest of the dataset later via `pd.get_dummies`.
###Code
dummify_columns = ['property_type', 'room_type', 'bed_type',
'cancellation_policy', 'market']
###Output
_____no_output_____
###Markdown
Finally, `host_response_time` was transformed into an ordinal variable instead of a categorical one, as it takes one of the following ordered values: `within an hour`, `within a few hours`, `within a day`, `a few days or more`; these were replaced with 0, 1, 2, and 3, respectively. The `cancellation_policy` column was likewise mapped to an ordinal scale from `flexible` (0) up to `super_strict_60` (5).
###Code
ordinal_columns = ['host_response_time']
superhosts_listings_df['host_response_time'] = \
superhosts_listings_df['host_response_time']\
.map({'within an hour': 0,
'within a few hours': 1,
'within a day': 2,
'a few days or more': 3})
superhosts_listings_df['cancellation_policy'] = \
superhosts_listings_df['cancellation_policy']\
.map({'flexible': 0,
'moderate': 1,
'strict_14_with_grace_period': 2,
'strict': 3, 'super_strict_30': 4,
'super_strict_60': 5})
superhosts_listings_df = superhosts_listings_df.drop(existence_columns, axis=1)
superhosts_listings_df.head(5)
###Output
_____no_output_____
###Markdown
The following columns were dropped from the `DataFrame`, as these will not be used in clustering: `id`, `picture_url`, `host_thumbnail_url`, `host_picture_url`, `listing_url`, `scrape_id`, `last_scraped`, `name`, `host_id`, `host_url`, `host_name`, `host_since`, `host_has_profile_pic`, `neighbourhood_cleansed`, `city`, `zipcode`, `first_review`, `last_review`, `calendar_last_scraped`, and `calendar_updated`, along with the raw `host_verifications` and `amenities` strings that have already been expanded into Boolean columns. All pertain to scrape metadata, URLs, dates, or free text. The column `host_total_listings_count` is also dropped, as it has the same values as `host_listings_count`.
###Code
non_clustering_columns = ['id', 'picture_url', 'host_thumbnail_url',
'host_picture_url', 'listing_url', 'scrape_id',
'last_scraped', 'name',
'host_id', 'host_url', 'host_name',
'host_since', 'host_total_listings_count',
'host_total_listings_count', 'host_has_profile_pic',
'neighbourhood_cleansed', 'city', 'zipcode',
'first_review', 'last_review', 'host_verifications',
'amenities', 'calendar_last_scraped',
'calendar_updated']
superhosts_listings_df = superhosts_listings_df.drop(non_clustering_columns,
axis=1)
superhosts_listings_df.head(3)
###Output
_____no_output_____
###Markdown
The dataset's categorical variables were converted into dummy variables.
###Code
airbnb_df = pd.get_dummies(superhosts_listings_df)
###Output
_____no_output_____
###Markdown
After data cleanup, imputation was performed for missing values. The variables were sorted into two groups: columns for median imputation and columns for mode imputation. Numerical variables were placed under median imputation, while Boolean variables were placed under mode imputation. Median imputation was used for the price-related variables instead of filling with 0 because, although there are listings reporting a USD 0 security deposit or cleaning fee, such values are unlikely to be accurate. According to [Airbnb](https://www.airbnb.com/help/article/2526/as-a-host-what-should-i-know-about-security-deposits:~:text=Guests%20will%20be%20shown%20the,for%20Airbnb%2Drequired%20security%20deposits), they apply their own formula in addition to the amount set by the host. This is further expounded below:
> Guests will be shown the amount of an Airbnb-required security deposit when they’re booking the reservation. We calculate the security deposit amount according to 60% of a listing’s nightly rate (this amount adjusts for longer stays). There is a USD 1,000 limit for Airbnb-required security deposits. If you require a security deposit for your listing and we also determine that one is required for a specific reservation, the amount that you set will be used. If the amount that you set is for USD 0, then the security deposit amount determined by Airbnb will be used.
###Code
median_impute_cols = ['host_response_rate', 'host_listings_count',
'accommodates', 'accommodates', 'bathrooms',
'bedrooms', 'beds', 'square_feet', 'price', 'weekly_price',
'monthly_price', 'security_deposit', 'cleaning_fee',
'guests_included', 'extra_people', 'minimum_nights',
'maximum_nights', 'minimum_minimum_nights',
'maximum_minimum_nights', 'minimum_maximum_nights',
'maximum_maximum_nights', 'minimum_nights_avg_ntm',
'maximum_nights_avg_ntm', 'availability_30', 'availability_60',
'availability_90', 'availability_365', 'number_of_reviews',
'number_of_reviews_ltm', 'review_scores_rating',
'review_scores_accuracy', 'review_scores_cleanliness',
'review_scores_checkin', 'review_scores_communication',
'review_scores_location', 'review_scores_value',
'calculated_host_listings_count',
'calculated_host_listings_count_entire_homes',
'calculated_host_listings_count_private_rooms',
'calculated_host_listings_count_shared_rooms', 'reviews_per_month',]
mode_impute_cols = ['host_response_time', 'host_identity_verified',
'is_location_exact','instant_bookable',
'cancellation_policy',
'require_guest_profile_picture',
'require_guest_phone_verification'] \
+ list(airbnb_df.columns[airbnb_df.columns.str.contains('contains_')])\
+ list(airbnb_df.columns[airbnb_df.columns.str\
.contains('host_verifications_')])\
+ list(airbnb_df.columns[airbnb_df.columns.str.contains('amenities_')]) \
+ list(airbnb_df.columns[airbnb_df.columns.str.contains('market_')]) \
+ list(airbnb_df.columns[airbnb_df.columns.str.contains('property_type_')]) \
+ list(airbnb_df.columns[airbnb_df.columns.str.contains('room_type_')]) \
+ list(airbnb_df.columns[airbnb_df.columns.str.contains('bed_type_')])
airbnb_df[median_impute_cols] = pd.DataFrame(SimpleImputer(\
missing_values=np.nan,
strategy='median').\
fit_transform(\
airbnb_df[median_impute_cols]),
columns=median_impute_cols).values
airbnb_df[median_impute_cols] = pd.DataFrame(SimpleImputer(\
missing_values=-1,
strategy='median').\
fit_transform(\
airbnb_df[median_impute_cols]),
columns=median_impute_cols)\
.values
remaining_mode_impute_cols = list(airbnb_df[mode_impute_cols].\
columns[airbnb_df[mode_impute_cols]\
.isna().all(0)==False])
airbnb_df[remaining_mode_impute_cols] = pd.DataFrame(
SimpleImputer(missing_values=np.nan, strategy='most_frequent').\
fit_transform(airbnb_df[mode_impute_cols]),
columns=remaining_mode_impute_cols).values
###Output
_____no_output_____
###Markdown
Resulting dataset
###Code
print("The resulting shape of the `DataFrame` is now at {}.".format(airbnb_df.shape))
airbnb_df.head(2)
###Output
The resulting shape of the `DataFrame` is now at (4385, 263).
###Markdown
Methodology To understand the different types of Airbnb superhost listings, the researchers grouped the listings into clusters of similar attributes using the following pipeline: 1. Three scaling methods were tried on the data, namely StandardScaler, MinMaxScaler, and RobustScaler; **MinMaxScaler** provided the best end output. 2. After scaling, the researchers used sklearn's **PCA** (Principal Component Analysis) to reduce the dimensions for better interpretability, less complexity, and easier visualization. 3. With the feature set's dimension reduced, sklearn's KMeans and pyclustering's KMedians were run and validated across different numbers of clusters using **inertias** (within-cluster sum of squared errors) and **Calinski-Harabasz (CH) scores** (the ratio of between-cluster to within-cluster dispersion). 4. Based on the graph, 4 k-means clusters were initially chosen to cluster the data; however, 3 k-medians clusters provided better insights. The cluster predictions were assigned back to the dataset and exploratory data analysis was done on each cluster.
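For reference, the Calinski-Harabasz score for a partition of $n$ points into $k$ clusters is the standard ratio of between-cluster to within-cluster dispersion,
$$CH(k) = \frac{\operatorname{tr}(B_k)/(k-1)}{\operatorname{tr}(W_k)/(n-k)},$$
where $B_k$ and $W_k$ are the between-group and within-group dispersion matrices; higher values indicate more compact, better-separated clusters.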
###Code
pca = PCA(n_components=2, random_state=42)
airbnb_pca_df = pca.fit_transform(MinMaxScaler().fit_transform(airbnb_df))
plt.scatter(airbnb_pca_df[:,0], airbnb_pca_df[:,1])
plt.xlabel('PC1')
plt.ylabel('PC2');
sss = []
chs = []
gss = []
sss_medians = []
chs_medians = []
gss_medians = []
for i in tqdm(range(2,10)):
km = KMeans(n_clusters=i, random_state=0).fit(airbnb_pca_df)
cluster_kmeans = km.predict(airbnb_pca_df)
sss.append(km.inertia_)
chs.append(calinski_harabasz_score(airbnb_pca_df, cluster_kmeans))
initial_medians = airbnb_pca_df[:i]
kmedians_instance_tmp = kmedians(airbnb_pca_df, initial_medians)
kmedians_instance_tmp.process()
cluster_kmedians = kmedians_instance_tmp.predict(airbnb_pca_df)
sss_medians.append(kmedians_instance_tmp.get_total_wce())
chs_medians.append(calinski_harabasz_score(airbnb_pca_df, cluster_kmedians))
fig, ax = plt.subplots()
ks = np.arange(2, 10)
ax.plot(ks, sss, '-yo', label='SSE kmeans')
ax.plot(ks, chs, '-ro', label='CH kmeans')
ax.plot(ks, sss_medians, '-go', label='SSE kmedians')
ax.plot(ks, chs_medians, '-bo', label='CH kmedians')
ax.legend();
kmeans_instance = KMeans(n_clusters=4, random_state=0).fit(airbnb_pca_df)
kmeans_predictions = kmeans_instance.predict(airbnb_pca_df)
initial_medians = airbnb_pca_df[:3]
kmedians_instance = kmedians(airbnb_pca_df, initial_medians)
kmedians_instance.process()
kmedian_predictions = kmedians_instance.predict(airbnb_pca_df)
airbnb_df['kmeans_cluster'] = kmeans_predictions
airbnb_df['kmedians_cluster'] = kmedian_predictions
###Output
_____no_output_____
###Markdown
Results and Discussion For consistency of findings, the preprocessed and clustered data produced by the pipeline above were saved and are reloaded into the following DataFrame, which is used in the results and discussion that follow.
###Code
airbnb_df = pd.read_csv('superhosts_cluster_minmax_imputed.csv.gz',
compression='gzip')
###Output
_____no_output_____
###Markdown
Insight 1: The majority of listings available in Tokyo are apartments, but each cluster contains a unique mix of property types. To gain a basic understanding of the listings available in Tokyo, the researchers first sought to identify which property types are representative of each cluster. The majority of the Tokyo listings in each cluster are classified as apartments, with apartments making up 51-54% of Clusters 0 and 1 and, notably, 81% of Cluster 2. However, the researchers also found interesting insights based on the other property types. Table 1 details the proportion of property types found in each cluster.
###Code
prop_type_short = ['property_type_Aparthotel', 'property_type_Apartment',
'property_type_Bed and breakfast', 'property_type_Boutique hotel',
'property_type_Condominium', 'property_type_Hostel',
'property_type_House', 'property_type_Ryokan (Japan)']
print("Table 1. Proportion of listings for each property in a cluster.")
display(pd.DataFrame(airbnb_df.groupby(by='kmedians_cluster')\
[prop_type_short].mean()))
###Output
Table 1. Proportion of listings for each property in a cluster.
###Markdown
**Cluster 0**: With half of Cluster 0 being apartments, the two other major property types found here are hostels (15%) and houses (15%). Other property types found here are aparthotels (3%), condominiums (3%), boutique hotels (1%), and ryokan (1%). **Cluster 1**: Twenty-eight percent (28%) of Cluster 1 listings are houses, much larger spaces compared to apartments. Other property types found here, albeit in small numbers, are condominiums (5%) and hostels (2%). **Cluster 2**: Aside from apartments (81%), also found here are houses (8%) and condominiums (5%). Interestingly, no bed and breakfasts, hostels, or ryokans were placed in this cluster. A [ryokan (旅館)](https://www.japan-guide.com/e/e2029.html) is a type of traditional Japanese inn that typically features tatami-matted rooms, communal baths, futon beds, and other public areas where visitors may wear yukata and talk with the owner. Insight 2: Price tiering was found in clustering. With the aforementioned representative property types in mind, the researchers then investigated possible price tiering. Table 2 shows the mean price features per cluster.
###Code
related_to_price = ['price', 'security_deposit', 'cleaning_fee',
'extra_people', 'accommodates']
print("Table 2. Median price features per cluster.")
pd.DataFrame(airbnb_df.groupby(by=['kmedians_cluster'])[related_to_price].\
mean()).rename(columns={'price':'Price',
'security_deposit':'Security deposit',
'cleaning_fee':'Cleaning fee',
'extra_people':'Price per extra person',
'accommodates': 'Allowable number of guests'})\
.applymap(lambda x: f'{x:,.2f}')
###Output
Table 2. Mean price features per cluster.
###Markdown
Across the clusters, it is apparent that a specific price tiering exists. In the Airbnb dataset, price refers to the nightly price. The security deposit and cleaning fee are add-ons to the total amount, and both adjust according to the length of stay. Price per extra person refers to the cost of an additional guest exceeding the allowable number of guests. **Cluster 0**: Cluster 0 has the highest mean price at ¥26,076 but the lowest security deposit, cleaning fee, and price per extra person. However, it also accommodates the smallest number of allowable guests at 3.43, which suggests that listings found here are not as cost-efficient. **Cluster 1**: Cluster 1 has the highest allowable number of guests at 5.02 people, which points to these listings perhaps being targeted at families. It also has the highest security deposit and price per extra person. These listings can still be considered cost-efficient, as they are cheaper than Cluster 0 listings at an average of ¥6,606. **Cluster 2**: Cluster 2 listings have the lowest mean price yet the highest mean cleaning fee. Cluster 2 listings are also targeted at average-sized groups of travelers, accommodating 4.4 guests on average.
###Code
related_to_reviews = ['review_scores_rating',
'review_scores_cleanliness',
'review_scores_accuracy',
'review_scores_checkin',
'review_scores_communication',
'review_scores_location',
'review_scores_value',
'reviews_per_month']
print("Table 3. Mean rating features per cluster.")
pd.DataFrame(airbnb_df.groupby(by=['kmedians_cluster'])\
[related_to_reviews].mean()).rename(columns={
'review_scores_rating':'Overall rating',
'review_scores_cleanliness': 'Rating on cleanliness',
'review_scores_accuracy':'Rating on accuracy',
'review_scores_checkin':'Rating on check-in',
'review_scores_communication': 'Rating on communication',
'review_scores_location':'Rating on location',
'review_scores_value':'Rating on value',
'reviews_per_month':'Reviews per month'})
###Output
Table 3. Mean rating features per cluster.
###Markdown
As expected from superhost listings, features related to ratings are consistently high. However, there are still distinct features to highlight for each cluster. **Cluster 0**: Cluster 0 has the lowest mean overall rating at 95.89. It also ranks the lowest on all aspects except location. Because of its comparatively low ratings, it is not booked as frequently as the other clusters, with only 1.94 reviews per month. Guests appear to agree that staying in Cluster 0 listings does not guarantee the best experience. **Cluster 1**: Cluster 1 listings have the highest mean overall rating at 96.89. They also rank the highest in terms of accuracy, cleanliness, communication, and value. Both Clusters 1 and 2 receive a high number of reviews per month. **Cluster 2**: Cluster 2 listings perform fairly well across the specific rating categories, but notably have the highest score for check-in (9.92) and the lowest for location (9.54). They have the highest average number of reviews per month at 2.45. Insight 4: The clusters differ in the amenities they offer. Table 4 shows the proportion of listings within a cluster that possess a certain amenity.
###Code
family_cols = ['amenities_Microwave', 'amenities_Washer',
'amenities_Hot water kettle', 'amenities_Kitchen',
'amenities_Cooking basics','amenities_Refrigerator',
'amenities_Dishes and silverware',
'amenities_Extra pillows and blankets', 'amenities_Oven',
'amenities_TV', 'amenities_No stairs or steps to enter',
'amenities_Family/kid friendly']
kmedians_cluster_sizes = airbnb_df.groupby('kmedians_cluster').size()
amenities_superhosts = airbnb_df.groupby('kmedians_cluster').sum()[family_cols]
print("Table 4. Proportion of listings within a cluster that "
"possess basic amenities.")
pd.DataFrame([amenities_superhosts.iloc[0]/kmedians_cluster_sizes[0],
amenities_superhosts.iloc[1]/kmedians_cluster_sizes[1],
amenities_superhosts.iloc[2]/kmedians_cluster_sizes[2]])
###Output
Table 4. Proportion of listings within a cluster that possess basic amenities.
###Markdown
**Cluster 0**: Given the very high price point of Cluster 0 listings, it is remarkable that they still get relatively high scores despite listing relatively few amenities; notice that the cluster consistently ranks the lowest in Table 4. **Cluster 1**: Cluster 1 consistently has the highest proportion of listings with amenities fit for families, allowing a more convenient stay for its likely target audience of families. Also seen in Table 4 are listed amenities such as being `Family/kid friendly` and having `No stairs or steps to enter`, which give families more assurance and confidence in staying in the listed unit. In addition, Table 5 shows other amenities fit for families with babies and toddlers; Cluster 1 consistently ranks the highest in providing these amenities.
###Code
for_kids = ['amenities_Children’s dinnerware',
'amenities_Children’s books and toys',
'amenities_Outlet covers', 'amenities_Baby monitor',
'amenities_Baby bath', 'amenities_Babysitter recommendations',
'amenities_Table corner guards',
'amenities_Pack ’n Play/travel crib',
'amenities_Family/kid friendly', 'amenities_Window guards',
'amenities_Flat path to guest entrance', 'amenities_High chair',
'amenities_Crib', 'amenities_Changing table',
'amenities_Fireplace guards', 'amenities_Stair gates']
kmedians_cluster_sizes = airbnb_df.groupby('kmedians_cluster').size()
amenities_superhosts = airbnb_df.groupby('kmedians_cluster').sum()[for_kids]
print("Table 5. Proportion of listings within a cluster that "
"possess amenities for babies.")
pd.DataFrame([amenities_superhosts.iloc[0]/kmedians_cluster_sizes[0],
amenities_superhosts.iloc[1]/kmedians_cluster_sizes[1],
amenities_superhosts.iloc[2]/kmedians_cluster_sizes[2]])
###Output
Table 5. Proportion of listings within a cluster that possess amenities for babies.
###Markdown
**Cluster 2**: Cluster 2 listings offer much the same amenities as Cluster 1 listings, as their proportions are comparable for most amenities. However, it is worth noting that Cluster 2 listings take the lead in amenities for travelers who bring work with them, as shown in Table 6.
###Code
biz_friendly = ['amenities_Laptop friendly workspace', 'amenities_Wifi']
kmedians_cluster_sizes = airbnb_df.groupby('kmedians_cluster').size()
amenities_superhosts = airbnb_df.groupby('kmedians_cluster').sum()[biz_friendly]
print("Table 6. Proportion of listings within a cluster that possess "
"work-friendly amenities.")
pd.DataFrame([amenities_superhosts.iloc[0]/kmedians_cluster_sizes[0],
amenities_superhosts.iloc[1]/kmedians_cluster_sizes[1],
amenities_superhosts.iloc[2]/kmedians_cluster_sizes[2]]
).rename(columns={'amenities_Laptop friendly workspace':
'Laptop-friendly workspace',
'amenities_Wifi':'Wifi'})
###Output
Table 6. Proportion of listings within a cluster that possess work-friendly amenities.
###Markdown
In summary, found in Table 7 are the key points to be highlighted per cluster.

Table 7. Key features of each identified cluster of Airbnb superhosts' listings.

|Feature|Cluster 0|Cluster 1|Cluster 2|
|:-|:-|:-|:-|
|**Target Market**|**High-paying guests**|**Families**|**Budget travelers**|
|**Property Type**|Apartments, hostels, houses, aparthotels|Apartments, houses|Apartments, houses, condominiums|
|**Price**|Least cost efficient|Cost-efficient for big groups|Cost efficient for small- to average-sized groups|
|**Rating**|Lowest rated overall|Highest rated overall|Good ratings|
|**Amenities**|Lacking in amenities|Family-friendly amenities|Similar to Cluster 1, but also offers amenities for business travelers|
###Code
m = folium.Map([35.692434, 139.734298], zoom_start=10, tiles="CartoDB positron")
def color(cluster):
    """Return the marker color assigned to a cluster number."""
    colors = ['purple', 'lightblue', 'green', 'beige']
    return colors[int(cluster)]
def icon(cluster):
    """Return the (icon prefix, icon name) pair assigned to a cluster number."""
    icons = [('glyphicon', 'user'), ('fa', 'usd'), ('fa', 'plane'), ('glyphicon', 'home')]
    return icons[int(cluster)]
print('Generating map...\nWait for the link!')
score_cols = airbnb_df.columns[airbnb_df.columns.str.contains('score')]
for i in tqdm(range(airbnb_df.shape[0])):
    row = airbnb_df.iloc[i]
    # Build an HTML popup listing every review score for this listing
    popup_html = '<h5>Review Scores</h5>' + ''.join(
        f'<div><b>{col[14:].title()}</b>:<br>{row[col]}</div>' for col in score_cols)
    prefix, icon_name = icon(row['kmeans_cluster'])
    folium.Marker(
        location=[row['latitude'], row['longitude']],
        popup=popup_html,
        opacity=0.5,
        icon=folium.Icon(color=color(row['kmeans_cluster']),
                         icon=icon_name, prefix=prefix)
    ).add_to(m)
m.save('map.html')
print('Done!')
IPython.display.HTML('<h3><center>Click here to open the '
                     '<a target="_blank" href="map.html">map</a> generated.</center></h3>')
###Output
Generating map...
Wait for the link!
|
docs/source/resources/transition_to_ft_v1.0.ipynb | ###Markdown
Transitioning to Featuretools Version 1.0Featuretools version 1.0 incorporates many significant changes that impact the way EntitySets are created, how primitives are defined, and in some cases the resulting feature matrix that is created. This document will provide an overview of the significant changes, helping existing Featuretools users transition to version 1.0. Background and Introduction Why make these changes?The lack of a unified type system across libraries makes sharing information between libraries more difficult. This problem led to the development of [Woodwork](https://woodwork.alteryx.com/en/stable/). Updating Featuretools to use Woodwork for managing column typing information enables easy sharing of feature matrix column types with other libraries without costly conversions between custom type systems. As an example, [EvalML](https://evalml.alteryx.com/en/stable/), which has also adopted Woodwork, can now use Woodwork typing information on a feature matrix directly to create machine learning models, without first inferring or redefining column types.Other benefits of using Woodwork for managing typing in Featuretools include:- Simplified code - custom type management code has been removed- Seamless integration of new types and improvements to type integration as Woodwork improves- Easy and flexible storage of additional information about columns. For example, we can now store whether a feature was engineered by Featuretools or present in the original data. What has changed?- The legacy Featuretools custom typing system has been replaced with Woodwork for managing column types- Both the `Entity` and `Variable` classes have been removed from Featuretools- Several key Featuretools methods have been moved or updated Comparison between legacy typing system and Woodwork typing systems| Featuretools < 1.0 | Featuretools 1.0 | Description || ---- | ---- | ---- || Entity | Woodwork DataFrame | stores typing information for all columns || Variable | ColumnSchema | stores typing information for a single column || Variable subclass | LogicalType and semantic_tags | elements used to define a column type | Summary of significant method changesThe table below outlines the most significant changes that have occurred. In Summary: In some cases, the method arguments have also changed, and those changes are outlined in more detail throughout this document.| Older Versions | Featuretools 1.0 || ---- | ---- || EntitySet.entity_from_dataframe | EntitySet.add_dataframe || EntitySet.normalize_entity | EntitySet.normalize_dataframe || EntitySet.update_data | EntitySet.replace_dataframe || Entity.variable_types | es['dataframe_name'].ww || es['entity_id']['variable_name'] | es['dataframe_name'].ww.columns['column_name'] || Entity.convert_variable_type | es['dataframe_name'].ww.set_types || Entity.add_interesting_values | es.add_interesting_values(dataframe_name='df_name', ...) || Entity.set_secondary_time_index | es.set_secondary_time_index(dataframe_name='df_name', ...) || Feature(es['entity_id']['variable_name']) | Feature(es['dataframe_name'].ww['column_name']) || dfs(target_entity='entity_id', ...) | dfs(target_dataframe_name='dataframe_name', ...) | For more information on how Woodwork manages typing information, refer to the [Woodwork Understanding Types and Tags](https://woodwork.alteryx.com/en/stable/guides/logical_types_and_semantic_tags.html) guide. What do these changes mean for users?Removing these classes required moving several methods from the `Entity` to the `EntitySet` object. 
This change also impacts the way relationships, features and primitives are defined, requiring different parameters than were previously required. Also, because the Woodwork typing system is not identical to the old Featuretools typing system, in some cases the feature matrix that is returned can be slightly different as a result of columns being identified as different types.All of these changes, and more, will be reviewed in detail throughout this document, providing examples of both the old and new API where possible. Removal of `Entity` Class and Updates to `EntitySet`In previous versions of Featuretools an EntitySet was created by adding multiple entities and then defining relationships between variables (columns) in different entities. Starting in Featuretools version 1.0, EntitySets are now created by adding multiple dataframes and defining relationships between columns in the dataframes. While conceptually similar, there are some minor differences in the process. Adding dataframes to an EntitySetWhen adding dataframes to an EntitySet, users can pass in a Woodwork dataframe or a regular dataframe without Woodwork typing information. As before, Featuretools supports creating EntitySets from pandas, Dask and Koalas dataframes. If users supply a dataframe that has Woodwork typing information initialized, Featuretools will simply use this typing information directly. If users supply a dataframe without Woodwork initialized, Featuretools will initialize Woodwork on the dataframe, performing type inference for any column that does not have typing information specified.Below are some examples to illustrate this process. First we will create two small dataframes to use for the example.
###Code
import featuretools as ft
import pandas as pd
import woodwork as ww
orders_df = pd.DataFrame({
'order_id': [0, 1, 2],
'order_date': ['2021-01-02', '2021-01-03', '2021-01-04']
})
items_df = pd.DataFrame({
'id': [0, 1, 2, 3, 4],
'order_id': [0, 1, 1, 2, 2],
'item_price': [29.95, 4.99, 10.25, 20.50, 15.99],
'on_sale': [False, True, False, True, False]
})
###Output
_____no_output_____
###Markdown
With older versions of Featuretools, users would first create an EntitySet object, and then add dataframes to the EntitySet, by calling `entity_from_dataframe` as shown below.```pythones = ft.EntitySet('old_es')es.entity_from_dataframe(dataframe=orders_df, entity_id='orders', index='order_id', time_index='order_date')es.entity_from_dataframe(dataframe=items_df, entity_id='items', index='id')``````Entityset: old_es Entities: orders [Rows: 3, Columns: 2] items [Rows: 5, Columns: 3] Relationships: No relationships``` With Featuretools 1.0, the steps for adding a dataframe to an EntitySet are the same, but some of the details have changed. First, create an EntitySet as before. To add the dataframe call `EntitySet.add_dataframe` in place of the previous `EntitySet.entity_from_dataframe` call. Note that the name of the dataframe is specified in the `dataframe_name` argument, which was previously called `entity_id`.
###Code
es = ft.EntitySet('new_es')
es.add_dataframe(dataframe=orders_df,
dataframe_name='orders',
index='order_id',
time_index='order_date')
###Output
_____no_output_____
###Markdown
You can also define the name, index, and time index by first [initializing Woodwork](https://woodwork.alteryx.com/en/stable/generated/woodwork.table_accessor.WoodworkTableAccessor.init.htmlwoodwork.table_accessor.WoodworkTableAccessor.init) on the dataframe and then passing the Woodwork initialized dataframe directly to the `add_dataframe` call. For this example we will initialize Woodwork on `items_df`, setting the dataframe name as `items` and specifying that the index should be the `id` column.
###Code
items_df.ww.init(name='items', index='id')
items_df.ww
###Output
_____no_output_____
###Markdown
With Woodwork initialized, we no longer need to specify values for the `dataframe_name` or `index` arguments when calling `add_dataframe` as Featuretools will simply use the values that were already specified when Woodwork was initialized.
###Code
es.add_dataframe(dataframe=items_df)
###Output
_____no_output_____
###Markdown
Accessing column typing informationPreviously, column variable type information could be accessed for an entire Entity through `Entity.variable_types` or for an individual column by selecting the individual column first through `es['entity_id']['col_id']`.```pythones['items'].variable_types``````{'id': featuretools.variable_types.variable.Index, 'order_id': featuretools.variable_types.variable.Numeric, 'item_price': featuretools.variable_types.variable.Numeric}``````pythones['items']['item_price']```With the updated version of Featuretools, the logical types and semantic tags for all of the columns in a single dataframe can be viewed through the `.ww` namespace on the dataframe. First, select the dataframe from the EntitySet with `es['dataframe_name']` and then access the typing information by chaining a `.ww` call on the end as shown below.
###Code
es['items'].ww
###Output
_____no_output_____
###Markdown
The logical type and semantic tags for a single column can be obtained from the Woodwork columns dictionary stored on the dataframe, returning a `Woodwork.ColumnSchema` object that stores the typing information:
###Code
es['items'].ww.columns['item_price']
###Output
_____no_output_____
###Markdown
Type inference and updating column typesFeaturetools will attempt to infer types for any columns that do not have types defined by the user. Prior to version 1.0, Featuretools implemented custom type inference code to determine what variable type should be assigned to each column. You could see the inferred variable types by viewing the contents of the `Entity.variable_types` dictionary.Starting in Featuretools 1.0, column type inference is being handled by Woodwork. Any columns that do not have a logical type assigned by the user when adding a dataframe to an EntitySet will have their logical types inferred by Woodwork. As before, type inference can be skipped for any columns in a dataframe by passing the appropriate logical types in a dictionary when calling `EntitySet.add_dataframe`.As an example, we can create a new dataframe and add it to an EntitySet, specifying the logical type for the user's full name as the Woodwork `PersonFullName` logical type.
###Code
users_df = pd.DataFrame({
'id': [0, 1, 2],
'name': ['John Doe', 'Rita Book', 'Teri Dactyl']
})
es.add_dataframe(dataframe=users_df,
dataframe_name='users',
index='id',
logical_types={'name': 'PersonFullName'})
es['users'].ww
###Output
_____no_output_____
###Markdown
Looking at the typing information above, we can see that the logical type for the `name` column was set to `PersonFullName` as we specified.Situations will occur where type inference identifies a column as having the incorrect logical type. In these situations, the logical type can be updated using the Woodwork `set_types` method. Let's say we want the `order_id` column of the `items` dataframe to have a `Categorical` logical type instead of the `Integer` type that was inferred. Previously, this would have been accomplished through the `Entity.convert_variable_type` method.```pythonfrom featuretools.variable_types import Categoricales['items'].convert_variable_type(variable_id='order_id', new_type=Categorical)```Now, we can perform this same update using Woodwork:
###Code
es['items'].ww.set_types(logical_types={'order_id': 'Categorical'})
es['items'].ww
###Output
_____no_output_____
###Markdown
For additional information on Woodwork typing and how it is used in Featuretools, refer to [Woodwork Typing in Featuretools](../getting_started/woodwork_types.ipynb). Adding interesting valuesInteresting values can be added to all dataframes in an EntitySet, a single dataframe in an EntitySet, or to a single column of a dataframe in an EntitySet.To add interesting values for all of the dataframes in an EntitySet, simply call `EntitySet.add_interesting_values`, optionally specifying the maximum number of values to add for each column. This remains unchanged from older versions of Featuretools to the 1.0 release.Adding values for a single dataframe or for a single column has changed. Previously to add interesting values for an Entity, users would call `Entity.add_interesting_values()`:```pythones['items'].add_interesting_values()```Now, in order to specify interesting values for a single dataframe, you call `add_interesting_values` on the EntitySet, and pass the name of the dataframe for which you want interesting values added:
###Code
es.add_interesting_values(dataframe_name='items')
###Output
_____no_output_____
###Markdown
Previously, to manually add interesting values for a column, you would simply assign them to the attribute of the variable:```pythones['items']['order_id'].interesting_values = [1, 2]```Now, this is done through `EntitySet.add_interesting_values`, passing in the name of the dataframe and a dictionary mapping column names to the interesting values to assign for that column. For example, to assign the interesting values of `[1, 2]` to the `order_id` column of the `items` dataframe, use the following approach:
###Code
es.add_interesting_values(dataframe_name='items',
values={'order_id': [1, 2]})
###Output
_____no_output_____
###Markdown
Interesting values for multiple columns in the same dataframe can be assigned by adding more entries to the dictionary passed to the `values` parameter.Accessing interesting values has changed as well. Previously interesting values could be viewed from the variable:```pythones['items']['order_id'].interesting_values```Interesting values are now stored in the Woodwork metadata for the columns in a dataframe:
###Code
es['items'].ww.columns['order_id'].metadata['interesting_values']
###Output
_____no_output_____
###Markdown
Setting a secondary time indexIn earlier versions of Featuretools, a secondary time index could be set on an Entity by calling `Entity.set_secondary_time_index`. ```pythones_flight = ft.demo.load_flight(nrows=100)arr_time_columns = ['arr_delay', 'dep_delay', 'carrier_delay', 'weather_delay', 'national_airspace_delay', 'security_delay', 'late_aircraft_delay', 'canceled', 'diverted', 'taxi_in', 'taxi_out', 'air_time', 'dep_time']es_flight['trip_logs'].set_secondary_time_index({'arr_time': arr_time_columns})```Since the `Entity` class has been removed in Featuretools 1.0, this now needs to be done through the `EntitySet` instead:
###Code
es_flight = ft.demo.load_flight(nrows=100)
arr_time_columns = ['arr_delay', 'dep_delay', 'carrier_delay', 'weather_delay',
'national_airspace_delay', 'security_delay',
'late_aircraft_delay', 'canceled', 'diverted',
'taxi_in', 'taxi_out', 'air_time', 'dep_time']
es_flight.set_secondary_time_index(dataframe_name='trip_logs',
secondary_time_index={'arr_time': arr_time_columns})
###Output
_____no_output_____
###Markdown
Previously, the secondary time index could be accessed directly from the Entity with `es_flight['trip_logs'].secondary_time_index`. Starting in Featuretools 1.0 the secondary time index and the associated columns are stored in the Woodwork dataframe metadata and can be accessed as shown below.
###Code
es_flight['trip_logs'].ww.metadata['secondary_time_index']
###Output
_____no_output_____
###Markdown
Normalizing Entities/DataFrames`EntitySet.normalize_entity` has been renamed to `EntitySet.normalize_dataframe` in Featuretools 1.0. The new method works in the same way as the old method, but some of the parameters have been renamed. The table below shows the old and new names for reference. When calling this method, the new parameter names need to be used.| Old Parameter Name | New Parameter Name || --- | --- || base_entity_id | base_dataframe_name || new_entity_id | new_dataframe_name || additional_variables | additional_columns || copy_variables | copy_columns || new_entity_time_index | new_dataframe_time_index || new_entity_secondary_time_index | new_dataframe_secondary_time_index | Defining and adding relationshipsIn earlier versions of Featuretools, relationships were defined by creating a `Relationship` object, which took two `Variables` as inputs. To define a relationship between the orders Entity and the items Entity, we would first create a `Relationship` and then add it to the EntitySet:```pythonrelationship = ft.Relationship(es['orders']['order_id'], es['items']['order_id'])es.add_relationship(relationship)```With Featuretools 1.0, the process is similar, but there are two different ways to add the relationship to the EntitySet. One way is to pass the dataframe and column names to `EntitySet.add_relationship`, and another is to pass a previously created `Relationship` object to the `relationship` keyword argument. Both approaches are demonstrated below.
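As a quick illustration of the parameter renames in the table above (the relationship examples promised above follow in the next cell), here is a minimal sketch of a `normalize_dataframe` call. The `transactions` and `customers` dataframes and their columns are hypothetical placeholders rather than part of the EntitySet built in this guide.

```python
# Hypothetical sketch: split repeated customer details out of a 'transactions'
# dataframe into a new 'customers' dataframe, using the renamed parameters.
es.normalize_dataframe(
    base_dataframe_name='transactions',    # previously base_entity_id
    new_dataframe_name='customers',        # previously new_entity_id
    index='customer_id',
    additional_columns=['customer_name'],  # previously additional_variables
    copy_columns=['region'],               # previously copy_variables
)
```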
###Code
# Undo change from above and change child column logical type to match parent and prevent warning
# NOTE: This cell is hidden in the docs build
es['items'].ww.set_types(logical_types={'order_id': 'Integer'})
es.add_relationship(parent_dataframe_name='orders',
parent_column_name='order_id',
child_dataframe_name='items',
child_column_name='order_id')
# Reset the relationship so we can add it again
# NOTE: This cell is hidden in the docs build
es.relationships = []
###Output
_____no_output_____
###Markdown
Alternatively, we can first create a `Relationship` and pass that to `EntitySet.add_relationship`. When defining a `Relationship` we need to pass in the EntitySet to which it belongs along with the names for the parent dataframe and parent column and the name of the child dataframe and child column.
###Code
relationship = ft.Relationship(entityset=es,
parent_dataframe_name='orders',
parent_column_name='order_id',
child_dataframe_name='items',
child_column_name='order_id')
es.add_relationship(relationship=relationship)
###Output
_____no_output_____
###Markdown
Updating data for a dataframe in an EntitySetPreviously to update (replace) the data associated with an Entity, users could call `Entity.update_data` and pass in the new dataframe. As an example, let's update the data in our `users` Entity:```pythonnew_users_df = pd.DataFrame({ 'id': [3, 4], 'name': ['Anne Teak', 'Art Decco']})es['users'].update_data(df=new_users_df)```To accomplish this task with Featuretools 1.0, we will use the `EntitySet.replace_dataframe` method instead:
###Code
new_users_df = pd.DataFrame({
'id': [0, 1],
'name': ['Anne Teak', 'Art Decco']
})
es.replace_dataframe(dataframe_name='users', df=new_users_df)
es['users']
###Output
_____no_output_____
###Markdown
Defining featuresThe syntax for defining features has changed slightly in Featuretools 1.0. Previously, identity features could be defined simply by passing in the variable that should be used to build the feature.```pythonfeature = ft.Feature(es['items']['item_price'])```Starting with Featuretools 1.0, a similar syntax can be used, but because `es['items']` will now return a Woodwork dataframe instead of an `Entity`, we need to update the syntax slightly to access the Woodwork column. To update, simply add `.ww` between the dataframe name selector and the column selector as shown below.
###Code
feature = ft.Feature(es['items'].ww['item_price'])
###Output
_____no_output_____
###Markdown
Defining primitivesIn earlier versions of Featuretools, primitive input and return types were defined by specifying the appropriate `Variable` class. Starting in version 1.0, the input and return types are defined by Woodwork `ColumnSchema` objects. To illustrate this change, let's look closer at the `Age` transform primitive. This primitive takes a datetime representing a date of birth and returns a numeric value corresponding to a person's age. In previous versions of Featuretools, the input type was defined by specifying the `DateOfBirth` variable type and the return type was specified by the `Numeric` variable type:```pythoninput_types = [DateOfBirth]return_type = Numeric```Woodwork does not have a specific `DateOfBirth` logical type, but rather identifies a column as a date of birth column by specifying the logical type as `Datetime` with a semantic tag of `date_of_birth`. There is also no `Numeric` logical type in Woodwork, but rather Woodwork identifies all columns that can be used for numeric operations with the semantic tag of `numeric`. Furthermore, we know the `Age` primitive will return a floating point number, which would correspond to a Woodwork logical type of `Double`. With these items in mind, we can redefine the `Age` input types and return types with `ColumnSchema` objects as follows:```pythoninput_types = [ColumnSchema(logical_type=Datetime, semantic_tags={'date_of_birth'})]return_type = ColumnSchema(logical_type=Double, semantic_tags={'numeric'})```Aside from changing the way input and return types are defined, the rest of the process for defining primitives remains unchanged. Mapping from old Featuretools variable types to Woodwork ColumnSchemasTypes defined by Woodwork differ from the old variable types that were defined by Featuretools prior to version 1.0. 
While there is not a direct mapping from the old variable types to the new Woodwork types defined by `ColumnSchema` objects, the approximate mapping is shown below.| Featuretools Variable | Woodwork Column Schema || --- | --- || Boolean | ColumnSchema(logical_type=Boolean) or ColumnSchema(logical_type=BooleanNullable) || Categorical | ColumnSchema(logical_type=Categorical) || CountryCode | ColumnSchema(logical_type=CountryCode) || Datetime | ColumnSchema(logical_type=Datetime) || DateOfBirth | ColumnSchema(logical_type=Datetime, semantic_tags={'date_of_birth'}) || DatetimeTimeIndex | ColumnSchema(logical_type=Datetime, semantic_tags={'time_index'}) || Discrete | ColumnSchema(semantic_tags={'category'}) || EmailAddress | ColumnSchema(logical_type=EmailAddress) || FilePath | ColumnSchema(logical_type=Filepath) || FullName | ColumnSchema(logical_type=PersonFullName) || Id | ColumnSchema(semantic_tags={'foreign_key'}) || Index | ColumnSchema(semantic_tags={'index'}) || IPAddress | ColumnSchema(logical_type=IPAddress) || LatLong | ColumnSchema(logical_type=LatLong) || NaturalLanguage | ColumnSchema(logical_type=NaturalLanguage) || Numeric | ColumnSchema(semantic_tags={'numeric'}) || NumericTimeIndex | ColumnSchema(semantic_tags={'numeric', 'time_index'}) || Ordinal | ColumnSchema(logical_type=Ordinal) || PhoneNumber | ColumnSchema(logical_type=PhoneNumber) || SubRegionCode | ColumnSchema(logical_type=SubRegionCode) || Timedelta | ColumnSchema(logical_type=Timedelta) || TimeIndex | ColumnSchema(semantic_tags={'time_index'}) || URL | ColumnSchema(logical_type=URL) || Unknown | ColumnSchema(logical_type=Unknown) || ZIPCode | ColumnSchema(logical_type=PostalCode) | Changes to Deep Feature Synthesis and Calculate Feature MatrixThe argument names for both `featuretools.dfs` and `featuretools.calculate_feature_matrix` have changed slightly in Featuretools 1.0. In prior versions, users could generate a list of features using the default primitives and options like this:```pythonfeatures = ft.dfs(entityset=es, target_entity='items', features_only=True)```In Featuretools 1.0, the `target_entity` argument has been renamed to `target_dataframe_name`, but otherwise this basic call remains the same.
###Code
features = ft.dfs(entityset=es,
target_dataframe_name='items',
features_only=True)
features
###Output
_____no_output_____
###Markdown
In addition, the `dfs` argument `ignore_entities` was renamed to `ignore_dataframes` and `ignore_variables` was renamed to `ignore_columns`. Similarly, if specifying primitive options, all references to `entities` should be replaced with `dataframes` and references to `variables` should be replaced with `columns`. For example, the primitive option of `include_groupby_entities` is now `include_groupby_dataframes` and `include_variables` is now `include_columns`.The basic call to `featuretools.calculate_feature_matrix` remains unchanged if passing in an EntitySet along with a list of features to calculate. However, users calling `calculate_feature_matrix` by passing in a dictionary of `entities` and a list of `relationships` should note that the `entities` argument has been renamed to `dataframes` and that the values in the `dataframes` dictionary should now include Woodwork logical types instead of Featuretools `Variable` classes, as sketched below.
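To make the rename concrete, a hedged sketch of that form of the call is shown below. It assumes the tuple layout of the old `entities` dictionary (dataframe, index, time index, type dictionary) carried over to `dataframes`, with Woodwork logical types in place of the old `Variable` classes; the exact tuple layout here is an assumption rather than a definitive reference.

```python
# Sketch only: 'dataframes' replaces the old 'entities' argument, and the
# per-dataframe type dictionary now maps column names to Woodwork logical types.
dataframes = {
    'orders': (orders_df, 'order_id', 'order_date', {'order_date': 'Datetime'}),
    'items': (items_df, 'id')
}
relationships = [('orders', 'order_id', 'items', 'order_id')]
feature_matrix = ft.calculate_feature_matrix(features=features,
                                             dataframes=dataframes,
                                             relationships=relationships)
```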
###Code
feature_matrix = ft.calculate_feature_matrix(features=features, entityset=es)
feature_matrix
###Output
_____no_output_____
###Markdown
In addition to the changes in argument names, there are a couple other changes to the returned feature matrix that users should be aware of. First, because of slight differences in the way Woodwork defines column types compared to how the prior Featuretools implementation did, there can be some differences in the features that are generated between old and new versions. The most notable impact is in the way foreign key columns are handled. Previously, Featuretools treated all foreign key (previously `Id`) columns as categorical columns, and would generate appropriate features from these columns. Starting in version 1.0, foreign key columns are not constrained to be categorical, and if they are another type such as `Integer`, features will not be generated from these columns. Manually converting foreign key columns to `Categorical` as shown above will result in features much closer to those achieved with previous versions.Also, because Woodwork's type inference process differs from the previous Featuretools type inference process, an EntitySet may have column types identified differently. This difference in column types could impact the features that are generated. If it is important to have the same set of features, check all of the logical types in the EntitySet dataframes and update them to the expected types if there are columns that have been inferred as unexpected types.Finally, the feature matrix calculated by Featuretools will now have Woodwork initialized. This means that users can view feature matrix column typing information through the Woodwork namespace as follows.
###Code
feature_matrix.ww
###Output
_____no_output_____
###Markdown
Featuretools now labels features by whether they were originally in the dataframes, or whether they were created by Featuretools. This information is stored in the Woodwork `origin` attribute for the column. Columns that were in the original data will be labeled with `base` and features that were created by Featuretools will be labeled with `engineered`.As a demonstration of how to access this information, let's compare two features in the feature matrix: `item_price` and `orders.MEAN(items.item_price)`. `item_price` was present in the original data, and `orders.MEAN(items.item_price)` was created by Featuretools.
###Code
feature_matrix.ww['item_price'].ww.origin
feature_matrix.ww['orders.MEAN(items.item_price)'].ww.origin
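# The origin labels can also be used to split engineered features from columns
# that were present in the base data.
engineered_columns = [col for col in feature_matrix.columns
                      if feature_matrix.ww[col].ww.origin == 'engineered']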
###Output
_____no_output_____
###Markdown
Transitioning to Featuretools Version 1.0Featuretools version 1.0 incorporates many significant changes that impact the way EntitySets are created, how primitives are defined, and in some cases the resulting feature matrix that is created. This document will provide an overview of the significant changes, helping existing Featuretools users transition to version 1.0. Background and Introduction Why make these changes?The lack of a unified type system across libraries makes sharing information between libraries more difficult. This problem led to the development of [Woodwork](https://woodwork.alteryx.com/en/stable/). Updating Featuretools to use Woodwork for managing column typing information enables easy sharing of feature matrix column types with other libraries without costly conversions between custom type systems. As an example, [EvalML](https://evalml.alteryx.com/en/stable/), which has also adopted Woodwork, can now use Woodwork typing information on a feature matrix directly to create machine learning models, without first inferring or redefining column types.Other benefits of using Woodwork for managing typing in Featuretools include:- Simplified code - custom type management code has been removed- Seamless integration of new types and improvements to type integration as Woodwork improves- Easy and flexible storage of additional information about columns. For example, we can now store whether a feature was engineered by Featuretools or present in the original data. What has changed?- The legacy Featuretools custom typing system has been replaced with Woodwork for managing column types- Both the `Entity` and `Variable` classes have been removed from Featuretools- Several key Featuretools methods have been moved or updated Comparison between legacy typing system and Woodwork typing systems| Featuretools < 1.0 | Featuretools 1.0 | Description || ---- | ---- | ---- || Entity | Woodwork DataFrame | stores typing information for all columns || Variable | ColumnSchema | stores typing information for a single column || Variable subclass | LogicalType and semantic_tags | elements used to define a column type | Summary of significant method changesThe table below outlines the most significant changes that have occurred. In Summary: In some cases, the method arguments have also changed, and those changes are outlined in more detail throughout this document.| Older Versions | Featuretools 1.0 || ---- | ---- || EntitySet.entity_from_dataframe | EntitySet.add_dataframe || EntitySet.normalize_entity | EntitySet.normalize_dataframe || EntitySet.update_data | EntitySet.replace_dataframe || Entity.variable_types | es['dataframe_name'].ww || es['entity_id']['variable_name'] | es['dataframe_name'].ww.columns['column_name'] || Entity.convert_variable_type | es['dataframe_name'].ww.set_types || Entity.add_interesting_values | es.add_interesting_values(dataframe_name='df_name', ...) || Entity.set_secondary_time_index | es.set_secondary_time_index(dataframe_name='df_name', ...) || Feature(es['entity_id']['variable_name']) | Feature(es['dataframe_name'].ww['column_name']) || dfs(target_entity='entity_id', ...) | dfs(target_dataframe_name='dataframe_name', ...) | For more information on how Woodwork manages typing information, refer to the [Woodwork Understanding Types and Tags](https://woodwork.alteryx.com/en/stable/guides/logical_types_and_semantic_tags.html) guide. What do these changes mean for users?Removing these classes required moving several methods from the `Entity` to the `EntitySet` object. 
This change also impacts the way relationships, features and primitives are defined, requiring different parameters than were previously required. Also, because the Woodwork typing system is not identical to the old Featuretools typing system, in some cases the feature matrix that is returned can be slightly different as a result of columns being identified as different types.All of these changes, and more, will be reviewed in detail throughout this document, providing examples of both the old and new API where possible. Removal of `Entity` Class and Updates to `EntitySet`In previous versions of Featuretools an EntitySet was created by adding multiple entities and then defining relationships between variables (columns) in different entities. Starting in Featuretools version 1.0, EntitySets are now created by adding multiple dataframes and defining relationships between columns in the dataframes. While conceptually similar, there are some minor differences in the process. Adding dataframes to an EntitySetWhen adding dataframes to an EntitySet, users can pass in a Woodwork dataframe or a regular dataframe without Woodwork typing information. As before, Featuretools supports creating EntitySets from pandas, Dask and Spark dataframes. If users supply a dataframe that has Woodwork typing information initialized, Featuretools will simply use this typing information directly. If users supply a dataframe without Woodwork initialized, Featuretools will initialize Woodwork on the dataframe, performing type inference for any column that does not have typing information specified.Below are some examples to illustrate this process. First we will create two small dataframes to use for the example.
###Code
import featuretools as ft
import pandas as pd
import woodwork as ww
orders_df = pd.DataFrame({
'order_id': [0, 1, 2],
'order_date': ['2021-01-02', '2021-01-03', '2021-01-04']
})
items_df = pd.DataFrame({
'id': [0, 1, 2, 3, 4],
'order_id': [0, 1, 1, 2, 2],
'item_price': [29.95, 4.99, 10.25, 20.50, 15.99],
'on_sale': [False, True, False, True, False]
})
###Output
_____no_output_____
###Markdown
With older versions of Featuretools, users would first create an EntitySet object, and then add dataframes to the EntitySet, by calling `entity_from_dataframe` as shown below.```pythones = ft.EntitySet('old_es')es.entity_from_dataframe(dataframe=orders_df, entity_id='orders', index='order_id', time_index='order_date')es.entity_from_dataframe(dataframe=items_df, entity_id='items', index='id')``````Entityset: old_es Entities: orders [Rows: 3, Columns: 2] items [Rows: 5, Columns: 3] Relationships: No relationships``` With Featuretools 1.0, the steps for adding a dataframe to an EntitySet are the same, but some of the details have changed. First, create an EntitySet as before. To add the dataframe call `EntitySet.add_dataframe` in place of the previous `EntitySet.entity_from_dataframe` call. Note that the name of the dataframe is specified in the `dataframe_name` argument, which was previously called `entity_id`.
###Code
es = ft.EntitySet('new_es')
es.add_dataframe(dataframe=orders_df,
dataframe_name='orders',
index='order_id',
time_index='order_date')
###Output
_____no_output_____
###Markdown
You can also define the name, index, and time index by first [initializing Woodwork](https://woodwork.alteryx.com/en/stable/generated/woodwork.table_accessor.WoodworkTableAccessor.init.htmlwoodwork.table_accessor.WoodworkTableAccessor.init) on the dataframe and then passing the Woodwork initialized dataframe directly to the `add_dataframe` call. For this example we will initialize Woodwork on `items_df`, setting the dataframe name as `items` and specifying that the index should be the `id` column.
###Code
items_df.ww.init(name='items', index='id')
items_df.ww
###Output
_____no_output_____
###Markdown
With Woodwork initialized, we no longer need to specify values for the `dataframe_name` or `index` arguments when calling `add_dataframe` as Featuretools will simply use the values that were already specified when Woodwork was initialized.
###Code
es.add_dataframe(dataframe=items_df)
###Output
_____no_output_____
###Markdown
Accessing column typing informationPreviously, column variable type information could be accessed for an entire Entity through `Entity.variable_types` or for an individual column by selecting the individual column first through `es['entity_id']['col_id']`.```pythones['items'].variable_types``````{'id': featuretools.variable_types.variable.Index, 'order_id': featuretools.variable_types.variable.Numeric, 'item_price': featuretools.variable_types.variable.Numeric}``````pythones['items']['item_price']```With the updated version of Featuretools, the logical types and semantic tags for all of the columns in a single dataframe can be viewed through the `.ww` namespace on the dataframe. First, select the dataframe from the EntitySet with `es['dataframe_name']` and then access the typing information by chaining a `.ww` call on the end as shown below.
###Code
es['items'].ww
###Output
_____no_output_____
###Markdown
The logical type and semantic tags for a single column can be obtained from the Woodwork columns dictionary stored on the dataframe, returning a `Woodwork.ColumnSchema` object that stores the typing information:
###Code
es['items'].ww.columns['item_price']
###Output
_____no_output_____
###Markdown
Type inference and updating column typesFeaturetools will attempt to infer types for any columns that do not have types defined by the user. Prior to version 1.0, Featuretools implemented custom type inference code to determine what variable type should be assigned to each column. You could see the inferred variable types by viewing the contents of the `Entity.variable_types` dictionary.Starting in Featuretools 1.0, column type inference is being handled by Woodwork. Any columns that do not have a logical type assigned by the user when adding a dataframe to an EntitySet will have their logical types inferred by Woodwork. As before, type inference can be skipped for any columns in a dataframe by passing the appropriate logical types in a dictionary when calling `EntitySet.add_dataframe`.As an example, we can create a new dataframe and add it to an EntitySet, specifying the logical type for the user's full name as the Woodwork `PersonFullName` logical type.
###Code
users_df = pd.DataFrame({
'id': [0, 1, 2],
'name': ['John Doe', 'Rita Book', 'Teri Dactyl']
})
es.add_dataframe(dataframe=users_df,
dataframe_name='users',
index='id',
logical_types={'name': 'PersonFullName'})
es['users'].ww
###Output
_____no_output_____
###Markdown
Looking at the typing information above, we can see that the logical type for the `name` column was set to `PersonFullName` as we specified.Situations will occur where type inference identifies a column as having the incorrect logical type. In these situations, the logical type can be updated using the Woodwork `set_types` method. Let's say we want the `order_id` column of the `items` dataframe to have a `Categorical` logical type instead of the `Integer` type that was inferred. Previously, this would have been accomplished through the `Entity.convert_variable_type` method.```pythonfrom featuretools.variable_types import Categoricales['items'].convert_variable_type(variable_id='order_id', new_type=Categorical)```Now, we can perform this same update using Woodwork:
###Code
es['items'].ww.set_types(logical_types={'order_id': 'Categorical'})
es['items'].ww
###Output
_____no_output_____
###Markdown
For additional information on Woodwork typing and how it is used in Featuretools, refer to [Woodwork Typing in Featuretools](../getting_started/woodwork_types.ipynb). Adding interesting valuesInteresting values can be added to all dataframes in an EntitySet, a single dataframe in an EntitySet, or to a single column of a dataframe in an EntitySet.To add interesting values for all of the dataframes in an EntitySet, simply call `EntitySet.add_interesting_values`, optionally specifying the maximum number of values to add for each column. This remains unchanged from older versions of Featuretools to the 1.0 release.Adding values for a single dataframe or for a single column has changed. Previously to add interesting values for an Entity, users would call `Entity.add_interesting_values()`:```pythones['items'].add_interesting_values()```Now, in order to specify interesting values for a single dataframe, you call `add_interesting_values` on the EntitySet, and pass the name of the dataframe for which you want interesting values added:
###Code
es.add_interesting_values(dataframe_name='items')
###Output
_____no_output_____
###Markdown
Previously, to manually add interesting values for a column, you would simply assign them to the attribute of the variable:```pythones['items']['order_id'].interesting_values = [1, 2]```Now, this is done through `EntitySet.add_interesting_values`, passing in the name of the dataframe and a dictionary mapping column names to the interesting values to assign for that column. For example, to assign the interesting values of `[1, 2]` to the `order_id` column of the `items` dataframe, use the following approach:
###Code
es.add_interesting_values(dataframe_name='items',
values={'order_id': [1, 2]})
###Output
_____no_output_____
###Markdown
Interesting values for multiple columns in the same dataframe can be assigned by adding more entries to the dictionary passed to the `values` parameter.Accessing interesting values has changed as well. Previously interesting values could be viewed from the variable:```pythones['items']['order_id'].interesting_values```Interesting values are now stored in the Woodwork metadata for the columns in a dataframe:
###Code
es['items'].ww.columns['order_id'].metadata['interesting_values']
###Output
_____no_output_____
###Markdown
Setting a secondary time indexIn earlier versions of Featuretools, a secondary time index could be set on an Entity by calling `Entity.set_secondary_time_index`. ```pythones_flight = ft.demo.load_flight(nrows=100)arr_time_columns = ['arr_delay', 'dep_delay', 'carrier_delay', 'weather_delay', 'national_airspace_delay', 'security_delay', 'late_aircraft_delay', 'canceled', 'diverted', 'taxi_in', 'taxi_out', 'air_time', 'dep_time']es_flight['trip_logs'].set_secondary_time_index({'arr_time': arr_time_columns})```Since the `Entity` class has been removed in Featuretools 1.0, this now needs to be done through the `EntitySet` instead:
###Code
es_flight = ft.demo.load_flight(nrows=100)
arr_time_columns = ['arr_delay', 'dep_delay', 'carrier_delay', 'weather_delay',
'national_airspace_delay', 'security_delay',
'late_aircraft_delay', 'canceled', 'diverted',
'taxi_in', 'taxi_out', 'air_time', 'dep_time']
es_flight.set_secondary_time_index(dataframe_name='trip_logs',
secondary_time_index={'arr_time': arr_time_columns})
###Output
_____no_output_____
###Markdown
Previously, the secondary time index could be accessed directly from the Entity with `es_flight['trip_logs'].secondary_time_index`. Starting in Featuretools 1.0 the secondary time index and the associated columns are stored in the Woodwork dataframe metadata and can be accessed as shown below.
###Code
es_flight['trip_logs'].ww.metadata['secondary_time_index']
###Output
_____no_output_____
###Markdown
Normalizing Entities/DataFrames`EntitySet.normalize_entity` has been renamed to `EntitySet.normalize_dataframe` in Featuretools 1.0. The new method works in the same way as the old method, but some of the parameters have been renamed. The table below shows the old and new names for reference. When calling this method, the new parameter names need to be used.| Old Parameter Name | New Parameter Name || --- | --- || base_entity_id | base_dataframe_name || new_entity_id | new_dataframe_name || additional_variables | additional_columns || copy_variables | copy_columns || new_entity_time_index | new_dataframe_time_index || new_entity_secondary_time_index | new_dataframe_secondary_time_index | Defining and adding relationshipsIn earlier versions of Featuretools, relationships were defined by creating a `Relationship` object, which took two `Variables` as inputs. To define a relationship between the orders Entity and the items Entity, we would first create a `Relationship` and then add it to the EntitySet:```pythonrelationship = ft.Relationship(es['orders']['order_id'], es['items']['order_id'])es.add_relationship(relationship)```With Featuretools 1.0, the process is similar, but there are two different ways to add the relationship to the EntitySet. One way is to pass the dataframe and column names to `EntitySet.add_relationship`, and another is to pass a previously created `Relationship` object to the `relationship` keyword argument. Both approaches are demonstrated below.
###Code
# Undo change from above and change child column logical type to match parent and prevent warning
# NOTE: This cell is hidden in the docs build
es['items'].ww.set_types(logical_types={'order_id': 'Integer'})
es.add_relationship(parent_dataframe_name='orders',
parent_column_name='order_id',
child_dataframe_name='items',
child_column_name='order_id')
# Reset the relationship so we can add it again
# NOTE: This cell is hidden in the docs build
es.relationships = []
###Output
_____no_output_____
###Markdown
Alternatively, we can first create a `Relationship` and pass that to `EntitySet.add_relationship`. When defining a `Relationship` we need to pass in the EntitySet to which it belongs along with the names for the parent dataframe and parent column and the name of the child dataframe and child column.
###Code
relationship = ft.Relationship(entityset=es,
parent_dataframe_name='orders',
parent_column_name='order_id',
child_dataframe_name='items',
child_column_name='order_id')
es.add_relationship(relationship=relationship)
###Output
_____no_output_____
###Markdown
Updating data for a dataframe in an EntitySetPreviously to update (replace) the data associated with an Entity, users could call `Entity.update_data` and pass in the new dataframe. As an example, let's update the data in our `users` Entity:```pythonnew_users_df = pd.DataFrame({ 'id': [3, 4], 'name': ['Anne Teak', 'Art Decco']})es['users'].update_data(df=new_users_df)```To accomplish this task with Featuretools 1.0, we will use the `EntitySet.replace_dataframe` method instead:
###Code
new_users_df = pd.DataFrame({
'id': [0, 1],
'name': ['Anne Teak', 'Art Decco']
})
es.replace_dataframe(dataframe_name='users', df=new_users_df)
es['users']
###Output
_____no_output_____
###Markdown
Defining featuresThe syntax for defining features has changed slightly in Featuretools 1.0. Previously, identity features could be defined simply by passing in the variable that should be used to build the feature.```pythonfeature = ft.Feature(es['items']['item_price'])```Starting with Featuretools 1.0, a similar syntax can be used, but because `es['items']` will now return a Woodwork dataframe instead of an `Entity`, we need to update the syntax slightly to access the Woodwork column. To update, simply add `.ww` between the dataframe name selector and the column selector as shown below.
###Code
feature = ft.Feature(es['items'].ww['item_price'])
###Output
_____no_output_____
###Markdown
Defining primitivesIn earlier versions of Featuretools, primitive input and return types were defined by specifying the appropriate `Variable` class. Starting in version 1.0, the input and return types are defined by Woodwork `ColumnSchema` objects. To illustrate this change, let's look closer at the `Age` transform primitive. This primitive takes a datetime representing a date of birth and returns a numeric value corresponding to a person's age. In previous versions of Featuretools, the input type was defined by specifying the `DateOfBirth` variable type and the return type was specified by the `Numeric` variable type:```pythoninput_types = [DateOfBirth]return_type = Numeric```Woodwork does not have a specific `DateOfBirth` logical type, but rather identifies a column as a date of birth column by specifying the logical type as `Datetime` with a semantic tag of `date_of_birth`. There is also no `Numeric` logical type in Woodwork, but rather Woodwork identifies all columns that can be used for numeric operations with the semantic tag of `numeric`. Furthermore, we know the `Age` primitive will return a floating point number, which would correspond to a Woodwork logical type of `Double`. With these items in mind, we can redefine the `Age` input types and return types with `ColumnSchema` objects as follows:```pythoninput_types = [ColumnSchema(logical_type=Datetime, semantic_tags={'date_of_birth'})]return_type = ColumnSchema(logical_type=Double, semantic_tags={'numeric'})```Aside from changing the way input and return types are defined, the rest of the process for defining primitives remains unchanged. Mapping from old Featuretools variable types to Woodwork ColumnSchemasTypes defined by Woodwork differ from the old variable types that were defined by Featuretools prior to version 1.0. 
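Before walking through the type mapping, here is a minimal sketch of what a full primitive definition might look like under the new system. The class below is an illustrative assumption (the name `AgeInYears` and its day-counting helper are made up for this example, modeled loosely on the built-in `Age` primitive) rather than code copied from the Featuretools source:

```python
from featuretools.primitives import TransformPrimitive
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double

class AgeInYears(TransformPrimitive):
    """Illustrative custom primitive: date of birth -> approximate age in years."""
    name = "age_in_years"
    input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={'date_of_birth'})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={'numeric'})
    uses_calc_time = True  # ask Featuretools to pass the cutoff time to the function

    def get_function(self):
        def age_in_years(date_of_birth, time=None):
            return (time - date_of_birth).dt.days / 365
        return age_in_years
```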
While there is not a direct mapping from the old variable types to the new Woodwork types defined by `ColumnSchema` objects, the approximate mapping is shown below.| Featuretools Variable | Woodwork Column Schema || --- | --- || Boolean | ColumnSchema(logical_type=Boolean) or ColumnSchema(logical_type=BooleanNullable) || Categorical | ColumnSchema(logical_type=Categorical) || CountryCode | ColumnSchema(logical_type=CountryCode) || Datetime | ColumnSchema(logical_type=Datetime) || DateOfBirth | ColumnSchema(logical_type=Datetime, semantic_tags={'date_of_birth'}) || DatetimeTimeIndex | ColumnSchema(logical_type=Datetime, semantic_tags={'time_index'}) || Discrete | ColumnSchema(semantic_tags={'category'}) || EmailAddress | ColumnSchema(logical_type=EmailAddress) || FilePath | ColumnSchema(logical_type=Filepath) || FullName | ColumnSchema(logical_type=PersonFullName) || Id | ColumnSchema(semantic_tags={'foreign_key'}) || Index | ColumnSchema(semantic_tags={'index'}) || IPAddress | ColumnSchema(logical_type=IPAddress) || LatLong | ColumnSchema(logical_type=LatLong) || NaturalLanguage | ColumnSchema(logical_type=NaturalLanguage) || Numeric | ColumnSchema(semantic_tags={'numeric'}) || NumericTimeIndex | ColumnSchema(semantic_tags={'numeric', 'time_index'}) || Ordinal | ColumnSchema(logical_type=Ordinal) || PhoneNumber | ColumnSchema(logical_type=PhoneNumber) || SubRegionCode | ColumnSchema(logical_type=SubRegionCode) || Timedelta | ColumnSchema(logical_type=Timedelta) || TimeIndex | ColumnSchema(semantic_tags={'time_index'}) || URL | ColumnSchema(logical_type=URL) || Unknown | ColumnSchema(logical_type=Unknown) || ZIPCode | ColumnSchema(logical_type=PostalCode) | Changes to Deep Feature Synthesis and Calculate Feature MatrixThe argument names for both `featuretools.dfs` and `featuretools.calculate_feature_matrix` have changed slightly in Featuretools 1.0. In prior versions, users could generate a list of features using the default primitives and options like this:```pythonfeatures = ft.dfs(entityset=es, target_entity='items', features_only=True)```In Featuretools 1.0, the `target_entity` argument has been renamed to `target_dataframe_name`, but otherwise this basic call remains the same.
###Code
features = ft.dfs(entityset=es,
target_dataframe_name='items',
features_only=True)
features
###Output
_____no_output_____
###Markdown
In addition, the `dfs` argument `ignore_entities` was renamed to `ignore_dataframes` and `ignore_variables` was renamed to `ignore_columns`. Similarly, if specifying primitive options, all references to `entities` should be replaced with `dataframes` and references to `variables` should be replaced with `columns`. For example, the primitive option of `include_groupby_entities` is now `include_groupby_dataframes` and `include_variables` is now `include_columns`.The basic call to `featuretools.calculate_feature_matrix` remains unchanged if passing in an EntitySet along with a list of features to calculate. However, users calling `calculate_feature_matrix` by passing in a list of `entities` and `relationships` should note that the `entities` argument has been renamed to `dataframes` and the values in the dictionary should now include Woodwork logical types instead of Featuretools `Variable` classes.
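As a rough sketch of the renamed arguments, the snippet below shows what passing dataframes and relationships directly might look like; the tuple layout `(dataframe, index, time_index, logical_types)` and the relationship tuples are assumptions based on the renames described above, and the snippet is not executed in this notebook:

```python
# Hypothetical sketch only -- not run as part of this guide
dataframes = {
    'orders': (orders_df, 'order_id', 'order_date'),
    'items': (items_df, 'id', None, {'order_id': 'Categorical'}),
}
relationships = [('orders', 'order_id', 'items', 'order_id')]

feature_matrix = ft.calculate_feature_matrix(features=features,
                                             dataframes=dataframes,
                                             relationships=relationships)
```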
###Code
feature_matrix = ft.calculate_feature_matrix(features=features, entityset=es)
feature_matrix
###Output
_____no_output_____
###Markdown
In addition to the changes in argument names, there are a couple other changes to the returned feature matrix that users should be aware of. First, because of slight differences in the way Woodwork defines column types compared to how the prior Featuretools implementation did, there can be some differences in the features that are generated between old and new versions. The most notable impact is in the way foreign key columns are handled. Previously, Featuretools treated all foreign key (previously `Id`) columns as categorical columns, and would generate appropriate features from these columns. Starting in version 1.0, foreign key columns are not constrained to be categorical, and if they are another type such as `Integer`, features will not be generated from these columns. Manually converting foreign key columns to `Categorical` as shown above will result in features much closer to those achieved with previous versions.Also, because Woodwork's type inference process differs from the previous Featuretools type inference process, an EntitySet may have column types identified differently. This difference in column types could impact the features that are generated. If it is important to have the same set of features, check all of the logical types in the EntitySet dataframes and update them to the expected types if there are columns that have been inferred as unexpected types.Finally, the feature matrix calculated by Featuretools will now have Woodwork initialized. This means that users can view feature matrix column typing information through the Woodwork namespace as follows.
###Code
feature_matrix.ww
###Output
_____no_output_____
###Markdown
Featuretools now labels features by whether they were originally in the dataframes, or whether they were created by Featuretools. This information is stored in the Woodwork `origin` attribute for the column. Columns that were in the original data will be labeled with `base` and features that were created by Featuretools will be labeled with `engineered`.As a demonstration of how to access this information, let's compare two features in the feature matrix: `item_price` and `orders.MEAN(items.item_price)`. `item_price` was present in the original data, and `orders.MEAN(items.item_price)` was created by Featuretools.
###Code
feature_matrix.ww['item_price'].ww.origin
feature_matrix.ww['orders.MEAN(items.item_price)'].ww.origin
###Output
_____no_output_____ |
docs/Resolution.ipynb | ###Markdown
*get_var_resolution(tableName, varName)*Returns spatial and temporal resolutions of the given variable. > **Parameters:** >> **tableName: string**>> The name of table associated with the dataset. A full list of table names can be found in the [catalog](Catalog.ipynb).>> >> **varName: string or list of string**>> Variable short name. A full list of variable short names can be found in the [catalog](Catalog.ipynb).>**Returns:** >> Pandas dataframe. Example
###Code
#!pip install pycmap -q #uncomment to install pycmap, if necessary
import pycmap
api = pycmap.API(token='<YOUR_API_KEY>')
api.get_var_resolution('tblModis_AOD_REP', 'AOD')
###Output
_____no_output_____ |
examples/CPX_NeoPixels.ipynb | ###Markdown
CircuitPython NeoPixelNeoPixels are a revolutionary and ultra-popular way to add lights and color to your project. These stranded RGB lights have the controller inside the LED, so you just push the RGB data and the LEDs do all the work for you! They're a perfect match for CircuitPython. Let's first check if Jupyter can read our CircuitPython board:
###Code
import os
print(os.uname())
###Output
_____no_output_____
###Markdown
First, we'll want to import the `time` and `board` pin definitions
###Code
import time
import board
###Output
_____no_output_____
###Markdown
Next, we'll want to import the neopixel library by running:
###Code
import neopixel
###Output
_____no_output_____
###Markdown
Did you receive an error: `ImportError: no module named 'neopixel'`? [You'll need to install the neopixel.mpy library if you don't have it yet.](https://github.com/adafruit/Adafruit_CircuitPython_Bundle/releases/latest) Let's create a `pixels` object and configure it with the ten builtin neopixels on the Circuit Playground Express
###Code
pixels = neopixel.NeoPixel(board.NEOPIXEL, 10, brightness=.2)
pixels.fill((0, 0, 0))
pixels.show()
###Output
_____no_output_____
###Markdown
Ok, now let's make the NeoPixel ring flash by making a function called `flash_pixels`
###Code
def flash_pixels(flash_speed=0.5):
print('flashing R')
pixels.fill((255, 0, 0))
pixels.show()
time.sleep(flash_speed)
print('flashing G')
pixels.fill((0, 255, 0))
pixels.show()
time.sleep(flash_speed)
print('flashing B')
pixels.fill((0, 0, 255))
pixels.show()
time.sleep(flash_speed)
###Output
_____no_output_____
###Markdown
We can increase the speed of the NeoPixels by defining a `flash_speed`
###Code
flash_speed = 0.25
###Output
_____no_output_____
###Markdown
Then, call `flash_pixels` and pass in the `flash_speed` variable we just created
###Code
flash_pixels(flash_speed)
###Output
_____no_output_____
###Output
_____no_output_____ |
session_5/ex_5.ipynb | ###Markdown
Exercise set 5: causal forestIn this exercise set we will be working with the `econml` package to estimate a causal forest.Another more general implementation is found in [generalized random forest](https://github.com/grf-labs/grf) by Athey et al. The package is written for the R programming language.
###Code
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.datasets import make_classification
sns.set(style='darkgrid')
%matplotlib inline
###Output
_____no_output_____
###Markdown
To highlight the usefulness of causal forest we will be working with synthetic data in this exercise. In particular, we will synthetically add a treatment effect to a dataset in which there otherwise is none. Furthermore, we will make this effect heterogeneous by adding noise, and by making it depend on a single continuous variable as well as a categorical variable. >**Ex. 5.1.0:** Use the code below to simulate data according to\begin{align}T(X) &= \frac{1}{1+e^{-(X\delta+U)}} > 0.5 \\ \tau(X) &= \frac{10}{1+e^{-\gamma X_0}} + \nu \\Y(T=0) &= X\beta + \epsilon \\ Y(T=1) &= Y(0) + \tau(X) \\ \end{align}where $\epsilon, \nu, U$ are simply noise terms distributed according to $\mathcal{N}(0,1)$ and $\beta,\delta$ are `N_FEATURES`-dimensional vectors of random parameters. $\gamma$ is a scalar parameter.
###Code
N_SAMPLES = 10000
N_FEATURES = 5
GAMMA = 1.2
BETA = np.random.RandomState(0).uniform(0,1, size = N_FEATURES)
DELTA = np.random.RandomState(1).uniform(0,1, size = N_FEATURES)
X = np.random.RandomState(2).normal(size = (N_SAMPLES, N_FEATURES))
U = np.random.RandomState(3).normal(size = (N_SAMPLES))
T = 1/(1+np.exp(-(U+X.dot(DELTA))))>.5
Y0 = X @ BETA + np.random.RandomState(5).normal(size = N_SAMPLES)
tau = 10/(1 + np.exp(-GAMMA*X[:,0])) + np.random.normal(size = N_SAMPLES)
Y1 = Y0 + tau
y = Y0 + T*(Y1 - Y0)
###Output
_____no_output_____
###Markdown
> **Ex. 5.1.1:** Create a two-subplot figure, and plot $Y(0)$ and $Y(1)$ in one subplot against $X_0$. Plot $\tau(x)$ against $X_0$ in the other subplot. What do you see? Why do we observe $\tau=0$ in many cases?
###Code
# Your answer here
###Output
_____no_output_____
###Markdown
> **Ex. 5.1.2:** Is there a selection problem? Plot for each dimension of $X$ the relationship with treatment assignment.
###Code
# Your answer here
###Output
_____no_output_____
###Markdown
>**Ex.5.1.3:** Estimate a causal forest model using the `econml` package, and store the model in a new variable `cf`. >> To unconfound the treatment assignment, use the gradient boosted forest in the first "double machine learning" step. Then use the following line to create a dataframe of predicted treatment effects on the same data that you trained the model on. >> Hint: use the following setting >>```pythondiscrete_treatment=True```
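A minimal sketch of one possible approach is shown below (added for illustration, not a reference solution); the `CausalForestDML` estimator and the gradient boosting models used for the nuisance stages are assumptions about the intended `econml` API, so check the version you have installed:

```python
# Sketch only, assuming econml's CausalForestDML API
from econml.dml import CausalForestDML
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier

cf = CausalForestDML(model_y=GradientBoostingRegressor(),
                     model_t=GradientBoostingClassifier(),
                     discrete_treatment=True,
                     random_state=1)
cf.fit(y, T, X=X)

effects = pd.DataFrame({'tau_hat': cf.effect(X)})
```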
###Code
# Your answer here
###Output
_____no_output_____
###Markdown
>**Ex.5.1.4:** Plot a scatterplot of the estimated individual treatment effects against the simulated "true" ITE's `tau` that you produced in the beginning of this exercise set.
###Code
# Your answer here
###Output
_____no_output_____ |
Notebooks/LossFunctionDev.ipynb | ###Markdown
function definitions
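The `l_asimov` function defined below appears to implement the inverse square of the median (Asimov) discovery significance with a relative background uncertainty `syst_factr`; written out (this reading of the code is an added note, ignoring the small `1e-7` regularisation terms): $$Z_A^2 = 2\left[(s+b)\,\ln\frac{(s+b)\,(b+\sigma_b^2)}{b^2+(s+b)\,\sigma_b^2}-\frac{b^2}{\sigma_b^2}\,\ln\!\left(1+\frac{\sigma_b^2\,s}{b\,(b+\sigma_b^2)}\right)\right],\qquad \sigma_b=\texttt{syst\_factr}\cdot b,\qquad \texttt{l\_asimov}(s,b)\approx\frac{1}{Z_A^2}.$$ `max_bkg` caps the background at a value that grows with $s$, and `hybrid` switches to a linear continuation above that cap and to a $b/s^2$-shaped penalty below `s_min` to keep the loss well behaved.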
###Code
max_bkg_norm = 50.
def max_bkg(s, syst_factr):
return np.power(s, .65)*max_bkg_norm/np.power(syst_factr, 0.65)
def l_asimov(s, b, syst_factr=0.5):
spb = s+b
b2 = b*b
syst = syst_factr * b
syst2 = syst*syst
bpsyst2 = b+syst2
l_val = 0.5/(
spb * np.log(spb*bpsyst2/(b2+spb*syst2+1e-7)+1e-7)
-b2/(syst2+1e-7) * np.log(1+syst2*s/(b*bpsyst2+1e-7))
)
return l_val #+ bovers*100
def hybrid(s, b, syst_factr=0.5):
s+=1e-7
b+=1e-7
b_max = max_bkg(s, syst_factr)
ok = (b < b_max)
out = ok * l_asimov(s, b, syst_factr)
l_b_max = l_asimov(s, b_max, syst_factr)
l_b_max0p95 = l_asimov(s, b_max*0.95, syst_factr)
l_b_max1p05 = l_asimov(s, b_max*1.05, syst_factr)
slope = (l_b_max1p05-l_b_max0p95) / (b_max*0.1)
offset = l_b_max - slope*b_max
out += np.abs(1-ok) * (slope * b + offset)
    # stabilize the lower bound
s_min = 0.2
at_low_bound = s < s_min
norm = l_asimov(s_min, b, syst_factr) / (b/s_min/s_min)
out = np.abs(1-at_low_bound)*out
out += at_low_bound*b/s/s*norm
return out
def signifiLoss(s, b):
return (b)/(s+1e-7)
# try some values by hand
s = 1e-4
b = 1e-1
print max_bkg(s, 0.5)
print l_asimov(s, b)
print hybrid(s, b)
###Output
0.1970786149885032
-547681.9436280145
15994965.429229902
###Markdown
Plot against b (l_asimov and hybrid)
###Code
b_max = 20000
s_max = 100
x_b = np.arange(0., b_max, b_max/1000.)
y_b = l_asimov(s_max, x_b, 0.5)
plt.plot(x_b,y_b)
y_b = hybrid(s_max, x_b, 0.5)
plt.plot(x_b,y_b)
plt.show()
# conclusion:
# by manually testing where the maximum in b is for a fixed s, these tables are derived (with safety margin):
# syst_factr = 0.5:
# s_min | b_max
# 1 | 200
# 10 | 1000
# 100 | 4000
# 1000 | 20000
# 10000 | 100000
# syst_factr = 0.3:
# s_min | b_max
# 1 | 300
# 10 | 1500
# 100 | 7000
# 1000 | 30000
# 10000 | 125000
# syst_factr = 0.1:
# s_min | b_max
# 1 | 600
# 10 | 3000
# 100 | 15000
# 1000 | 60000
# 10000 | 300000
###Output
_____no_output_____
###Markdown
Plot hybrid against sThe yellow curve shows a simple power law function, f(x) = const / x
###Code
x_s = np.arange(0., s_max, s_max/1000.)
y_s = hybrid(x_s, b_max, 0.5)
plt.semilogy(x_s,y_s)
plt.semilogy(x_s,10000./x_s)
plt.show()
###Output
_____no_output_____
###Markdown
2D plot of l_asimov and hybrid (manual log scale applied)
###Code
s = np.arange(0., 100.)*.01
b = np.arange(0., 100.)*10
S,B = np.meshgrid(s,b)
sns.heatmap(np.log(l_asimov(S,B)))
sns.heatmap(np.log(hybrid(S,B)))
###Output
/nfs/dust/cms/user/tholenhe/installs/anaconda2/envs/hepML3/lib/python2.7/site-packages/ipykernel_launcher.py:1: RuntimeWarning: invalid value encountered in log
"""Entry point for launching an IPython kernel.
|
nbconvert/preprocessors/tests/files/Parallel Execute A.ipynb | ###Markdown
Ensure notebooks can execute in parallelThis notebook uses a file-system-based "lock" to assert that two instances of the notebook kernel will run in parallel. Each instance writes to a file in a temporary directory, and then tries to read the other file from the temporary directory, so that running them in sequence will fail, but running them in parallel will succeed.Two notebooks are launched, each of which sets the `this_notebook` variable. One notebook is set to `this_notebook = 'A'` and the other `this_notebook = 'B'`.
###Code
import os
import os.path
import tempfile
import time
# the variable this_notebook is injected in a cell above by the test framework.
this_notebook = 'A'
other_notebook = 'B'
directory = os.environ['NBEXECUTE_TEST_PARALLEL_TMPDIR']
with open(os.path.join(directory, 'test_file_{}.txt'.format(this_notebook)), 'w') as f:
f.write('Hello from {}'.format(this_notebook))
start = time.time()
timeout = 5
end = start + timeout
target_file = os.path.join(directory, 'test_file_{}.txt'.format(other_notebook))
while time.time() < end:
time.sleep(0.1)
if os.path.exists(target_file):
with open(target_file, 'r') as f:
text = f.read()
if text == 'Hello from {}'.format(other_notebook):
break
else:
assert False, "Timed out – didn't get a message from {}".format(other_notebook)
###Output
_____no_output_____ |
knowledge_network/Using KN to interface with data.gov.au - SAIMOS+excel+example.ipynb | ###Markdown
Reading Excel Data Via the Knowledge Network Python provides plenty of tools for reading and visualising different kinds of tabular data. Here we demonstrate using pandas and xlrd to load and manipulate Excel data and then display this data in a matplotlib graph, using a simple custom widget defined in 'Filtering Widget.ipynb'.This notebook was developed by the Oznome Project and is available at https://github.com/oznome/jupyter-examples. The filtering widget was produced in conjunction with the Oznome project as part of the CSIRO EUDM project.
###Code
import pandas, xlrd, requests, json
from pandas import np
import matplotlib.pyplot as plt
pandas.set_option('display.max_columns', 500)
%matplotlib inline
###Output
_____no_output_____
###Markdown
We request a data record from the CSIRO Knowledge Network
###Code
response = requests.get("http://kn.csiro.au/api/dataset?id=http%3A%2F%2Foznome.csiro.au%2Fid%2Fdata%2Fdata-gov-au%2Fsaimos-biological-and-flow-cytometry-data-collected-from-ctd-stations-in-south-australia-i-20142")
json_data = response.json()
###Output
_____no_output_____
###Markdown
Exploring the JSON response data, we can then figure out how to query this record for further details. First, though, let's create a class (from https://stackoverflow.com/questions/18873066/pretty-json-formatting-in-ipython-notebook) to make our JSON easier to explore.
###Code
import uuid
from IPython.display import display_javascript, display_html, display
import json
class RenderJSON(object):
def __init__(self, json_data):
if isinstance(json_data, dict):
self.json_str = json.dumps(json_data)
else:
self.json_str = json_data
self.uuid = str(uuid.uuid4())
def _ipython_display_(self):
display_html('<div id="{}" style="height: 600px; width:100%;"></div>'.format(self.uuid), raw=True)
display_javascript("""
require(["https://rawgit.com/caldwell/renderjson/master/renderjson.js"], function() {
document.getElementById('%s').appendChild(renderjson(%s))
});
""" % (self.uuid, self.json_str), raw=True)
json_data = response.json()
RenderJSON(json_data)
###Output
_____no_output_____
###Markdown
By using a python list comprehension we can query a particular excel workbook that is referenced in the metadata record
###Code
url = [resource for resource in json_data["resources"] if "Picophytoplankton" in resource["name"]][0]["url"]
url
###Output
_____no_output_____
###Markdown
Then using requests we can query the specific excel workbook and open it using xlrd
###Code
r = requests.get(url)
book = xlrd.open_workbook(file_contents=r.content)
###Output
_____no_output_____
###Markdown
Then list the various sheets contained in this workbook
###Code
book.sheet_names()
###Output
_____no_output_____
###Markdown
Using pandas we can read one of these sheets and do some exporatory data analysis starting with listing the available columns
###Code
dataframe = pandas.read_excel(url,sheetname='Converted_CLEAN')
dataframe.columns
###Output
_____no_output_____
###Markdown
The dataframe describe function tells us more information about these columns
###Code
dataframe.describe(include='all')
###Output
_____no_output_____
###Markdown
Sometimes it is useful to build widgets to help explore data and make a notebook user friendly. You can define widgets inline in a notebook, but it is also easy to define them in an adjacent notebook and then run them using %run
###Code
%run Filtering\ Widget.ipynb
###Output
_____no_output_____
###Markdown
The above code creates a widget class called FilteringWidget which we can then use to create a widget
###Code
filtered = FilteringWidget(dataframe, ['Station', 'Depth (m)'])
###Output
_____no_output_____
###Markdown
Interacting with this widget produces a filtered dataframe available by accessing the dataframe property. For example we can restrict the dataframe to the NRSKAI station
###Code
filtered.dataframe
filtered_frame = filtered.dataframe.replace('-', np.nan)
###Output
_____no_output_____
###Markdown
Thinking about what this data might be, we can make some experimental assumptions. Perhaps Rep is the experiment repeat number. We can further restrict the dataframe to particular organisms by specifying columns
###Code
filtered_frame = filtered_frame[[ 'Synechococcus ','Prochlorococus', 'Picoeukaryotes', 'Rep', 'Depth (category)']]
###Output
_____no_output_____
###Markdown
It looks like there is data for all the organisms in "Rep 2" so let's filter to that
###Code
filtered_frame = filtered_frame.loc[filtered_frame['Rep'] == 2]
filtered_frame
###Output
_____no_output_____
###Markdown
The current index looks like a unique identifier for the experiment, but we have restricted the data so that depth also provides a unique identifier in our filtered dataset. Let's get rid of the old index and set the new index to the depth so we can create a visualisation of this data
###Code
filtered_frame = filtered_frame.set_index('Depth (category)')
###Output
_____no_output_____
###Markdown
We also won't need the Rep column
###Code
filtered_frame.pop('Rep')
pass
filtered_frame.plot(kind='bar', stacked=True)
###Output
_____no_output_____ |
FashionTrainingLowCost.ipynb | ###Markdown
Setup directorycloning and merging directories together.
###Code
!git clone https://github.com/aryapei/In-shop-Clothes-From-Deepfashion.git
!rsync -a ./In-shop-Clothes-From-Deepfashion/Img/MEN/ ./In-shop-Clothes-From-Deepfashion/Img/WOMEN/
###Output
Cloning into 'In-shop-Clothes-From-Deepfashion'...
remote: Enumerating objects: 4, done.[K
remote: Counting objects: 100% (4/4), done.[K
remote: Compressing objects: 100% (4/4), done.[K
remote: Total 30676 (delta 0), reused 3 (delta 0), pack-reused 30672[K
Receiving objects: 100% (30676/30676), 397.23 MiB | 36.02 MiB/s, done.
Resolving deltas: 100% (16/16), done.
Checking out files: 100% (26451/26451), done.
###Markdown
Define Neural NetworkCreate a ConvNet instance and remove last layer to implement transfert learning.:warning: do not forget to freeze pretrained model reduce training workload.
###Code
#!/usr/bin/env python3
""" Low Cost Transfert Learning on CIBR with Inceptionv3 ConvNet
Description:
============
see this script as a disappointment to me.
Was hoping to correctly use ~~~InceptionV3~~~ VGG16 model by freezing the layers and fitting data generator to train this ConvNet.
The current script collect extracted features from ~~~InceptionV3~~~ VGG16 and names to write Hierarchical Data Format file.
Required setup:
===============
$ git clone https://github.com/aryapei/In-shop-Clothes-From-Deepfashion.git
$ rsync -a ./In-shop-Clothes-From-Deepfashion/Img/MEN/ ./In-shop-Clothes-From-Deepfashion/Img/WOMEN/
Thoses commands clone and merge current Fashion dataset hosted at https://github.com/aryapei/In-shop-Clothes-From-Deepfashion in the same folder ./In-shop-Clothes-From-Deepfashion/Img/WOMEN/
"""
import numpy as np
from numpy import linalg as LA
import os
import h5py
from tensorflow.keras.applications import VGG16
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from glob import glob
class ConvNet:
def __init__(self):
self.model = VGG16(input_shape=(244, 244, 3), weights="imagenet", include_top=False, pooling="max")
self.model.predict(np.zeros((1, 244, 244, 3)))
'''
    Use the VGG16 model to extract features
Output normalized feature vector
'''
def extract_feat(self, img_path):
img = image.load_img(img_path, target_size=(244,244))
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
feat = self.model.predict(img)
norm_feat = feat[0]/LA.norm(feat[0])
return norm_feat
if __name__ == "__main__":
img_dir = "/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN"
img_pattern = f"{img_dir}/**/**/*.jpg"
print(img_pattern)
img_list = glob(img_pattern)
print(f"{' feature extraction starts ':=^120}")
feats = []
names = []
model = ConvNet()
img_list_len = len(img_list)
for i, img_path in enumerate(img_list):
norm_feat = model.extract_feat(img_path)
feats.append(norm_feat)
img_name = '/'.join(img_path.split('/')[-5:])
names.append(img_name)
print(f"({i}/{img_list_len}) feat extraction of {img_name}.")
feats = np.array(feats)
names = np.string_(names)
print(f"{' writing feature extraction results ':=^120}")
h5f = h5py.File("featureCNN.h5", 'w')
h5f.create_dataset('dataset_feat', data=feats)
h5f.create_dataset('dataset_name', data=names)
h5f.close()
import numpy as np
from numpy import linalg as LA
import os
import h5py
from tensorflow.keras.applications import VGG16
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from glob import glob
class ConvNet:
def __init__(self):
self.model = VGG16(input_shape=(244, 244, 3), weights="imagenet", include_top=False, pooling="max")
self.model.predict(np.zeros((1, 244, 244, 3)))
'''
    Use the VGG16 model to extract features
Output normalized feature vector
'''
def extract_feat(self, img_path):
img = image.load_img(img_path, target_size=(244,244))
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
feat = self.model.predict(img)
norm_feat = feat[0]/LA.norm(feat[0])
return norm_feat
# Read the produced files :
h5f = h5py.File('./featureCNN.h5', 'r')
feats = h5f['dataset_feat'][:]
imgNames = h5f['dataset_name'][:]
h5f.close()
print(f"{' searching starts ':=^120}")
queryDir = '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Sweaters/id_00000062/01_1_front.jpg'
queryImg = mpimg.imread(queryDir)
plt.figure()
plt.subplot(2, 1, 1)
plt.imshow(queryImg)
plt.title("Query Image")
plt.axis('off')
model = ConvNet()
queryVec = model.extract_feat(queryDir)
scores = np.dot(queryVec, feats.T)
rank_ID = np.argsort(scores)[::-1]
rank_score = scores[rank_ID]
# number of top retrieved images to show
maxres = 10
local = "/content/In-shop-Clothes-From-Deepfashion/"
distant = "https://raw.githubusercontent.com/aryapei/In-shop-Clothes-From-Deepfashion/master/"
imlist = [f"{local}{imgNames[index].decode('utf-8')}" for i,index in enumerate(rank_ID[0:maxres])]
print("top %d images in order are: " % maxres, imlist)
plt.imshow(queryImg)
plt.title("search input")
plt.axis('off')
plt.show()
for i, im in enumerate(imlist):
    retrieved = mpimg.imread(im)  # renamed from `image` to avoid shadowing the keras image module imported above
    plt.imshow(retrieved)
plt.title("search output %d" % (i + 1))
plt.axis('off')
plt.show()
###Output
=================================================== searching starts ===================================================
top 10 images in order are: ['/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Sweaters/id_00000062/01_1_front.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Cardigans/id_00001087/02_1_front.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Pants/id_00001799/03_1_front.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Jackets_Coats/id_00004422/01_4_full.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Tees_Tanks/id_00002848/03_7_additional.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Cardigans/id_00001686/02_7_additional.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Tees_Tanks/id_00007365/01_7_additional.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Blouses_Shirts/id_00001915/02_4_full.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Jackets_Coats/id_00004446/01_4_full.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Rompers_Jumpsuits/id_00001885/01_7_additional.jpg']
|
3- Mathematics and Linear Algebra/Untitled.ipynb | ###Markdown
multiplication
###Code
import numpy as np

# initializing matrices
x = np.array([[1, 2], [4, 5]])
y = np.array([[7, 8], [9, 10]])
print (np.add(x,y))
z = np.subtract(x, y)
print(z)
print(np.divide(x, y))
print(np.divide(x, 2))
z.astype('f')
print (np.multiply(x,y))
###Output
[[ 7 16]
[36 50]]
###Markdown
Vector-Vector Products
###Code
x = [1, 2, 3]
y = [4, 5, 6]
np.cross(x, y)
x = np.array([1, 2, 3, 4])
y = np.array([5, 6, 7, 8])
print("x:", x)
print("y:", y)
np.dot(x, y)
print("x:", x)
x.shape = (4, 1)
print("xT:", x)
print("y:", y)
y.shape = (4, 1)
print("yT:", y)
x = np.array([1, 2, 3, 4])
y = np.array([5, 6, 7, 8])
print("x:", x)
print("y:", y)
print("xT:", x.T)
print("yT:", y.T)
#It only works, if we declare the variables not to be arrays of numbers, but in fact a matrix:
x = np.array([[1, 2, 3, 4]])
y = np.array([[5, 6, 7, 8]])
print("x:", x)
print("y:", y)
print("xT:", x.T)
print("yT:", y.T)
#numpy functions dot and outer are not affected by this distinction.
print("x:", x)
print("y:", y.T)
np.dot(x, y.T)
print("x:", x.T)
print("y:", y)
np.dot(y, x.T)
#To read the result from this array of arrays, we would need to access the value this way:
np.dot(y, x.T)[0][0]
###Output
_____no_output_____
###Markdown
Outer Product of Two Vectors
###Code
x = np.array([[1, 2, 3, 4]])
print("x:", x)
print("xT:", np.reshape(x, (4, 1))) #shape takes 1 variable while reshape takes two commands
print("xT:", x.T)
print("xT:", x.transpose())
x = np.array([[1, 2, 3, 4]])
y = np.array([[5, 6, 7, 8]])
x.T * y
#Numpy provides an outer function that does all that:
np.outer(x,y)
#simple case using the simple arrays for the data structures of the vectors does not affect the result of the outer function
x = np.array([1, 2, 3, 4])
y = np.array([5, 6, 7, 8])
np.outer(x, y)
###Output
_____no_output_____
###Markdown
Matrix-Vector Products
###Code
a = np.array([[ 5, 1 ,3], [ 1, 1 ,1], [ 1, 2 ,1]])
b = np.array([1, 2, 3])
print (a.dot(b))
A = np.array([[4, 5, 6],
[7, 8, 9]])
x = np.array([1, 2, 3])
A.dot(x)
# Matrix-Matrix Products
a = [[1, 0], [0, 1]]
b = [[4, 1], [2, 2]]
np.matmul(a, b)
matrix1 = np.matrix(a)
matrix2 = np.matrix(b)
matrix1 + matrix2
matrix1 - matrix2
###Output
_____no_output_____
###Markdown
Multiplication
###Code
np.dot(matrix1, matrix2)
matrix1 * matrix2
matrix1.dot(matrix2)
###Output
_____no_output_____
###Markdown
Identity Matrix
###Code
np.identity(3)
np.eye(3)
identy = np.array([[21, 5, 7],[9, 8, 16]])
print("identy:", identy)
identy.shape
np.identity(identy.shape[1], dtype="int")
np.identity(identy.shape[0], dtype="int")
inverse = np.linalg.inv(matrix2)
print(inverse)
inverse = np.linalg.inv(matrix1)
print(inverse)
###Output
[[1. 0.]
[0. 1.]]
###Markdown
Diagonal Matrix
###Code
import numpy as np
A = np.array([[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]])
np.diag(A)
np.diag(A, k=1)
np.diag(A, k=-2)
###Output
_____no_output_____
###Markdown
Transpose of a Matrix
###Code
a = np.array([[1, 2], [3, 4]])
a
a.transpose()
a.T
###Output
_____no_output_____
###Markdown
Symmetric Matrices
###Code
#a symmetric matrix is a square matrix that is equal to its transpose
N = 100
b = np.random.randint(-2000,2000,size=(N,N))
b_symm = (b + b.T)/2
###Output
_____no_output_____
###Markdown
trace
###Code
np.trace(np.eye(3))
print(np.trace(matrix1))
np.trace(matrix1)
det = np.linalg.det(matrix1)
print(det)
###Output
1.0
###Markdown
norms
###Code
v = np.array([1, 2, 3, 4])
print(np.linalg.norm(v))     # L2 (Euclidean) norm
print(np.linalg.norm(v, 1))  # L1 norm
###Output
_____no_output_____
###Markdown
Linear Independence and Rank
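The cell below shows one way to pick out linearly dependent rows; for the rank itself, `np.linalg.matrix_rank` can be used directly, as in this short added sketch:

```python
M = np.array([[0, 1, 0, 0],
              [0, 0, 1, 0],
              [0, 1, 1, 0],
              [1, 0, 0, 1]])
print(np.linalg.matrix_rank(M))  # 3, so the four rows are not linearly independent
```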
###Code
#How to find linearly independent rows from a matrix
matrix = np.array(
[
[0, 1 ,0 ,0],
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 0, 1]
])
lambdas, V = np.linalg.eig(matrix.T)
# The linearly dependent row vectors
print (matrix[lambdas == 0,:])
###Output
[[0 1 1 0]]
###Markdown
12- Subtraction and Addition of Matrices
###Code
import numpy as np
print("np.arange(9):", np.arange(9))
print("np.arange(9, 18):", np.arange(9, 18))
A = np.arange(9, 18).reshape((3, 3))
B = np.arange(9).reshape((3, 3))
print("A:", A)
print("B:", B)
A + B
A - B
x = np.array([[1,2],[3,4]])
y = np.linalg.inv(x)
print (x )
print (y )
print (np.dot(x,y))
def rvs(dim=3):
random_state = np.random
H = np.eye(dim)
D = np.ones((dim,))
for n in range(1, dim):
x = random_state.normal(size=(dim-n+1,))
D[n-1] = np.sign(x[0])
x[0] -= D[n-1]*np.sqrt((x*x).sum())
# Householder transformation
Hx = (np.eye(dim-n+1) - 2.*np.outer(x, x)/(x*x).sum())
mat = np.eye(dim)
mat[n-1:, n-1:] = Hx
H = np.dot(H, mat)
# Fix the last sign such that the determinant is 1
D[-1] = (-1)**(1-(dim % 2))*D.prod()
# Equivalent to np.dot(np.diag(D), H) but faster, apparently
H = (D*H.T).T
return H
###Output
_____no_output_____
###Markdown
14- Range and Nullspace of a Matrix
###Code
from scipy.linalg import null_space
A = np.array([[1, 1], [1, 1]])
ns = null_space(A)
ns * np.sign(ns[0,0]) # Remove the sign ambiguity of the vector
ns
###Output
_____no_output_____
###Markdown
15- Determinant
###Code
a = np.array([[1, 2], [3, 4]])
np.linalg.det(a)
###Output
_____no_output_____
###Markdown
Tensor
###Code
import tensorflow as tf
A = tf.Variable(np.zeros((5, 5), dtype=np.float32), trainable=False)
new_part = tf.ones((2,3))
update_A = A[2:4,2:5].assign(new_part)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
print(update_A.eval())
###Output
_____no_output_____
###Markdown
Hyperplane
###Code
##https://stackoverflow.com/questions/46511017/plot-hyperplane-linear-svm-python
from sklearn import svm
import matplotlib.pyplot as plt
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
fig, ax = plt.subplots()
clf2 = svm.LinearSVC(C=1).fit(X, Y)
# get the separating hyperplane
w = clf2.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf2.intercept_[0]) / w[1]
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx2, yy2 = np.meshgrid(np.arange(x_min, x_max, .2),
np.arange(y_min, y_max, .2))
Z = clf2.predict(np.c_[xx2.ravel(), yy2.ravel()])
Z = Z.reshape(xx2.shape)
ax.contourf(xx2, yy2, Z, cmap=plt.cm.coolwarm, alpha=0.3)
ax.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.coolwarm, s=25)
ax.plot(xx,yy)
ax.axis([x_min, x_max,y_min, y_max])
plt.show()
###Output
_____no_output_____ |
notebooks/11_Embeddings.ipynb | ###Markdown
EmbeddingsIn this lesson we will learn how to map tokens to vectors (embeddings) that capture the contextual, semantic and syntactic value of a token in text. View on practicalAI Run in Google Colab View code on GitHub So far, we've also represented our text data in a one-hot encoded form where each token is represented by an n-dimensional array. ```python[[0. 0. 0. ... 0. 0. 0.] [0. 0. 1. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] ... [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.]]```This allows us to preserve the structural information but there are two major disadvantages here. We used character level representations in the CNN lessons because the number of characters is small. Suppose we wanted to one-hot encode each word instead. Now the vocabulary size quickly grows, leading to large compute requirements. And though we preserve the structure within the text, the actual representation for each token does not preserve any relationship with respect to other tokens.In this notebook, we're going to learn about embeddings and how they address all the shortcomings of the representation methods we've seen so far. Overview * **Objective:** Represent tokens in text that capture the intrinsic semantic relationships.* **Advantages:** * Low-dimensionality while capturing relationships. * Interpretable token representations* **Disadvantages:** None* **Miscellaneous:** There are lots of pretrained embeddings to choose from but you can also train your own from scratch. Set up
###Code
# Use TensorFlow 2.x
%tensorflow_version 2.x
import os
import numpy as np
import tensorflow as tf
print("GPU Available: ", tf.test.is_gpu_available())
# Arguments
SEED = 1234
SHUFFLE = True
FILTERS = "!\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
LOWER = True
CHAR_LEVEL = False
# Set seed for reproducability
np.random.seed(SEED)
tf.random.set_seed(SEED)
###Output
_____no_output_____
###Markdown
Learning embeddings The main idea of embeddings is to have fixed length representations for the tokens in a text regardless of the number of tokens in the vocabulary. So instead of each token representation having the shape [1 X V] where V is vocab size, each token now has the shape [1 X D] where D is the embedding size (usually 50, 100, 200, 300). The numbers in the representation will no longer be 0s and 1s but rather floats that represent that token in a D-dimensional latent space. If the embeddings really did capture the relationship between tokens, then we should be able to inspect this latent space and confirm known relationships (we'll do this soon).But how do we learn the embeddings in the first place? The intuition behind embeddings is that the definition of a token depends not on the token itself but on its context. There are several different ways of doing this:1. Given the words in the context, predict the target word (CBOW - continuous bag of words).2. Given the target word, predict the context word (skip-gram).3. Given a sequence of words, predict the next word (LM - language modeling).All of these approaches involve creating data to train our model on. Every word in a sentence becomes the target word and the context words are determined by a window. In the image below (skip-gram), the window size is 2 (2 words to the left and right of the target word). We repeat this for every sentence in our corpus and this results in our training data for the unsupervised task. This is an unsupervised learning technique since we don't have official labels for contexts. The idea is that similar target words will appear with similar contexts and we can learn this relationship by repeatedly training our model with (context, target) pairs.We can learn embeddings using any of these approaches above and some work better than others. You can inspect the learned embeddings but the best way to choose an approach is to empirically validate the performance on a supervised task. Word2Vec We can learn embeddings by creating our models in TensorFlow but instead, we're going to use a library that specializes in embeddings and topic modeling called [Gensim](https://radimrehurek.com/gensim/).
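As a small added illustration (the sentence and variable names here are made up), this is how (target, context) pairs can be generated with a window of size 2:

```python
sentence = ["the", "boy", "wore", "a", "red", "hat"]
window = 2
pairs = []
for i, target in enumerate(sentence):
    for j in range(max(0, i - window), min(len(sentence), i + window + 1)):
        if j != i:
            pairs.append((target, sentence[j]))
print(pairs[:5])  # [('the', 'boy'), ('the', 'wore'), ('boy', 'the'), ('boy', 'wore'), ('boy', 'a')]
```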
###Code
import gensim
from gensim.models import KeyedVectors
from gensim.models import FastText
from gensim.test.utils import get_tmpfile
import nltk; nltk.download('punkt')
from tensorflow.keras.preprocessing.text import text_to_word_sequence
import urllib
import warnings; warnings.filterwarnings('ignore')
# Arguments
DATA_FILE = 'harrypotter.txt'
EMBEDDING_DIM = 100
WINDOW = 5
MIN_COUNT = 3 # Ignores all words with total frequency lower than this
SKIP_GRAM = 1 # 0 = CBOW
NEGATIVE_SAMPLING = 20
# Download data from GitHub to the notebook's local drive
url = "https://raw.githubusercontent.com/practicalAI/practicalAI/master/data/harrypotter.txt"
response = urllib.request.urlopen(url)
html = response.read()
with open(DATA_FILE, 'wb') as fp:
fp.write(html)
# Split text into sentences
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
with open(DATA_FILE, encoding='cp1252') as fp:
book = fp.read()
sentences = tokenizer.tokenize(book)
print (len(sentences))
print (sentences[11])
# Preprocess sentences
sentences = [text_to_word_sequence(
text=sentence,
filters=FILTERS,
lower=LOWER,
split=' ') for sentence in sentences]
print (sentences[11])
###Output
['snape', 'nodded', 'but', 'did', 'not', 'elaborate']
###Markdown
When we have large vocabularies to learn embeddings for, things can get complex very quickly. Recall that backpropagation with softmax updates both the correct and incorrect class weights. This becomes a massive computation for every backward pass we do, so a workaround is to use [negative sampling](http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/), which only updates the correct class and a few sampled incorrect classes (negative_sampling=20). We're able to do this because of the large amount of training data, where we'll see the same word as the target class multiple times. A rough sketch of the sampling idea follows.
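This is a sketch only, not gensim's actual implementation (gensim draws negatives from a smoothed unigram distribution; uniform sampling and the sizes below are assumptions for illustration): for each training pair we keep the one true target and draw a handful of random negative word ids, so only those output weights get updated.

```python
import numpy as np

# Sketch of negative sampling; sizes are illustrative assumptions.
vocab_size = 10000
num_negatives = 20

def sample_update_ids(target_id, vocab_size, num_negatives):
    """Return the positive id plus a few random negative ids to update."""
    negatives = []
    while len(negatives) < num_negatives:
        candidate = int(np.random.randint(0, vocab_size))
        if candidate != target_id:  # don't accidentally sample the true target
            negatives.append(candidate)
    return [target_id] + negatives  # 1 positive + num_negatives negatives

print(sample_update_ids(target_id=42, vocab_size=vocab_size, num_negatives=num_negatives))
```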
###Code
# Super fast because of optimized C code under the hood
w2v = gensim.models.Word2Vec(sentences=sentences, size=EMBEDDING_DIM,
window=WINDOW, min_count=MIN_COUNT,
sg=SKIP_GRAM, negative=NEGATIVE_SAMPLING)
print (w2v)
# Vector for each word
w2v.wv.get_vector("potter")
# Get nearest neighbors (excluding itself)
w2v.wv.most_similar(positive="scar", topn=5)
# Saving and loading
w2v.wv.save_word2vec_format('model.bin', binary=True)
w2v = KeyedVectors.load_word2vec_format('model.bin', binary=True)
###Output
_____no_output_____
###Markdown
FastText What happens when a word doesn't exist in our vocabulary? We could assign an UNK token which is used for all OOV (out of vocabulary) words, or we could use [FastText](https://radimrehurek.com/gensim/models/fasttext.html), which uses character-level n-grams to embed a word. This helps embed rare words, misspelled words, and also words that don't exist in our corpus but are similar to words that do.
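The n-gram idea can be sketched in a few lines (conceptual only; real FastText also hashes n-grams into buckets and sums their vectors): an OOV word like "scarring" shares many character n-grams with an in-vocabulary word like "scar", which is why its embedding lands nearby.

```python
def char_ngrams(word, n_min=3, n_max=4):
    """Character n-grams with the < and > boundary markers FastText uses."""
    padded = f"<{word}>"
    grams = []
    for n in range(n_min, n_max + 1):
        for i in range(len(padded) - n + 1):
            grams.append(padded[i:i + n])
    return grams

print(char_ngrams("scar"))      # ['<sc', 'sca', 'car', 'ar>', '<sca', 'scar', 'car>']
print(char_ngrams("scarring"))  # shares '<sc', 'sca', 'car', ... with 'scar'
```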
###Code
# Super fast because of optimized C code under the hood
ft = gensim.models.FastText(sentences=sentences, size=EMBEDDING_DIM,
window=WINDOW, min_count=MIN_COUNT,
sg=SKIP_GRAM, negative=NEGATIVE_SAMPLING)
print (ft)
# This word doesn't exist so the word2vec model will error out
# w2v.wv.most_similar(positive="scarring", topn=5)
# FastText will use n-grams to embed an OOV word
ft.wv.most_similar(positive="scarring", topn=5)
# Saving and loading
ft.wv.save('model.bin')
ft = KeyedVectors.load('model.bin')
###Output
_____no_output_____
###Markdown
Pretrained embeddings We can learn embeddings from scratch using one of the approaches above, but we can also leverage pretrained embeddings that have been trained on millions of documents. Popular ones include Word2Vec (skip-gram) and GloVe (global word-word co-occurrence). We can validate that these embeddings captured meaningful semantic relationships by checking known relationships, such as analogies and nearest neighbors.
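Under the hood, queries like most_similar are just vector arithmetic plus cosine similarity; here is a minimal sketch with made-up 2-D vectors (purely illustrative assumptions, not real GloVe values):

```python
import numpy as np

def cosine_similarity(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

# Made-up toy vectors, only to illustrate the analogy arithmetic.
toy = {
    "king":  np.array([0.9, 0.8]),
    "man":   np.array([0.5, 0.2]),
    "woman": np.array([0.5, 0.9]),
    "queen": np.array([0.9, 1.4]),
    "apple": np.array([0.1, -0.3]),
}
query = toy["king"] - toy["man"] + toy["woman"]  # (king - man) + woman
candidates = {w: v for w, v in toy.items() if w not in ("king", "man", "woman")}
scores = {w: cosine_similarity(query, v) for w, v in candidates.items()}
print(max(scores, key=scores.get))  # 'queen' for these toy vectors
```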
###Code
from gensim.scripts.glove2word2vec import glove2word2vec
from io import BytesIO
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from urllib.request import urlopen
from zipfile import ZipFile
###Output
_____no_output_____
###Markdown
Components
###Code
def plot_embeddings(words, embeddings, pca_results):
for word in words:
index = embeddings.index2word.index(word)
plt.scatter(pca_results[index, 0], pca_results[index, 1])
plt.annotate(word, xy=(pca_results[index, 0], pca_results[index, 1]))
plt.show()
###Output
_____no_output_____
###Markdown
Operations
###Code
# Arguments
EMBEDDING_DIM = 100
# Unzip the file (may take ~3-5 minutes)
resp = urlopen('http://nlp.stanford.edu/data/glove.6B.zip')
zipfile = ZipFile(BytesIO(resp.read()))
zipfile.namelist()
# Write embeddings to file
embeddings_file = 'glove.6B.{0}d.txt'.format(EMBEDDING_DIM)
zipfile.extract(embeddings_file)
# Preview of the GloVe embeddings file
with open(embeddings_file, 'r') as fp:
line = next(fp)
values = line.split()
word = values[0]
embedding = np.asarray(values[1:], dtype='float32')
print (f"word: {word}")
print (f"embedding:\n{embedding}")
print (f"embedding dim: {len(embedding)}")
# Save GloVe embeddings to local directory in word2vec format
word2vec_output_file = '{0}.word2vec'.format(embeddings_file)
glove2word2vec(embeddings_file, word2vec_output_file)
# Load embeddings (may take a minute)
glove = KeyedVectors.load_word2vec_format(word2vec_output_file, binary=False)
# (king - man) + woman = ?
glove.most_similar(positive=['woman', 'king'], negative=['man'], topn=5)
# Get nearest neighbors (excluding itself)
glove.wv.most_similar(positive="goku", topn=5)
# Reduce dimensionality for plotting
X = glove[glove.wv.vocab]
pca = PCA(n_components=2)
pca_results = pca.fit_transform(X)
# Visualize
plot_embeddings(words=["king", "queen", "man", "woman"],
embeddings=glove,
pca_results=pca_results)
# Bias in embeddings
glove.most_similar(positive=['woman', 'doctor'], negative=['man'], topn=5)
###Output
_____no_output_____
###Markdown
Using Embeddings There are several different ways to use embeddings: 1. Use your own trained embeddings (trained on an unsupervised dataset). 2. Use pretrained embeddings (GloVe, word2vec, etc.). 3. Use randomly initialized embeddings. A sketch of how each option maps to a Keras Embedding layer is shown below. We will explore the different options by revisiting our AGNews classification task. Set up
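As a preview (a sketch with assumed sizes; the notebook below instead calls set_weights after building the model, which is equivalent), the options differ only in how the Embedding layer is initialized and whether it is trainable:

```python
import numpy as np
from tensorflow.keras.layers import Embedding

vocab_size, embedding_dim = 5000, 100                      # assumed sizes for illustration
weight_matrix = np.random.rand(vocab_size, embedding_dim)  # stand-in for your own or GloVe vectors

# Options 1 & 2: start from an existing weight matrix (your own or pretrained),
# then either freeze it or keep training it on the supervised task
frozen_emb = Embedding(input_dim=vocab_size, output_dim=embedding_dim,
                       weights=[weight_matrix], trainable=False)
finetuned_emb = Embedding(input_dim=vocab_size, output_dim=embedding_dim,
                          weights=[weight_matrix], trainable=True)

# Option 3: randomly initialized embeddings, learned entirely from the supervised task
random_emb = Embedding(input_dim=vocab_size, output_dim=embedding_dim, trainable=True)
```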
###Code
# Arguments
SEED = 1234
SHUFFLE = True
DATA_FILE = 'news.csv'
INPUT_FEATURE = 'title'
OUTPUT_FEATURE = 'category'
FILTERS = "!\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
LOWER = True
CHAR_LEVEL = False
TRAIN_SIZE = 0.7
VAL_SIZE = 0.15
TEST_SIZE = 0.15
NUM_EPOCHS = 10
BATCH_SIZE = 64
EMBEDDING_DIM = 100
NUM_FILTERS = 50
FILTER_SIZES = [2, 3, 4]
HIDDEN_DIM = 100
DROPOUT_P = 0.1
LEARNING_RATE = 1e-3
EARLY_STOPPING_CRITERIA = 3
###Output
_____no_output_____
###Markdown
Data We will download the [AG News dataset](http://www.di.unipi.it/~gulli/AG_corpus_of_news_articles.html), which consists of 120000 text samples from 4 unique classes ('Business', 'Sci/Tech', 'Sports', 'World')
###Code
import pandas as pd
import re
import urllib
# Download data from GitHub to the notebook's local drive
url = "https://raw.githubusercontent.com/practicalAI/practicalAI/master/data/news.csv"
response = urllib.request.urlopen(url)
html = response.read()
with open(DATA_FILE, 'wb') as fp:
fp.write(html)
# Load data
df = pd.read_csv(DATA_FILE, header=0)
X = df[INPUT_FEATURE].values
y = df[OUTPUT_FEATURE].values
df.head(5)
###Output
_____no_output_____
###Markdown
Split data
###Code
import collections
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Components
###Code
def train_val_test_split(X, y, val_size, test_size, shuffle):
"""Split data into train/val/test datasets.
"""
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=test_size, stratify=y, shuffle=shuffle)
X_train, X_val, y_train, y_val = train_test_split(
X_train, y_train, test_size=val_size, stratify=y_train, shuffle=shuffle)
return X_train, X_val, X_test, y_train, y_val, y_test
###Output
_____no_output_____
###Markdown
Operations
###Code
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, val_size=VAL_SIZE, test_size=TEST_SIZE, shuffle=SHUFFLE)
class_counts = dict(collections.Counter(y))
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"X_train[0]: {X_train[0]}")
print (f"y_train[0]: {y_train[0]}")
print (f"Classes: {class_counts}")
###Output
X_train: (86700,), y_train: (86700,)
X_val: (15300,), y_val: (15300,)
X_test: (18000,), y_test: (18000,)
X_train[0]: Last call for Jack Daniel #39;s?
y_train[0]: Business
Classes: {'Business': 30000, 'Sci/Tech': 30000, 'Sports': 30000, 'World': 30000}
###Markdown
Tokenizer Unlike the previous notebook, we will be processing our text at a word-level (as opposed to character-level).
###Code
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
###Output
_____no_output_____
###Markdown
Components
###Code
def untokenize(indices, tokenizer):
"""Untokenize a list of indices into string."""
return " ".join([tokenizer.index_word[index] for index in indices])
###Output
_____no_output_____
###Markdown
Operations
###Code
# Input vectorizer
X_tokenizer = Tokenizer(filters=FILTERS,
lower=LOWER,
char_level=CHAR_LEVEL,
oov_token='<UNK>')
# Fit only on train data
X_tokenizer.fit_on_texts(X_train)
vocab_size = len(X_tokenizer.word_index) + 1
print (f"# tokens: {vocab_size}")
# Convert text to sequence of tokens
original_text = X_train[0]
X_train = np.array(X_tokenizer.texts_to_sequences(X_train))
X_val = np.array(X_tokenizer.texts_to_sequences(X_val))
X_test = np.array(X_tokenizer.texts_to_sequences(X_test))
preprocessed_text = untokenize(X_train[0], X_tokenizer)
print (f"{original_text} \n\t→ {preprocessed_text} \n\t→ {X_train[0]}")
###Output
Last call for Jack Daniel #39;s?
→ last call for jack daniel 39 s
→ [316, 314, 5, 6877, 10686, 4, 6]
###Markdown
LabelEncoder
###Code
from sklearn.preprocessing import LabelEncoder
###Output
_____no_output_____
###Markdown
Operations
###Code
# Output vectorizer
y_tokenizer = LabelEncoder()
# Fit on train data
y_tokenizer = y_tokenizer.fit(y_train)
classes = list(y_tokenizer.classes_)
print (f"classes: {classes}")
# Convert labels to tokens
y_train = y_tokenizer.transform(y_train)
y_val = y_tokenizer.transform(y_val)
y_test = y_tokenizer.transform(y_test)
print (f"y_train[0]: {y_train[0]}")
# Class weights
counts = np.bincount(y_train)
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"class counts: {counts},\nclass weights: {class_weights}")
###Output
class counts: [21675 21675 21675 21675],
class weights: {0: 4.61361014994233e-05, 1: 4.61361014994233e-05, 2: 4.61361014994233e-05, 3: 4.61361014994233e-05}
###Markdown
Generators
###Code
import math
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import Sequence
###Output
_____no_output_____
###Markdown
Components
###Code
class DataGenerator(Sequence):
"""Custom data loader."""
def __init__(self, X, y, batch_size, max_filter_size, shuffle=True):
self.X = X
self.y = y
self.batch_size = batch_size
self.max_filter_size = max_filter_size
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
"""# of batches."""
return math.ceil(len(self.X) / self.batch_size)
def __str__(self):
return (f"<DataGenerator(" \
f"batch_size={self.batch_size}, " \
f"batches={len(self)}, " \
f"shuffle={self.shuffle})>")
def __getitem__(self, index):
"""Generate a batch."""
# Gather indices for this batch
batch_indices = self.epoch_indices[
index * self.batch_size:(index+1)*self.batch_size]
# Generate batch data
X, y = self.create_batch(batch_indices=batch_indices)
return X, y
def on_epoch_end(self):
"""Create indices after each epoch."""
self.epoch_indices = np.arange(len(self.X))
if self.shuffle == True:
np.random.shuffle(self.epoch_indices)
def create_batch(self, batch_indices):
"""Generate batch from indices."""
# Get batch data
X = self.X[batch_indices]
y = self.y[batch_indices]
# Pad batch
max_seq_len = max(self.max_filter_size, max([len(x) for x in X]))
X = pad_sequences(X, padding="post", maxlen=max_seq_len)
return X, y
###Output
_____no_output_____
###Markdown
Operations
###Code
# Dataset generator
training_generator = DataGenerator(X=X_train,
y=y_train,
batch_size=BATCH_SIZE,
max_filter_size=max(FILTER_SIZES),
shuffle=SHUFFLE)
validation_generator = DataGenerator(X=X_val,
y=y_val,
batch_size=BATCH_SIZE,
max_filter_size=max(FILTER_SIZES),
shuffle=False)
testing_generator = DataGenerator(X=X_test,
y=y_test,
batch_size=BATCH_SIZE,
max_filter_size=max(FILTER_SIZES),
shuffle=False)
print (f"training_generator: {training_generator}")
print (f"validation_generator: {validation_generator}")
print (f"testing_generator: {testing_generator}")
###Output
training_generator: <DataGenerator(batch_size=64, batches=1355, shuffle=True)>
validation_generator: <DataGenerator(batch_size=64, batches=240, shuffle=False)>
testing_generator: <DataGenerator(batch_size=64, batches=282, shuffle=False)>
###Markdown
Model Let's visualize the model's forward pass. 1. We'll first tokenize our inputs (`batch_size`, `max_seq_len`). 2. Then we'll embed our tokenized inputs (`batch_size`, `max_seq_len`, `embedding_dim`). 3. We'll apply convolution via filters (`filter_size`, `embedding_dim`, `num_filters`). Our filters act as word-level n-gram detectors. We have three different filter sizes (2, 3 and 4) and they will act as bi-gram, tri-gram and 4-gram feature extractors, respectively. 4. We'll apply 1D global max pooling which will extract the most relevant information from the feature maps for making the decision. 5. We feed the pooled outputs to a fully-connected (FC) layer (with dropout). 6. We use one more FC layer with softmax to derive class probabilities. A sketch tracing the shapes through one filter size is shown below.
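To make the shapes concrete, here is a small sketch (the dimensions are assumptions for illustration) tracing a batch through one filter size; the other filter sizes behave the same way and their pooled outputs get concatenated:

```python
import tensorflow as tf

# Assumed dimensions, purely for illustration
batch_size, max_seq_len, embedding_dim = 64, 20, 100
vocab_size, num_filters, filter_size = 5000, 50, 3

x = tf.random.uniform((batch_size, max_seq_len), maxval=vocab_size, dtype=tf.int32)
x_emb = tf.keras.layers.Embedding(vocab_size, embedding_dim)(x)            # (64, 20, 100)
z = tf.keras.layers.Conv1D(num_filters, filter_size, padding='same',
                           activation='relu')(x_emb)                       # (64, 20, 50)
p = tf.keras.layers.GlobalMaxPool1D()(z)                                   # (64, 50)
print(x_emb.shape, z.shape, p.shape)  # concatenating all 3 filter sizes → (64, 150)
```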
###Code
from tensorflow.keras.layers import Concatenate
from tensorflow.keras.layers import Conv1D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import GlobalMaxPool1D
from tensorflow.keras.layers import Input
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
###Output
_____no_output_____
###Markdown
Components
###Code
class TextClassificationCNNModel(Model):
def __init__(self, vocab_size, embedding_dim, filter_sizes, num_filters,
hidden_dim, dropout_p, num_classes, freeze_embeddings=False):
super(TextClassificationCNNModel, self).__init__()
# Embeddings
self.embedding = Embedding(input_dim=vocab_size,
output_dim=embedding_dim,
trainable=not freeze_embeddings)
# Convolutional filters
self.convs = []
self.pools = []
for filter_size in filter_sizes:
conv = Conv1D(filters=num_filters, kernel_size=filter_size,
padding='same', activation='relu')
pool = GlobalMaxPool1D(data_format='channels_last')
self.convs.append(conv)
self.pools.append(pool)
# Concatenation
self.concat = Concatenate(axis=1)
# FC layers
self.fc1 = Dense(units=hidden_dim, activation='relu')
self.dropout = Dropout(rate=dropout_p)
self.fc2 = Dense(units=num_classes, activation='softmax')
def call(self, x_in, training=False):
"""Forward pass."""
# Embed
x_emb = self.embedding(x_in)
# Convolutions
convs = []
for i in range(len(self.convs)):
z = self.convs[i](x_emb)
z = self.pools[i](z)
convs.append(z)
# Concatenate
z_cat = self.concat(convs)
# FC
z = self.fc1(z_cat)
if training:
z = self.dropout(z, training=training)
y_pred = self.fc2(z)
return y_pred
def sample(self, input_shape):
x = Input(shape=input_shape)
return Model(inputs=x, outputs=self.call(x)).summary()
###Output
_____no_output_____
###Markdown
GloVe embeddings Components
###Code
def load_glove_embeddings(embeddings_file):
"""Load embeddings from a file."""
embeddings = {}
with open(embeddings_file, "r") as fp:
for index, line in enumerate(fp):
values = line.split()
word = values[0]
embedding = np.asarray(values[1:], dtype='float32')
embeddings[word] = embedding
return embeddings
def make_embeddings_matrix(embeddings, word_index, embedding_dim):
"""Create embeddings matrix to use in Embedding layer."""
embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim))
for word, i in word_index.items():
embedding_vector = embeddings.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
return embedding_matrix
###Output
_____no_output_____
###Markdown
Operations
###Code
# Create embeddings
embeddings_file = 'glove.6B.{0}d.txt'.format(EMBEDDING_DIM)
glove_embeddings = load_glove_embeddings(embeddings_file=embeddings_file)
embedding_matrix = make_embeddings_matrix(embeddings=glove_embeddings,
word_index=X_tokenizer.word_index,
embedding_dim=EMBEDDING_DIM)
print (f"<Embeddings(words={embedding_matrix.shape[0]}, dim={embedding_matrix.shape[1]})>")
###Output
<Embeddings(words=29917, dim=100)>
###Markdown
Experiments Once you have chosen your embeddings, you can choose to freeze them or continue to train them using the supervised data (this could lead to overfitting). In this example, we will do three experiments: * frozen GloVe embeddings * fine-tuned (unfrozen) GloVe embeddings * randomly initialized embeddings
###Code
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.callbacks import TensorBoard
%load_ext tensorboard
###Output
_____no_output_____
###Markdown
GloVe embeddings (frozen)
###Code
# Arguments
FREEZE_EMBEDDINGS = True
# Initialize model
glove_frozen_model = TextClassificationCNNModel(vocab_size=vocab_size,
embedding_dim=EMBEDDING_DIM,
filter_sizes=FILTER_SIZES,
num_filters=NUM_FILTERS,
hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P,
num_classes=len(classes),
freeze_embeddings=FREEZE_EMBEDDINGS)
glove_frozen_model.sample(input_shape=(10,))
# Set embeddings
glove_frozen_model.layers[0].set_weights([embedding_matrix])
# Compile
glove_frozen_model.compile(optimizer=Adam(lr=LEARNING_RATE),
loss=SparseCategoricalCrossentropy(),
metrics=['accuracy'])
# Callbacks
callbacks = [EarlyStopping(monitor='val_loss', patience=EARLY_STOPPING_CRITERIA, verbose=1, mode='min'),
ReduceLROnPlateau(patience=1, factor=0.1, verbose=0),
TensorBoard(log_dir='tensorboard/glove_frozen', histogram_freq=1, update_freq='epoch')]
# Training
training_history = glove_frozen_model.fit_generator(generator=training_generator,
epochs=NUM_EPOCHS,
validation_data=validation_generator,
callbacks=callbacks,
shuffle=False,
class_weight=class_weights,
verbose=1)
# Evaluation
testing_history = glove_frozen_model.evaluate_generator(generator=testing_generator,
verbose=1)
###Output
282/282 [==============================] - 6s 22ms/step - loss: 0.3690 - accuracy: 0.8684
###Markdown
Fine-tuned GloVe embeddings (unfrozen)
###Code
# Arguments
FREEZE_EMBEDDINGS = False
# Initialize model
glove_finetuned_model = TextClassificationCNNModel(vocab_size=vocab_size,
embedding_dim=EMBEDDING_DIM,
filter_sizes=FILTER_SIZES,
num_filters=NUM_FILTERS,
hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P,
num_classes=len(classes),
freeze_embeddings=FREEZE_EMBEDDINGS)
glove_finetuned_model.sample(input_shape=(10,))
# Set embeddings
glove_finetuned_model.layers[0].set_weights([embedding_matrix])
# Compile
glove_finetuned_model.compile(optimizer=Adam(lr=LEARNING_RATE),
loss=SparseCategoricalCrossentropy(),
metrics=['accuracy'])
# Callbacks
callbacks = [EarlyStopping(monitor='val_loss', patience=EARLY_STOPPING_CRITERIA, verbose=1, mode='min'),
ReduceLROnPlateau(patience=1, factor=0.1, verbose=0),
TensorBoard(log_dir='tensorboard/glove_finetuned', histogram_freq=1, update_freq='epoch')]
# Training
training_history = glove_finetuned_model.fit_generator(generator=training_generator,
epochs=NUM_EPOCHS,
validation_data=validation_generator,
callbacks=callbacks,
shuffle=False,
class_weight=class_weights,
verbose=1)
# Evaluation
testing_history = glove_finetuned_model.evaluate_generator(generator=testing_generator,
verbose=1)
###Output
282/282 [==============================] - 6s 21ms/step - loss: 0.3710 - accuracy: 0.8728
###Markdown
Randomly initialized embeddings
###Code
# Arguments
FREEZE_EMBEDDINGS = False
random_initialized_model = TextClassificationCNNModel(vocab_size=vocab_size,
embedding_dim=EMBEDDING_DIM,
filter_sizes=FILTER_SIZES,
num_filters=NUM_FILTERS,
hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P,
num_classes=len(classes),
freeze_embeddings=FREEZE_EMBEDDINGS)
# Compile
random_initialized_model.compile(optimizer=Adam(lr=LEARNING_RATE),
loss=SparseCategoricalCrossentropy(),
metrics=['accuracy'])
# Callbacks
callbacks = [EarlyStopping(monitor='val_loss', patience=EARLY_STOPPING_CRITERIA, verbose=1, mode='min'),
ReduceLROnPlateau(patience=1, factor=0.1, verbose=0),
TensorBoard(log_dir='tensorboard/randomly_initialized', histogram_freq=1, update_freq='epoch')]
# Training
training_history = random_initialized_model.fit_generator(generator=training_generator,
epochs=NUM_EPOCHS,
validation_data=validation_generator,
callbacks=callbacks,
shuffle=False,
class_weight=class_weights,
verbose=1)
# Evaluation
testing_history = random_initialized_model.evaluate_generator(generator=testing_generator,
verbose=1)
%tensorboard --logdir tensorboard
###Output
_____no_output_____
###Markdown
Complete evaluation Looks like the fine-tuned GloVe embeddings had the best test performance, so let's do a proper evaluation and inference with that strategy.
###Code
model = glove_finetuned_model
import io
import itertools
import json
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
###Output
_____no_output_____
###Markdown
Components
###Code
def plot_confusion_matrix(y_true, y_pred, classes, cmap=plt.cm.Blues):
"""Plot a confusion matrix using ground truth and predictions."""
# Confusion matrix
cm = confusion_matrix(y_true, y_pred)
cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# Figure
fig = plt.figure()
ax = fig.add_subplot(111)
    cax = ax.matshow(cm, cmap=cmap)
fig.colorbar(cax)
# Axis
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
ax.set_xticklabels([''] + classes)
ax.set_yticklabels([''] + classes)
ax.xaxis.set_label_position('bottom')
ax.xaxis.tick_bottom()
# Values
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, f"{cm[i, j]:d} ({cm_norm[i, j]*100:.1f}%)",
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
# Display
plt.show()
def get_performance(y_true, y_pred, classes):
"""Per-class performance metrics. """
performance = {'overall': {}, 'class': {}}
y_pred = np.argmax(y_pred, axis=1)
metrics = precision_recall_fscore_support(y_true, y_pred)
# Overall performance
performance['overall']['precision'] = np.mean(metrics[0])
performance['overall']['recall'] = np.mean(metrics[1])
performance['overall']['f1'] = np.mean(metrics[2])
performance['overall']['num_samples'] = np.float64(np.sum(metrics[3]))
# Per-class performance
for i in range(len(classes)):
performance['class'][classes[i]] = {
"precision": metrics[0][i],
"recall": metrics[1][i],
"f1": metrics[2][i],
"num_samples": np.float64(metrics[3][i])
}
return performance
###Output
_____no_output_____
###Markdown
Operations
###Code
# Evaluation
test_history = model.evaluate_generator(generator=testing_generator, verbose=1)
y_pred = model.predict_generator(generator=testing_generator, verbose=1)
print (f"test history: {test_history}")
# Class performance
performance = get_performance(y_true=y_test,
y_pred=y_pred,
classes=classes)
print (json.dumps(performance, indent=4))
# Confusion matrix
plt.rcParams["figure.figsize"] = (7,7)
y_pred = np.argmax(y_pred, axis=1)
plot_confusion_matrix(y_test, y_pred, classes=classes)
print (classification_report(y_test, y_pred))
###Output
_____no_output_____
###Markdown
Inference
###Code
import collections
###Output
_____no_output_____
###Markdown
Components
###Code
def get_probability_distributions(probabilities, classes):
"""Produce probability distributions with labels."""
probability_distributions = []
for i, y_prob in enumerate(probabilities):
probability_distribution = {}
for j, prob in enumerate(y_prob):
probability_distribution[classes[j]] = np.float64(prob)
probability_distribution = collections.OrderedDict(
sorted(probability_distribution.items(), key=lambda kv: kv[1], reverse=True))
probability_distributions.append(probability_distribution)
return probability_distributions
###Output
_____no_output_____
###Markdown
Operations
###Code
# Inputs
texts = ["This weekend the greatest tennis players will fight for the championship."]
num_samples = len(texts)
X_infer = np.array(X_tokenizer.texts_to_sequences(texts))
print (f"{texts[0]} \n\t→ {untokenize(X_infer[0], X_tokenizer)} \n\t→ {X_infer[0]}")
print (f"len(X_infer[0]): {len(X_infer[0])} words")
y_filler = np.array([0]*num_samples)
# Inference data generator
inference_generator = DataGenerator(X=X_infer,
y=y_filler,
batch_size=BATCH_SIZE,
max_filter_size=max(FILTER_SIZES),
shuffle=False)
# Predict
probabilities = model.predict_generator(generator=inference_generator,
verbose=1)
# Results
probability_distributions = get_probability_distributions(probabilities=probabilities,
classes=y_tokenizer.classes_)
results = []
for index in range(num_samples):
results.append({
'raw_input': texts[index],
'preprocessed_input': untokenize(indices=X_infer[index], tokenizer=X_tokenizer),
'tokenized_input': str(X_infer[index]),
'probabilities': probability_distributions[index]
})
print (json.dumps(results, indent=4))
###Output
[
{
"raw_input": "This weekend the greatest tennis players will fight for the championship.",
"preprocessed_input": "this weekend the greatest tennis players will fight for the championship",
"tokenized_input": "[ 272 2283 10 6450 878 370 60 238 5 10 1465]",
"probabilities": {
"Sports": 0.7571110129356384,
"World": 0.2408323436975479,
"Sci/Tech": 0.0012546397047117352,
"Business": 0.0008020797977223992
}
}
]
###Markdown
Interpretability Recall that each of our unique filter sizes (2, 3 and 4) acts as an n-gram feature detector. When these filters convolve over our embedded input (`N`, `max_seq_len`, `embedding_dim`), they produce feature maps of shape (`N`, `max_seq_len`, `num_filters`) for each filter size. Since we used `SAME` padding with stride=1, our feature maps have the same length as our inputs (`max_seq_len`), which you can think of as what the filters extracted from each n-gram window. When we apply 1D global max-pooling we're effectively extracting the most relevant information from the feature maps. We can inspect the trained model at the pooling step to determine which n-grams were most relevant towards the prediction; a small sketch of this idea follows.
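A tiny sketch of the idea (with an assumed random feature map, not the trained model's outputs) showing how the max-pooled value per filter maps back to a token position:

```python
import numpy as np

# Assumed feature map for one sample: (max_seq_len, num_filters)
feature_map = np.random.rand(10, 50)
pooled = feature_map.max(axis=0)        # what 1D global max-pooling returns (one value per filter)
positions = feature_map.argmax(axis=0)  # token index where each filter fired the hardest
print(pooled.shape, positions[:5])      # (50,) and e.g. [3 7 0 9 2]
```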
###Code
import seaborn as sns
from statistics import mode
###Output
_____no_output_____
###Markdown
Components We're going to copy the same model structure as before but now we'll stop just after convolution since those are the outputs we care about.
###Code
class ConvOutputsModels(Model):
def __init__(self, vocab_size, embedding_dim, filter_sizes, num_filters):
super(ConvOutputsModels, self).__init__()
# Embeddings
self.embedding = Embedding(input_dim=vocab_size,
output_dim=embedding_dim)
# Convolutional filters
self.convs = []
for filter_size in filter_sizes:
conv = Conv1D(filters=num_filters, kernel_size=filter_size,
padding='same', activation='relu')
self.convs.append(conv)
def call(self, x_in, training=False):
"""Forward pass."""
# Embed
x_emb = self.embedding(x_in)
# Convolutions
convs = []
for i in range(len(self.convs)):
z = self.convs[i](x_emb)
convs.append(z)
return convs
def sample(self, input_shape):
x = Input(shape=input_shape)
return Model(inputs=x, outputs=self.call(x)).summary()
###Output
_____no_output_____
###Markdown
Operations
###Code
# Initialize model
conv_layer_outputs_model = ConvOutputsModels(vocab_size=vocab_size,
embedding_dim=EMBEDDING_DIM,
filter_sizes=FILTER_SIZES,
num_filters=NUM_FILTERS)
conv_layer_outputs_model.sample(input_shape=(10,))
###Output
Model: "model_6"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_8 (InputLayer) [(None, 10)] 0
__________________________________________________________________________________________________
embedding_8 (Embedding) (None, 10, 100) 2991700 input_8[0][0]
__________________________________________________________________________________________________
conv1d_24 (Conv1D) (None, 10, 50) 10050 embedding_8[0][0]
__________________________________________________________________________________________________
conv1d_25 (Conv1D) (None, 10, 50) 15050 embedding_8[0][0]
__________________________________________________________________________________________________
conv1d_26 (Conv1D) (None, 10, 50) 20050 embedding_8[0][0]
==================================================================================================
Total params: 3,036,850
Trainable params: 3,036,850
Non-trainable params: 0
__________________________________________________________________________________________________
###Markdown
Since we already trained our model, we'll transfer those weights to our new model.
###Code
# Model's layers
conv_layer_outputs_model.layers
# Set embeddings weights
conv_layer_outputs_model.layers[0].set_weights(model.layers[0].get_weights())
# Set conv weights
conv_layer_start_num = 1
for layer_num in range(conv_layer_start_num, conv_layer_start_num + len(FILTER_SIZES)):
conv_layer_outputs_model.layers[layer_num].set_weights(model.layers[layer_num].get_weights())
# Forward pass
conv_outputs = conv_layer_outputs_model.predict_generator(generator=inference_generator,
verbose=1)
print (len(conv_outputs)) # each filter_size has feature maps
print (conv_outputs[0].shape)
conv_outputs[0].shape
# Visualize bi-gram filters
tokens = untokenize(X_infer[0], X_tokenizer).split()
sns.heatmap(conv_outputs[0][0].T, xticklabels=tokens)
###Output
_____no_output_____
###Markdown
1D global max-pooling would extract the highest value from each of our num_filters for each filter size. We could also follow this same approach to figure out which n-gram is most relevant, but notice in the heatmap above that many filters don't have much variance. To mitigate this, this [paper](https://www.aclweb.org/anthology/W18-5408/) uses threshold values to determine which filters to use for interpretability. To keep things simple, and since the feature map values are fairly normalized, we'll just take the sum of values for each token index and use the index that has the max value as the most influential index.
###Code
sample_index = 0
print (f"Preprocessed text:\n{untokenize(indices=X_infer[sample_index], tokenizer=X_tokenizer)}")
print ("\nMost important n-grams:")
# Process conv outputs for each unique filter size
for i, filter_size in enumerate(FILTER_SIZES):
# Identify most important n-gram
filter_sums = np.sum(conv_outputs[i][sample_index], axis=1)
# Get corresponding text
start = np.argmax(filter_sums)
gram = " ".join([X_tokenizer.index_word[index] for index in X_infer[sample_index][start:start+filter_size]])
print (f"[{filter_size}-gram]: {gram}")
###Output
Preprocessed text:
this weekend the greatest tennis players will fight for the championship
Most important n-grams:
[2-gram]: tennis players
[3-gram]: tennis players will
[4-gram]: championship
###Markdown
EmbeddingsIn this lesson we will learn how to map tokens to vectors (embeddings) that capture the contextual, semantic and syntactic value of a token in text. View on practicalAI Run in Google Colab View code on GitHub So far, we've also represented our text data in a one-hot encoded form where each token is represented by an n-dimensional array. ```python[[0. 0. 0. ... 0. 0. 0.] [0. 0. 1. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] ... [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.]]```This allows us to preserve the structural information but there are two major disadvantages here. We used character level representations in the CNN lessons because the number of characters is small. Suppose we wanted to one-hot encode each word instead. Now the vocabulary sizes quickly grows leading to large computes. And though we preserve the structure within the text, the actual representation for each token does not preserve any relationship with respect to other tokens.In this notebook, we're going to learn about embeddings and how they address all the shortcomings of the representation methods we've seen so far. Overview * **Objective:** Represent tokens in text that capture the intrinsic semantic relationships.* **Advantages:** * Low-dimensionality while capturing relationships. * Interpretable token representations* **Disadvantages:** None* **Miscellaneous:** There are lot's of pretrained embeddings to choose from but you can also train your own from scratch. Set up
###Code
# Use TensorFlow 2.x
%tensorflow_version 2.x
import os
import numpy as np
import tensorflow as tf
print("GPU Available: ", tf.test.is_gpu_available())
# Arguments
SEED = 1234
SHUFFLE = True
FILTERS = "!\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
LOWER = True
CHAR_LEVEL = False
# Set seed for reproducability
np.random.seed(SEED)
tf.random.set_seed(SEED)
###Output
_____no_output_____
###Markdown
Learning embeddings The main idea of embeddings is to have fixed length representations for the tokens in a text regardless of the number of tokens in the vocabulary. So instead of each token representation having the shape [1 X V] where V is vocab size, each token now has the shape [1 X D] where D is the embedding size (usually 50, 100, 200, 300). The numbers in the representation will no longer be 0s and 1s but rather floats that represent that token in a D-dimensional latent space. If the embeddings really did capture the relationship between tokens, then we should be able to inspect this latent space and confirm known relationships (we'll do this soon).But how do we learn the embeddings the first place? The intuition behind embeddings is that the definition of a token depends on the token itself but on it's context. There are several different ways of doing this:1. Given the word in the context, predict the target word (CBOW - continuous bag of words).2. Given the target word, predict the context word (skip-gram).3. Given a sequence of words, predict the next word (LM - language modeling).All of these approaches involve create data to train our model on. Every word in a sentence becomes the target word and the context words are determines by a window. In the image below (skip-gram), the window size is 2 (2 words to the left and right of the target word). We repeat this for every sentence in our corpus and this results in our training data for the unsupervised task. This in an unsupervised learning technique since we don't have official labels for contexts. The idea is that similar target words will appear with similar contexts and we can learn this relationship by repeatedly training our mode with (context, target) pairs.We can learn embeddings using any of these approaches above and some work better than others. You can inspect the learned embeddings but the best way to choose an approach is to empirically validate the performance on a supervised task. Word2Vec We can learn embeddings by creating our models in TensorFLow but instead, we're going to use a library that specializes in embeddings and topic modeling called [Gensim](https://radimrehurek.com/gensim/).
###Code
import gensim
from gensim.models import KeyedVectors
from gensim.models import FastText
from gensim.test.utils import get_tmpfile
import nltk; nltk.download('punkt')
from tensorflow.keras.preprocessing.text import text_to_word_sequence
import urllib
import warnings; warnings.filterwarnings('ignore')
# Arguments
DATA_FILE = 'harrypotter.txt'
EMBEDDING_DIM = 100
WINDOW = 5
MIN_COUNT = 3 # Ignores all words with total frequency lower than this
SKIP_GRAM = 1 # 0 = CBOW
NEGATIVE_SAMPLING = 20
# Upload data from GitHub to notebook's local drive
url = "https://raw.githubusercontent.com/practicalAI/practicalAI/master/data/harrypotter.txt"
response = urllib.request.urlopen(url)
html = response.read()
with open(DATA_FILE, 'wb') as fp:
fp.write(html)
# Split text into sentences
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
with open(DATA_FILE, encoding='cp1252') as fp:
book = fp.read()
sentences = tokenizer.tokenize(book)
print (len(sentences))
print (sentences[11])
# Preprocess sentences
sentences = [text_to_word_sequence(
text=sentence,
filters=FILTERS,
lower=LOWER,
split=' ') for sentence in sentences]
print (sentences[11])
###Output
['snape', 'nodded', 'but', 'did', 'not', 'elaborate']
###Markdown
When we have large vocabularies to learn embeddings for, things can get complex very quickly. Recall that the backpropagation with softmax updates both the correct and incorrect class weights. This becomes a massive computation for every backwas pass we do so a workaround is to use [negative sampling](http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/) which only updates the correct class and a few arbitrary incorrect classes (negative_sampling=20). We're able to do this because of the large amount of training data where we'll see the same word as the target class multiple times.
###Code
# Super fast because of optimized C code under the hood
w2v = gensim.models.Word2Vec(sentences=sentences, size=EMBEDDING_DIM,
window=WINDOW, min_count=MIN_COUNT,
sg=SKIP_GRAM, negative=NEGATIVE_SAMPLING)
print (w2v)
# Vector for each word
w2v.wv.get_vector("potter")
# Get nearest neighbors (excluding itself)
w2v.wv.most_similar(positive="scar", topn=5)
# Saving and loading
w2v.wv.save_word2vec_format('model.bin', binary=True)
w2v = KeyedVectors.load_word2vec_format('model.bin', binary=True)
###Output
_____no_output_____
###Markdown
FastText What happen's when a word doesn't exist in our vocabulary? We could assign an UNK token which is used for all OOV (out of vocabulary) words or we could use [FastText](https://radimrehurek.com/gensim/models/fasttext.html), which uses character-level n-grams to embed a word. This helps embed rare words, mispelled words, and also words that don't exist in our corpus but are similar to words in our corpus.
###Code
# Super fast because of optimized C code under the hood
ft = gensim.models.FastText(sentences=sentences, size=EMBEDDING_DIM,
window=WINDOW, min_count=MIN_COUNT,
sg=SKIP_GRAM, negative=NEGATIVE_SAMPLING)
print (ft)
# This word doesn't exist so the word2vec model will error out
# w2v.wv.most_similar(positive="scarring", topn=5)
# FastText will use n-grams to embed an OOV word
ft.wv.most_similar(positive="scarring", topn=5)
# Save and loading
ft.wv.save('model.bin')
ft = KeyedVectors.load('model.bin')
###Output
_____no_output_____
###Markdown
Pretrained embeddings We can learn embeddings from scratch using one of the approaches above but we can also leverage pretrained embeddings that have been trained on millions of documents. Popular ones include Word2Vec (skip-gram) or GloVe (global word-word co-occurrence). We can validate that these embeddings captured meaningful semantic relationships by confirming them.
###Code
from gensim.scripts.glove2word2vec import glove2word2vec
from io import BytesIO
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from urllib.request import urlopen
from zipfile import ZipFile
###Output
_____no_output_____
###Markdown
Components
###Code
def plot_embeddings(words, embeddings, pca_results):
for word in words:
index = embeddings.index2word.index(word)
plt.scatter(pca_results[index, 0], pca_results[index, 1])
plt.annotate(word, xy=(pca_results[index, 0], pca_results[index, 1]))
plt.show()
###Output
_____no_output_____
###Markdown
Operations
###Code
# Arguments
EMBEDDING_DIM = 100
# Unzip the file (may take ~3-5 minutes)
resp = urlopen('http://nlp.stanford.edu/data/glove.6B.zip')
zipfile = ZipFile(BytesIO(resp.read()))
zipfile.namelist()
# Write embeddings to file
embeddings_file = 'glove.6B.{0}d.txt'.format(EMBEDDING_DIM)
zipfile.extract(embeddings_file)
# Preview of the GloVe embeddings file
with open(embeddings_file, 'r') as fp:
line = next(fp)
values = line.split()
word = values[0]
embedding = np.asarray(values[1:], dtype='float32')
print (f"word: {word}")
print (f"embedding:\n{embedding}")
print (f"embedding dim: {len(embedding)}")
# Save GloVe embeddings to local directory in word2vec format
word2vec_output_file = '{0}.word2vec'.format(embeddings_file)
glove2word2vec(embeddings_file, word2vec_output_file)
# Load embeddings (may take a minute)
glove = KeyedVectors.load_word2vec_format(word2vec_output_file, binary=False)
# (king - man) + woman = ?
glove.most_similar(positive=['woman', 'king'], negative=['man'], topn=5)
# Get nearest neighbors (exlcusing itself)
glove.wv.most_similar(positive="goku", topn=5)
# Reduce dimensionality for plotting
X = glove[glove.wv.vocab]
pca = PCA(n_components=2)
pca_results = pca.fit_transform(X)
# Visualize
plot_embeddings(words=["king", "queen", "man", "woman"],
embeddings=glove,
pca_results=pca_results)
# Bias in embeddings
glove.most_similar(positive=['woman', 'doctor'], negative=['man'], topn=5)
###Output
_____no_output_____
###Markdown
Using Embeddings There are several different ways to use embeddings. 1. Use your own trained embeddings (trained on an unsupervised dataset).2. Use pretrained embeddings (GloVe, word2vec, etc.)3. Randomly initialized embeddings.We will explore the different options by revisiting our AGNews classification task. Set up
###Code
# Arguments
SEED = 1234
SHUFFLE = True
DATA_FILE = 'news.csv'
INPUT_FEATURE = 'title'
OUTPUT_FEATURE = 'category'
FILTERS = "!\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
LOWER = True
CHAR_LEVEL = False
TRAIN_SIZE = 0.7
VAL_SIZE = 0.15
TEST_SIZE = 0.15
NUM_EPOCHS = 10
BATCH_SIZE = 64
EMBEDDING_DIM = 100
NUM_FILTERS = 50
FILTER_SIZES = [2, 3, 4]
HIDDEN_DIM = 100
DROPOUT_P = 0.1
LEARNING_RATE = 1e-3
EARLY_STOPPING_CRITERIA = 3
###Output
_____no_output_____
###Markdown
Data We will download the [AG News dataset](http://www.di.unipi.it/~gulli/AG_corpus_of_news_articles.html), which consists of 120000 text samples from 4 unique classes ('Business', 'Sci/Tech', 'Sports', 'World')
###Code
import pandas as pd
import re
import urllib
# Upload data from GitHub to notebook's local drive
url = "https://raw.githubusercontent.com/practicalAI/practicalAI/master/data/news.csv"
response = urllib.request.urlopen(url)
html = response.read()
with open(DATA_FILE, 'wb') as fp:
fp.write(html)
# Load data
df = pd.read_csv(DATA_FILE, header=0)
X = df[INPUT_FEATURE].values
y = df[OUTPUT_FEATURE].values
df.head(5)
###Output
_____no_output_____
###Markdown
Split data
###Code
import collections
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Components
###Code
def train_val_test_split(X, y, val_size, test_size, shuffle):
"""Split data into train/val/test datasets.
"""
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=test_size, stratify=y, shuffle=shuffle)
X_train, X_val, y_train, y_val = train_test_split(
X_train, y_train, test_size=val_size, stratify=y_train, shuffle=shuffle)
return X_train, X_val, X_test, y_train, y_val, y_test
###Output
_____no_output_____
###Markdown
Operations
###Code
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, val_size=VAL_SIZE, test_size=TEST_SIZE, shuffle=SHUFFLE)
class_counts = dict(collections.Counter(y))
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"X_train[0]: {X_train[0]}")
print (f"y_train[0]: {y_train[0]}")
print (f"Classes: {class_counts}")
###Output
X_train: (86700,), y_train: (86700,)
X_val: (15300,), y_val: (15300,)
X_test: (18000,), y_test: (18000,)
X_train[0]: Last call for Jack Daniel #39;s?
y_train[0]: Business
Classes: {'Business': 30000, 'Sci/Tech': 30000, 'Sports': 30000, 'World': 30000}
###Markdown
Tokenizer Unlike the previous notebook, we will be processing our text at a word-level (as opposed to character-level).
###Code
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
###Output
_____no_output_____
###Markdown
Components
###Code
def untokenize(indices, tokenizer):
"""Untokenize a list of indices into string."""
return " ".join([tokenizer.index_word[index] for index in indices])
###Output
_____no_output_____
###Markdown
Operations
###Code
# Input vectorizer
X_tokenizer = Tokenizer(filters=FILTERS,
lower=LOWER,
char_level=CHAR_LEVEL,
oov_token='<UNK>')
# Fit only on train data
X_tokenizer.fit_on_texts(X_train)
vocab_size = len(X_tokenizer.word_index) + 1
print (f"# tokens: {vocab_size}")
# Convert text to sequence of tokens
original_text = X_train[0]
X_train = np.array(X_tokenizer.texts_to_sequences(X_train))
X_val = np.array(X_tokenizer.texts_to_sequences(X_val))
X_test = np.array(X_tokenizer.texts_to_sequences(X_test))
preprocessed_text = untokenize(X_train[0], X_tokenizer)
print (f"{original_text} \n\t→ {preprocessed_text} \n\t→ {X_train[0]}")
###Output
Last call for Jack Daniel #39;s?
→ last call for jack daniel 39 s
→ [316, 314, 5, 6877, 10686, 4, 6]
###Markdown
LabelEncoder
###Code
from sklearn.preprocessing import LabelEncoder
###Output
_____no_output_____
###Markdown
Operations
###Code
# Output vectorizer
y_tokenizer = LabelEncoder()
# Fit on train data
y_tokenizer = y_tokenizer.fit(y_train)
classes = list(y_tokenizer.classes_)
print (f"classes: {classes}")
# Convert labels to tokens
y_train = y_tokenizer.transform(y_train)
y_val = y_tokenizer.transform(y_val)
y_test = y_tokenizer.transform(y_test)
print (f"y_train[0]: {y_train[0]}")
# Class weights
counts = np.bincount(y_train)
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"class counts: {counts},\nclass weights: {class_weights}")
###Output
class counts: [21675 21675 21675 21675],
class weights: {0: 4.61361014994233e-05, 1: 4.61361014994233e-05, 2: 4.61361014994233e-05, 3: 4.61361014994233e-05}
###Markdown
Generators
###Code
import math
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import Sequence
###Output
_____no_output_____
###Markdown
Components
###Code
class DataGenerator(Sequence):
"""Custom data loader."""
def __init__(self, X, y, batch_size, max_filter_size, shuffle=True):
self.X = X
self.y = y
self.batch_size = batch_size
self.max_filter_size = max_filter_size
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
"""# of batches."""
return math.ceil(len(self.X) / self.batch_size)
def __str__(self):
return (f"<DataGenerator(" \
f"batch_size={self.batch_size}, " \
f"batches={len(self)}, " \
f"shuffle={self.shuffle})>")
def __getitem__(self, index):
"""Generate a batch."""
# Gather indices for this batch
batch_indices = self.epoch_indices[
index * self.batch_size:(index+1)*self.batch_size]
# Generate batch data
X, y = self.create_batch(batch_indices=batch_indices)
return X, y
def on_epoch_end(self):
"""Create indices after each epoch."""
self.epoch_indices = np.arange(len(self.X))
if self.shuffle == True:
np.random.shuffle(self.epoch_indices)
def create_batch(self, batch_indices):
"""Generate batch from indices."""
# Get batch data
X = self.X[batch_indices]
y = self.y[batch_indices]
# Pad batch
max_seq_len = max(self.max_filter_size, max([len(x) for x in X]))
X = pad_sequences(X, padding="post", maxlen=max_seq_len)
return X, y
###Output
_____no_output_____
###Markdown
Operations
###Code
# Dataset generator
training_generator = DataGenerator(X=X_train,
y=y_train,
batch_size=BATCH_SIZE,
max_filter_size=max(FILTER_SIZES),
shuffle=SHUFFLE)
validation_generator = DataGenerator(X=X_val,
y=y_val,
batch_size=BATCH_SIZE,
max_filter_size=max(FILTER_SIZES),
shuffle=False)
testing_generator = DataGenerator(X=X_test,
y=y_test,
batch_size=BATCH_SIZE,
max_filter_size=max(FILTER_SIZES),
shuffle=False)
print (f"training_generator: {training_generator}")
print (f"validation_generator: {validation_generator}")
print (f"testing_generator: {testing_generator}")
###Output
training_generator: <DataGenerator(batch_size=64, batches=1355, shuffle=True)>
validation_generator: <DataGenerator(batch_size=64, batches=240, shuffle=False)>
testing_generator: <DataGenerator(batch_size=64, batches=282, shuffle=False)>
###Markdown
Model Let's visualize the model's forward pass.1. We'll first tokenize our inputs (`batch_size`, `max_seq_len`).2. Then we'll embed our tokenized inputs (`batch_size`, `max_seq_len`, `embedding_dim`).3. We'll apply convolution via filters (`filter_size`, `vocab_size`, `num_filters`) followed by batch normalization. Our filters act as character level n-gram detecors. We have three different filter sizes (2, 3 and 4) and they will act as bi-gram, tri-gram and 4-gram feature extractors, respectivelyy. 4. We'll apply 1D global max pooling which will extract the most relevant information from the feature maps for making the decision.5. We feed the pool outputs to a fully-connected (FC) layer (with dropout).6. We use one more FC layer with softmax to derive class probabilities. The `FILTER_SIZES` are [2, 3, 4] which effectively act as bi-gram, tri-gram and 4th-gram feature extractors when applied to our text.
###Code
from tensorflow.keras.layers import Concatenate
from tensorflow.keras.layers import Conv1D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import GlobalMaxPool1D
from tensorflow.keras.layers import Input
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
###Output
_____no_output_____
###Markdown
Components
###Code
class TextClassificationCNNModel(Model):
def __init__(self, vocab_size, embedding_dim, filter_sizes, num_filters,
hidden_dim, dropout_p, num_classes, freeze_embeddings=False):
super(TextClassificationCNNModel, self).__init__()
# Embeddings
self.embedding = Embedding(input_dim=vocab_size,
output_dim=embedding_dim,
trainable=not freeze_embeddings)
# Convolutional filters
self.convs = []
self.pools = []
for filter_size in filter_sizes:
conv = Conv1D(filters=num_filters, kernel_size=filter_size,
padding='same', activation='relu')
pool = GlobalMaxPool1D(data_format='channels_last')
self.convs.append(conv)
self.pools.append(pool)
# Concatenation
self.concat = Concatenate(axis=1)
# FC layers
self.fc1 = Dense(units=hidden_dim, activation='relu')
self.dropout = Dropout(rate=dropout_p)
self.fc2 = Dense(units=num_classes, activation='softmax')
def call(self, x_in, training=False):
"""Forward pass."""
# Embed
x_emb = self.embedding(x_in)
# Convolutions
convs = []
for i in range(len(self.convs)):
z = self.convs[i](x_emb)
z = self.pools[i](z)
convs.append(z)
# Concatenate
z_cat = self.concat(convs)
# FC
z = self.fc1(z_cat)
if training:
z = self.dropout(z, training=training)
y_pred = self.fc2(z)
return y_pred
def sample(self, input_shape):
x = Input(shape=input_shape)
return Model(inputs=x, outputs=self.call(x)).summary()
###Output
_____no_output_____
###Markdown
GloVe embeddings Components
###Code
def load_glove_embeddings(embeddings_file):
"""Load embeddings from a file."""
embeddings = {}
with open(embeddings_file, "r") as fp:
for index, line in enumerate(fp):
values = line.split()
word = values[0]
embedding = np.asarray(values[1:], dtype='float32')
embeddings[word] = embedding
return embeddings
def make_embeddings_matrix(embeddings, word_index, embedding_dim):
"""Create embeddings matrix to use in Embedding layer."""
embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim))
for word, i in word_index.items():
embedding_vector = embeddings.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
return embedding_matrix
###Output
_____no_output_____
###Markdown
Operations
###Code
# Create embeddings
embeddings_file = 'glove.6B.{0}d.txt'.format(EMBEDDING_DIM)
glove_embeddings = load_glove_embeddings(embeddings_file=embeddings_file)
embedding_matrix = make_embeddings_matrix(embeddings=glove_embeddings,
word_index=X_tokenizer.word_index,
embedding_dim=EMBEDDING_DIM)
print (f"<Embeddings(words={embedding_matrix.shape[0]}, dim={embedding_matrix.shape[1]})>")
###Output
<Embeddings(words=29917, dim=100)>
###Markdown
Experiments Once you have chosen your embeddings, you can choose to freeze them or continue to train them using the supervised data (this could lead to overfitting). In this example, we will do three experiments: * frozen GloVe embeddings* fine-tuned (unfrozen) GloVe embeddings* randomly initialized embeddings
###Code
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.callbacks import TensorBoard
%load_ext tensorboard
###Output
_____no_output_____
###Markdown
GloVe embeddings (frozen)
###Code
# Arguments
FREEZE_EMBEDDINGS = True
# Initialize model
glove_frozen_model = TextClassificationCNNModel(vocab_size=vocab_size,
embedding_dim=EMBEDDING_DIM,
filter_sizes=FILTER_SIZES,
num_filters=NUM_FILTERS,
hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P,
num_classes=len(classes),
freeze_embeddings=FREEZE_EMBEDDINGS)
glove_frozen_model.sample(input_shape=(10,))
# Set embeddings
glove_frozen_model.layers[0].set_weights([embedding_matrix])
# Compile
glove_frozen_model.compile(optimizer=Adam(lr=LEARNING_RATE),
loss=SparseCategoricalCrossentropy(),
metrics=['accuracy'])
# Callbacks
callbacks = [EarlyStopping(monitor='val_loss', patience=EARLY_STOPPING_CRITERIA, verbose=1, mode='min'),
ReduceLROnPlateau(patience=1, factor=0.1, verbose=0),
TensorBoard(log_dir='tensorboard/glove_frozen', histogram_freq=1, update_freq='epoch')]
# Training
training_history = glove_frozen_model.fit_generator(generator=training_generator,
epochs=NUM_EPOCHS,
validation_data=validation_generator,
callbacks=callbacks,
shuffle=False,
class_weight=class_weights,
verbose=1)
# Evaluation
testing_history = glove_frozen_model.evaluate_generator(generator=testing_generator,
verbose=1)
###Output
282/282 [==============================] - 6s 22ms/step - loss: 0.3690 - accuracy: 0.8684
###Markdown
Fine-tuned GloVe embeddings (unfrozen)
###Code
# Arguments
FREEZE_EMBEDDINGS = False
# Initialize model
glove_finetuned_model = TextClassificationCNNModel(vocab_size=vocab_size,
embedding_dim=EMBEDDING_DIM,
filter_sizes=FILTER_SIZES,
num_filters=NUM_FILTERS,
hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P,
num_classes=len(classes),
freeze_embeddings=FREEZE_EMBEDDINGS)
glove_finetuned_model.sample(input_shape=(10,))
# Set embeddings
glove_finetuned_model.layers[0].set_weights([embedding_matrix])
# Compile
glove_finetuned_model.compile(optimizer=Adam(lr=LEARNING_RATE),
loss=SparseCategoricalCrossentropy(),
metrics=['accuracy'])
# Callbacks
callbacks = [EarlyStopping(monitor='val_loss', patience=EARLY_STOPPING_CRITERIA, verbose=1, mode='min'),
ReduceLROnPlateau(patience=1, factor=0.1, verbose=0),
TensorBoard(log_dir='tensorboard/glove_finetuned', histogram_freq=1, update_freq='epoch')]
# Training
training_history = glove_finetuned_model.fit_generator(generator=training_generator,
epochs=NUM_EPOCHS,
validation_data=validation_generator,
callbacks=callbacks,
shuffle=False,
class_weight=class_weights,
verbose=1)
# Evaluation
testing_history = glove_finetuned_model.evaluate_generator(generator=testing_generator,
verbose=1)
###Output
282/282 [==============================] - 6s 21ms/step - loss: 0.3710 - accuracy: 0.8728
###Markdown
Randomly initialized embeddings
###Code
# Arguments
FREEZE_EMBEDDINGS = False
random_initialized_model = TextClassificationCNNModel(vocab_size=vocab_size,
embedding_dim=EMBEDDING_DIM,
filter_sizes=FILTER_SIZES,
num_filters=NUM_FILTERS,
hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P,
num_classes=len(classes),
freeze_embeddings=FREEZE_EMBEDDINGS)
# Compile
random_initialized_model.compile(optimizer=Adam(lr=LEARNING_RATE),
loss=SparseCategoricalCrossentropy(),
metrics=['accuracy'])
# Callbacks
callbacks = [EarlyStopping(monitor='val_loss', patience=EARLY_STOPPING_CRITERIA, verbose=1, mode='min'),
ReduceLROnPlateau(patience=1, factor=0.1, verbose=0),
TensorBoard(log_dir='tensorboard/randomly_initialized', histogram_freq=1, update_freq='epoch')]
# Training
training_history = random_initialized_model.fit_generator(generator=training_generator,
epochs=NUM_EPOCHS,
validation_data=validation_generator,
callbacks=callbacks,
shuffle=False,
class_weight=class_weights,
verbose=1)
# Evaluation
testing_history = random_initialized_model.evaluate_generator(generator=testing_generator,
verbose=1)
%tensorboard --logdir tensorboard
###Output
_____no_output_____
###Markdown
Complete evaluation Looks like the fine-tuned GloVe embeddings had the best test performance, so let's do a proper evaluation and run inference with that strategy.
###Code
model = glove_finetuned_model
import io
import itertools
import json
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
###Output
_____no_output_____
###Markdown
Components
###Code
def plot_confusion_matrix(y_true, y_pred, classes, cmap=plt.cm.Blues):
"""Plot a confusion matrix using ground truth and predictions."""
# Confusion matrix
cm = confusion_matrix(y_true, y_pred)
cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# Figure
fig = plt.figure()
ax = fig.add_subplot(111)
    cax = ax.matshow(cm, cmap=cmap)  # use the cmap argument rather than hard-coding plt.cm.Blues
fig.colorbar(cax)
# Axis
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
ax.set_xticklabels([''] + classes)
ax.set_yticklabels([''] + classes)
ax.xaxis.set_label_position('bottom')
ax.xaxis.tick_bottom()
# Values
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, f"{cm[i, j]:d} ({cm_norm[i, j]*100:.1f}%)",
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
# Display
plt.show()
def get_performance(y_true, y_pred, classes):
"""Per-class performance metrics. """
performance = {'overall': {}, 'class': {}}
y_pred = np.argmax(y_pred, axis=1)
metrics = precision_recall_fscore_support(y_true, y_pred)
# Overall performance
performance['overall']['precision'] = np.mean(metrics[0])
performance['overall']['recall'] = np.mean(metrics[1])
performance['overall']['f1'] = np.mean(metrics[2])
performance['overall']['num_samples'] = np.float64(np.sum(metrics[3]))
# Per-class performance
for i in range(len(classes)):
performance['class'][classes[i]] = {
"precision": metrics[0][i],
"recall": metrics[1][i],
"f1": metrics[2][i],
"num_samples": np.float64(metrics[3][i])
}
return performance
###Output
_____no_output_____
###Markdown
Operations
###Code
# Evaluation
test_history = model.evaluate_generator(generator=testing_generator, verbose=1)
y_pred = model.predict_generator(generator=testing_generator, verbose=1)
print (f"test history: {test_history}")
# Class performance
performance = get_performance(y_true=y_test,
y_pred=y_pred,
classes=classes)
print (json.dumps(performance, indent=4))
# Confusion matrix
plt.rcParams["figure.figsize"] = (7,7)
y_pred = np.argmax(y_pred, axis=1)
plot_confusion_matrix(y_test, y_pred, classes=classes)
print (classification_report(y_test, y_pred))
###Output
_____no_output_____
###Markdown
Inference
###Code
import collections
###Output
_____no_output_____
###Markdown
Components
###Code
def get_probability_distributions(probabilities, classes):
"""Produce probability distributions with labels."""
probability_distributions = []
for i, y_prob in enumerate(probabilities):
probability_distribution = {}
for j, prob in enumerate(y_prob):
probability_distribution[classes[j]] = np.float64(prob)
probability_distribution = collections.OrderedDict(
sorted(probability_distribution.items(), key=lambda kv: kv[1], reverse=True))
probability_distributions.append(probability_distribution)
return probability_distributions
###Output
_____no_output_____
###Markdown
Operations
###Code
# Inputs
texts = ["This weekend the greatest tennis players will fight for the championship."]
num_samples = len(texts)
X_infer = np.array(X_tokenizer.texts_to_sequences(texts))
print (f"{texts[0]} \n\t→ {untokenize(X_infer[0], X_tokenizer)} \n\t→ {X_infer[0]}")
print (f"len(X_infer[0]): {len(X_infer[0])} characters")
y_filler = np.array([0]*num_samples)
# Inference data generator
inference_generator = DataGenerator(X=X_infer,
y=y_filler,
batch_size=BATCH_SIZE,
max_filter_size=max(FILTER_SIZES),
shuffle=False)
# Predict
probabilities = model.predict_generator(generator=inference_generator,
verbose=1)
# Results
probability_distributions = get_probability_distributions(probabilities=probabilities,
classes=y_tokenizer.classes_)
results = []
for index in range(num_samples):
results.append({
'raw_input': texts[index],
'preprocessed_input': untokenize(indices=X_infer[index], tokenizer=X_tokenizer),
'tokenized_input': str(X_infer[index]),
'probabilities': probability_distributions[index]
})
print (json.dumps(results, indent=4))
###Output
[
{
"raw_input": "This weekend the greatest tennis players will fight for the championship.",
"preprocessed_input": "this weekend the greatest tennis players will fight for the championship",
"tokenized_input": "[ 272 2283 10 6450 878 370 60 238 5 10 1465]",
"probabilities": {
"Sports": 0.7571110129356384,
"World": 0.2408323436975479,
"Sci/Tech": 0.0012546397047117352,
"Business": 0.0008020797977223992
}
}
]
###Markdown
Interpretability Recall that each of our unique filter sizes (2, 3 and 4) acts as an n-gram feature detector. When these filters convolve over our embedded input of shape (`N`, `max_seq_len`, `embedding_dim`), they produce feature maps of shape (`N`, `max_seq_len`, `num_filters`) for each filter size. Since we used `SAME` padding with stride 1, the feature maps have the same length as our inputs (`max_seq_len`), and you can think of each position as what the filter extracted from the n-gram window centered there. When we apply 1D global max-pooling, we're effectively extracting the most relevant information from the feature maps. We can inspect the trained model at the pooling step to determine which n-grams were most relevant to the prediction.
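To make the shapes concrete, here is a minimal, self-contained sketch (assuming TensorFlow 2.x; the toy dimensions are illustrative and not this notebook's actual values) showing that `padding='same'` preserves sequence length and that global max-pooling keeps one value per filter:

```python
import tensorflow as tf

# Toy dimensions (illustrative only)
batch_size, max_seq_len, embedding_dim, num_filters = 2, 10, 100, 50

x_emb = tf.random.normal((batch_size, max_seq_len, embedding_dim))
conv = tf.keras.layers.Conv1D(filters=num_filters, kernel_size=2,
                              padding='same', activation='relu')
z = conv(x_emb)                                 # feature maps
pooled = tf.keras.layers.GlobalMaxPool1D()(z)   # strongest response per filter
print(z.shape)       # (2, 10, 50): same length as the input sequence
print(pooled.shape)  # (2, 50): one value per filter
```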
###Code
import seaborn as sns
from statistics import mode
###Output
_____no_output_____
###Markdown
Components We're going to copy the same model structure as before but now we'll stop just after convolution since those are the outputs we care about.
###Code
class ConvOutputsModels(Model):
def __init__(self, vocab_size, embedding_dim, filter_sizes, num_filters):
super(ConvOutputsModels, self).__init__()
# Embeddings
self.embedding = Embedding(input_dim=vocab_size,
output_dim=embedding_dim)
# Convolutional filters
self.convs = []
for filter_size in filter_sizes:
conv = Conv1D(filters=num_filters, kernel_size=filter_size,
padding='same', activation='relu')
self.convs.append(conv)
def call(self, x_in, training=False):
"""Forward pass."""
# Embed
x_emb = self.embedding(x_in)
# Convolutions
convs = []
for i in range(len(self.convs)):
z = self.convs[i](x_emb)
convs.append(z)
return convs
def sample(self, input_shape):
x = Input(shape=input_shape)
return Model(inputs=x, outputs=self.call(x)).summary()
###Output
_____no_output_____
###Markdown
Operations
###Code
# Initialize model
conv_layer_outputs_model = ConvOutputsModels(vocab_size=vocab_size,
embedding_dim=EMBEDDING_DIM,
filter_sizes=FILTER_SIZES,
num_filters=NUM_FILTERS)
conv_layer_outputs_model.sample(input_shape=(10,))
###Output
Model: "model_6"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_8 (InputLayer) [(None, 10)] 0
__________________________________________________________________________________________________
embedding_8 (Embedding) (None, 10, 100) 2991700 input_8[0][0]
__________________________________________________________________________________________________
conv1d_24 (Conv1D) (None, 10, 50) 10050 embedding_8[0][0]
__________________________________________________________________________________________________
conv1d_25 (Conv1D) (None, 10, 50) 15050 embedding_8[0][0]
__________________________________________________________________________________________________
conv1d_26 (Conv1D) (None, 10, 50) 20050 embedding_8[0][0]
==================================================================================================
Total params: 3,036,850
Trainable params: 3,036,850
Non-trainable params: 0
__________________________________________________________________________________________________
###Markdown
Since we already trained our model, we'll transfer those weights to our new model.
###Code
# Model's layers
conv_layer_outputs_model.layers
# Set embeddings weights
conv_layer_outputs_model.layers[0].set_weights(model.layers[0].get_weights())
# Set conv weights
conv_layer_start_num = 1
for layer_num in range(conv_layer_start_num, conv_layer_start_num + len(FILTER_SIZES)):
conv_layer_outputs_model.layers[layer_num].set_weights(model.layers[layer_num].get_weights())
# Forward pass
conv_outputs = conv_layer_outputs_model.predict_generator(generator=inference_generator,
verbose=1)
print (len(conv_outputs)) # each filter_size has feature maps
print (conv_outputs[0].shape)
conv_outputs[0].shape
# Visualize bi-gram filters
tokens = untokenize(X_infer[0], X_tokenizer).split()
sns.heatmap(conv_outputs[0][0].T, xticklabels=tokens)
###Output
_____no_output_____
###Markdown
1D global max-pooling would extract the highest value from each of our `num_filters` for each filter size. We could also follow this same approach to figure out which n-gram is most relevant, but notice in the heatmap above that many filters don't have much variance. To mitigate this, this [paper](https://www.aclweb.org/anthology/W18-5408/) uses threshold values to determine which filters to use for interpretability. To keep things simple, and since the feature map values are fairly normalized, we'll just take the sum of values for each token index and use the index that has the max value as the most influential index.
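As a quick illustration of the difference between the two reductions (a sketch that reuses the `conv_outputs` computed above; variable names follow this notebook):

```python
import numpy as np

fmap = conv_outputs[0][0]            # bi-gram feature maps for sample 0: (max_seq_len, num_filters)
per_filter_max = fmap.max(axis=0)    # what 1D global max-pooling would keep (one value per filter)
per_token_sum = fmap.sum(axis=1)     # the simpler per-token score used below
print(per_filter_max.shape, per_token_sum.shape)
```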
###Code
sample_index = 0
print (f"Preprocessed text:\n{untokenize(indices=X_infer[sample_index], tokenizer=X_tokenizer)}")
print ("\nMost important n-grams:")
# Process conv outputs for each unique filter size
for i, filter_size in enumerate(FILTER_SIZES):
# Identify most important n-gram
filter_sums = np.sum(conv_outputs[i][sample_index], axis=1)
# Get corresponding text
start = np.argmax(filter_sums)
gram = " ".join([X_tokenizer.index_word[index] for index in X_infer[sample_index][start:start+filter_size]])
print (f"[{filter_size}-gram]: {gram}")
###Output
Preprocessed text:
this weekend the greatest tennis players will fight for the championship
Most important n-grams:
[2-gram]: tennis players
[3-gram]: tennis players will
[4-gram]: championship
|
2017/Contributed-Talks/09_sanders/Annualized Rate of Mass Shootings; Sanders & Lei (StanCon2017 - revised).ipynb | ###Markdown
Modeling the Time Evolution of the Annualized Rate of Public Mass Shootings with Gaussian Processes
Nathan Sanders, Victor Lei (Legendary Entertainment)
January, 2017
Abstract
Much of the public policy debate over gun control and gun rights in the United States hinges on the alarming incidence of public mass shootings, here defined as attacks killing four or more victims. Several times in recent years, individual, highly salient public mass shooting incidents have galvanized public discussion of reform efforts. But deliberative legislative action proceeds over a much longer timescale that should be informed by knowledge of the long-term evolution of these events. We have used *Stan* to develop a new model for the annualized rate of public mass shootings in the United States based on a Gaussian process with a time-varying mean function. This design yields a predictive model with the full non-parametric flexibility of a Gaussian process, while retaining the direct interpretability of a parametric model for the long-term evolution of the mass shooting rate. We apply this model to the Mother Jones database of public mass shootings and explore the posterior consequences of different prior choices and of correlations between hyperparameters. We reach conclusions about the long-term evolution of the rate of public mass shootings in the United States and short-term periods deviating from this trend.
Background
Tragic, high-profile public events over the past few years like the shootings at the Washington Navy Yard; the Emanuel AME Church in Charleston; San Bernardino, CA; and Orlando, FL have raised public awareness of the dangers posed by public mass shooting events and sociological interest in understanding the motivations and occurrence rates of such events. There is no commonly accepted definition of a public mass shooting, but such an event is generally understood to be the simultaneous homicide of multiple people perpetrated by an individual or coordinated group via firearm. A particular question facing elevated public, political, and scholarly scrutiny is whether the rate of public mass shootings has increased significantly over recent years. Lott (2014) responded to a [September, 2013 FBI report](https://www.fbi.gov/news/stories/2014/september/fbi-releases-study-on-active-shooter-incidents/pdfs/a-study-of-active-shooter-incidents-in-the-u.s.-between-2000-and-2013) on public mass shootings by re-evaluating sources of bias, reviewing data consistency, and redefining the period under consideration to conclude that no statistically significant increase is identifiable. Lott's work has been the subject of persistent controversy (see e.g. Johnson et al. 2012). In contrast, Cohen et al. (2014) claim that the rate of public mass shootings tripled over the four-year period 2011-2014 based on a Statistical Process Control (SPC) analysis of the duration between successive events.
In this study, we present a new statistical approach to evaluating the time evolution of the rate of public mass shootings. We do not present original data on occurrences in the United States, address the myriad considerations inherent in defining a "mass shooting" event, or seek to resolve the causal issues of why the growth rate may have changed over time.
We do adopt a commonly cited public mass shooting dataset and definition from Mother Jones. We develop a Gaussian process-based model for the time evolution of the occurrence rate of public mass shootings and demonstrate inference under this model by straightforward application of the probabilistic programming language *Stan*. We use this case to explore the intersection of parametric and non-parametric models. We seek to merge a parametric model, with straightforward interpretations of posterior marginalized parameter inferences, with a non-parametric model that captures and permits discovery of unspecified trends. *Stan's* flexible modeling language permits rapid model design and iteration, while the No-U-Turn sampler allows us to fully explore the model posterior and understand the dependence between the parametric and non-parametric components of our model and the implications of our prior assumptions. In the following notebook, we describe the Mother Jones dataset on US public mass shootings and lay out our statistical model and inference scheme. We then discuss the results from this inference, how they depend on choices for the prior distribution, and explore correlations between hyperparameters. Finally, we discuss the conclusions that can be reached from inspection of the marginal posterior distributions.
###Code
## Notebook setup
%matplotlib inline
import pandas as pd
import numpy as np
import pickle, os, copy
import scipy
from matplotlib import pyplot as plt
from matplotlib import cm
from matplotlib.ticker import FixedLocator, MaxNLocator, AutoMinorLocator
## NOTE: We encounter an error with this model using PyStan 2.14,
## so for now we will wrap cmdstan using stanhelper instead.
#import pystan
## See https://github.com/akucukelbir/stanhelper
import stanhelper
import subprocess
cmdstan_path = os.path.expanduser('~/Stan/cmdstan_2.14.0/')
from scipy import stats as sstats
###Output
_____no_output_____
###Markdown
Package versions
###Code
%load_ext watermark
%watermark -v -m -p pandas,numpy,scipy,matplotlib,pystan
print subprocess.check_output(cmdstan_path+'bin/stanc --version', shell=1)
###Output
stanc version 2.14.0
###Markdown
Data
For this study, we consider the [database published by Mother Jones](http://www.motherjones.com/politics/2012/12/mass-shootings-mother-jones-full-data) (retrieved for this study on October 16, 2016; as of January 14, 2017, Mother Jones had not added any further events to its database for 2016), compiling incidents of public mass shootings in the United States from 1982 through the end of 2016. The database includes rich (quantitative and qualitative) metadata on the effects of the incidents, the mental health condition of the perpetrators, weapon type, how the perpetrators obtained their weapons, and more; however, we focus primarily on the dates of incident occurrence. The definition of a public mass shooting is not universally agreed upon, and even when a firm definition is adopted there can be ambiguity in how to apply it to the complex and uncertain circumstances of these chaotic events. See Fox & Levin (2015) for a recent discussion. The criteria for inclusion in the Mother Jones database were described in a [2014 article by Mark Follman](http://www.motherjones.com/politics/2014/10/mass-shootings-rising-harvard):
> [The database] includes attacks in public places with four or more victims killed, a baseline established by the FBI a decade ago. We excluded mass murders in private homes related to domestic violence, as well as shootings tied to gang or other criminal activity.
Follman discusses their motivations for these criteria and provides some examples of prominent incidents excluded by the criteria, such as the shooting at Ft. Hood in April, 2014. Note that the federal threshold for investigation of public mass shootings was lowered to three victim fatalities in January of 2013, and the Mother Jones database includes shootings under this more expansive definition starting from that date. To maintain a consistent definition for public mass shootings throughout the studied time period, we only consider shootings with four or more victim fatalities. Our primary dataset is the count of incidents reported in this database per calendar year. We include incidents labeled as either "Mass" or "Spree" by Mother Jones.
###Code
## Load data
data = pd.read_excel('MotherJonesData_2016_10_16.xlsx','US mass shootings')
## Stadardize on definition of fatalities at 4. Mother Jones changed it to 3 in 2013.
data = data[data.Fatalities > 3]
## Prepare data
# Aggregate data anually
data_annual = data.groupby('Year')
# Count cases by year and fill in empty years
cases_resamp = data_annual.count().Case.ix[np.arange(1982,2017)].fillna(0)
# Enumerate years in range
data_years = cases_resamp.index.values
# Enumerate quarters across daterange for later plotting
data_years_samp = np.arange(min(data_years), max(data_years)+10, .25)
# Format for Stan
stan_data = {
'N1': len(cases_resamp),
'x1': data_years - min(data_years),
'z1': cases_resamp.values.astype(int),
'N2': len(data_years_samp),
'x2': data_years_samp - min(data_years),
}
## Print the stan model inputs
for key in stan_data:
print key
print stan_data[key]
print '\n'
## Number of years with data
print len(stan_data['x1'])
## Number of interpolated points to do prediction for
print len(stan_data['x2'])
###Output
176
###Markdown
Statistical Model
We adopt a univariate Gaussian process model (see e.g. Rasmussen & Williams 2006) as a non-parametric description of the time evolution of the annualized occurrence rate. The Gaussian process describes deviations from a mean function by a covariance matrix that controls the probability of the deviation as a function of the time differential between points. Roberts et al. (2012) surveyed applications of Gaussian process models to time series data, and explored the implications of different choices for the mean and covariance functions. We adopt the following system of units for the Gaussian process model. The time vector $x$ is measured in years since 1982 and the outcome vector $z$ is defined as the number of occurrences per year. Many applications of Gaussian processes adopt a constant, zero mean function. In that case, the relationship between the dependent variable(s) and the predictors is described entirely by the non-parametric family of functions generated from the Gaussian process covariance function. We adopt a linear mean function and a squared-exponential covariance function. The mean function $\mu(x)$ is simply: \begin{equation}\mu(x) = \mu_0 + \mu_b~x\end{equation} Note that we use a logarithmic parameterization of the likelihood for the occurrence rate (see below), so the linear mean function corresponds to an exponential function for the evolution of the rate of shootings per year. The familiar squared-exponential covariance function, which generates infinitely-differentiable functions from the Gaussian process, is: \begin{equation}k(x)_{i,j} = \eta^2~\exp \big( -\rho^2 \sum_{d=1}^{D}(x_{i,d} - x_{j,d})^2 \big) + \delta_{i,j}~\sigma^2\end{equation} where the hyperparameter $\eta$ controls the overall strength of covariance, $\rho$ controls the timescale over which functions drawn from the process vary, and $\sigma$ controls the baseline level of variance. Our likelihood assumes that the occurrence rate is specified by exponentiated draws of the occurrence rate $y$ from the mean and covariance functions, and the observed outcome data are negative binomial-distributed according to that rate: \begin{align}y(x) &\sim \rm{N}(\mu(x), k(x)^2) \\ z(x) &\sim \rm{NB}(\exp(y(x)), \phi)\end{align} where $\rm{N}$ is the normal (parameterized by the standard deviation rather than the variance, per *Stan* standard syntax) and $\rm{NB}$ is the negative binomial distribution. We use the "alternative" parameterization of the negative binomial distribution described in the *Stan* manual, where the second parameter directly scales the overdispersion relative to a Poisson distribution. While we choose the negative binomial to permit overdispersion in the annualized mass shooting rate beyond counting noise, as we will see, the data provide strong evidence for small values of $\phi^{-1}$, consistent with Poisson noise. The role of each component of the Gaussian process will depend largely on the timescale parameter $\rho$. When the timescale is short, the model effectively divides the response into a long-term (timescale of the range of the data; in this case, decades) parametric effect and a short-term (timescale of e.g. years) non-parametric effect. This approach gives us the full flexibility of the Gaussian process for predictive applications, while still allowing us to make interpretable, parametric inferences on the long-term evolution of the system.
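For concreteness, here is a small NumPy sketch (not part of the original analysis; the hyperparameter values are made up purely for illustration) of the squared-exponential covariance matrix defined above:

```python
import numpy as np

def sq_exp_cov(x, eta, rho, sigma):
    """Squared-exponential covariance with a diagonal noise term."""
    d2 = (x[:, None] - x[None, :]) ** 2
    return eta ** 2 * np.exp(-rho ** 2 * d2) + np.eye(len(x)) * sigma ** 2

x = np.arange(0, 35.0)                               # years since 1982
K = sq_exp_cov(x, eta=1.0, rho=1. / 4., sigma=0.3)   # illustrative values only
print(K.shape)                                       # (35, 35)
```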
We apply the following prior and hyperprior distributions to provide weak information about the scale of the relevant parameters in the adopted unit system: \begin{align*}\rho^{-1} &\sim \Gamma(\alpha_{\rho}, \beta_{\rho}) \\ \eta^2 &\sim \rm{C}(0, 2.5) \\ \sigma^2 &\sim \rm{C}(0, 2.5) \\ \mu_0 &\sim \rm{N}(0, 2) \\ \mu_b &\sim \rm{N}(0, 0.2) \\ \phi^{-1} &\sim \rm{C}(0, 5)\end{align*} where $\Gamma$ is the gamma distribution; $\rm{C}$ is the half-Cauchy distribution; the parameters $\eta^2$, $\sigma^2$, and $\phi^{-1}$ are constrained to be positive; and we apply the constraint $\rho^{-1} > 1$ to enforce timescales $>1$ yr (the spacing of our data). Below we explore different choices for the $\alpha$ and $\beta$ parameters of the gamma hyperprior on $\rho^{-1}$, labeled as $\alpha_{\rho}$ and $\beta_{\rho}$. In particular, we explore $(\alpha_{\rho},\beta_{\rho}) = (4,1)$ and $(1,1/100)$. These correspond to prior distributions with standard deviations of $2$ and $100$ years, respectively. On top of the linear trend in the mean function, the former represents a strong prior expectation that the annualized rate of public mass shootings evolves on a timescale of a few years, and the latter represents a nearly-flat expectation for variations on timescales from a few years to a few centuries. We implement the Gaussian process model in *Stan*, adapting the logistic classification example in Section 14.5 of the *Stan* manual. *Stan's* *NUTS* sampler performs full joint Bayesian estimation of all parameters, including the mean function parameters $\mu_0$ and $\mu_b$, the Gaussian process hyperparameters $\eta$, $\rho$, and $\sigma$, and the negative binomial over-dispersion $\phi^{-1}$. The $\alpha_{\rho}$ and $\beta_{\rho}$ hyperparameters of the $\rho$ hyperprior distribution are fixed. We use the Cholesky factor transformed implementation of the normal distribution to calculate the likelihood. We expect these hyperparameters to be at least somewhat correlated and not well-identified, introducing significant curvature in the model posterior, indicating that Hamiltonian Monte Carlo (HMC) would be a particularly effective sampling strategy for this model (Betancourt & Girolami 2013). We fit the model to the 35 annual observations of the Mother Jones dataset and do model interpolation and prediction over a grid of 176 quarters from 1980 to 2024. We typically fit 8 independent chains of length 2000 iterations (following an equal number of NUTS warmup samples) in parallel using *Stan* and observe a typical execution time of ~1 min. For the purposes of this notebook, we obtain a larger number of samples by fitting 20 chains of 4000 samples in order to improve the resolution of 2D posterior histograms.
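As a quick sanity check on those two hyperprior choices (a sketch, not part of the original analysis), the implied means and standard deviations of the Gamma distributions can be computed directly:

```python
from scipy import stats

# Gamma(alpha, beta) parameterized by shape alpha and rate beta, as in the Stan model
for alpha, beta in [(4, 1.), (1, 1. / 100.)]:
    prior = stats.gamma(a=alpha, scale=1. / beta)
    print("alpha=%s, beta=%s -> mean=%.0f yr, std=%.0f yr" % (alpha, beta, prior.mean(), prior.std()))
# (4, 1)    -> mean 4 yr,   std 2 yr
# (1, 0.01) -> mean 100 yr, std 100 yr
```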
###Code
with open('gp_model_final.stan', 'r') as f:
stan_code = f.read()
print stan_code
###Output
data {
int<lower=1> N1;
vector[N1] x1;
int z1[N1];
int<lower=1> N2;
vector[N2] x2;
real<lower=0> alpha_rho;
real<lower=0> beta_rho;
}
transformed data {
int<lower=1> N;
vector[N1+N2] x;
// cov_exp_quad wants real valued inputs
real rx[N1+N2];
real rx1[N1];
real rx2[N2];
N = N1 + N2;
x = append_row(x1, x2);
rx = to_array_1d(x);
rx1 = to_array_1d(x1);
rx2 = to_array_1d(x2);
}
parameters {
vector[N1] y_tilde1;
real<lower=0> eta_sq;
real<lower=1> inv_rho;
real<lower=0> sigma_sq;
real mu_0;
real mu_b;
real<lower=0> NB_phi_inv;
}
model {
vector[N1] mu1;
vector[N1] y1;
matrix[N1,N1] Sigma1;
matrix[N1,N1] L1;
// Calculate mean function
mu1 = mu_0 + mu_b * x1;
// GP hyperpriors
eta_sq ~ cauchy(0, 1);
sigma_sq ~ cauchy(0, 1);
inv_rho ~ gamma(alpha_rho, beta_rho); // Gamma prior with mean of 4 and std of 2
// Calculate covariance matrix using new optimized function
Sigma1 = cov_exp_quad(rx1, sqrt(eta_sq), sqrt(0.5) * inv_rho);
for (n in 1:N1) Sigma1[n,n] = Sigma1[n,n] + sigma_sq;
// Decompose
L1 = cholesky_decompose(Sigma1);
// We're using a the non-centered parameterization, so rescale y_tilde
y1 = mu1 + L1 * y_tilde1;
// Mean model priors
mu_0 ~ normal(0, 2);
mu_b ~ normal(0, 0.2);
// Negative-binomial prior
// For neg_binomial_2, phi^-1 controls the overdispersion.
// phi^-1 ~ 0 reduces to the poisson. phi^-1 = 1 represents variance = mu+mu^2
NB_phi_inv ~ cauchy(0, 5);
// Generate non-centered parameterization
y_tilde1 ~ normal(0, 1);
// Likelihood
z1 ~ neg_binomial_2_log(y1, inv(NB_phi_inv));
}
generated quantities {
vector[N1] y1;
vector[N2] y2;
vector[N] y;
int z_rep[N];
{
// Don't save these parameters
matrix[N,N] Sigma;
matrix[N,N] L;
vector[N] y_tilde;
Sigma = cov_exp_quad(rx, sqrt(eta_sq), sqrt(0.5) * inv_rho);
for (n in 1:N) Sigma[n,n] = Sigma[n,n] + sigma_sq;
for (n in 1:N1) y_tilde[n] = y_tilde1[n];
for (n in (N1 + 1):N) y_tilde[n] = normal_rng(0,1);
// Decompose
L = cholesky_decompose(Sigma);
y = mu_0 + mu_b * x + L * y_tilde;
for (n in 1:N1) y1[n] = y[n];
for (n in 1:N2) y2[n] = y[N1+n];
for (n in 1:N) z_rep[n] = neg_binomial_2_log_rng(y[n], inv(NB_phi_inv));
}
}
###Markdown
Note that we use the newly introduced *cov_exp_quad* function to implement the squared exponential covariance function, and we rescale $\rho^{-1}$ by $2^{-1/2}$ to accommodate the difference between this implementation and our definition above. Moreover, we use a non-centered parameterization (see e.g. Papaspiliopoulos et al. 2003) for the Gaussian process, modeling the latent parameter $\tilde{y}$ as standard normal and then transforming to a sampled value for $y$ by rescaling by the covariance matrix.
Model fitting
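Before compiling and sampling, here is a tiny NumPy illustration (a sketch with made-up hyperparameter values, not part of the original analysis) of the non-centered draw $y = \mu + L\tilde{y}$ described above:

```python
import numpy as np

rng = np.random.RandomState(0)
x = np.arange(0, 35.0)                                         # years since 1982
d2 = (x[:, None] - x[None, :]) ** 2
K = 1.0 * np.exp(-(1. / 4.) ** 2 * d2) + 0.1 * np.eye(len(x))  # eta^2, rho, sigma^2 chosen for illustration
L = np.linalg.cholesky(K)
y_tilde = rng.normal(size=len(x))                              # standard normal latent draw
mu = -0.3 + 0.04 * x                                           # linear mean function (illustrative coefficients)
y = mu + L.dot(y_tilde)                                        # one GP draw with covariance K
```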
###Code
## Compile using pystan
#stan_model_compiled = pystan.StanModel(model_code=stan_code)
### Compile using cmdstan
### Script expects cmdstan installation at cmdstan_path
subprocess.call("mkdir "+cmdstan_path+"user-models", shell=1)
subprocess.call("cp gp_model_final.stan " + cmdstan_path+"user-models/", shell=1)
subprocess.call("make user-models/gp_model_final", cwd=cmdstan_path, shell=1)
###Output
_____no_output_____
###Markdown
Below we explore the consequences of different choices for the prior distribution on $\rho^{-1}$. To facilitate that analysis, here we fit the model twice with two different hyperparameter specifications provided as data. We will visualize and discuss these hyperprior choices in the next section. When not explicitly making comparisons between the two models, we focus on the model with the stronger prior on $\rho^{-1}$.
###Code
## Sampling parameters
Nchains = 20
Niter = 8000
cdic = {'max_treedepth': 15, 'adapt_delta': 0.95}
## Sample with strong prior on rho
stan_data_rho_strong = copy.copy(stan_data)
stan_data_rho_strong['alpha_rho'] = 4
stan_data_rho_strong['beta_rho'] = 1
## Sample with pystan
#stan_model_samp_rho_strong = stan_model_compiled.sampling(
# data = stan_data_rho_strong, iter=Niter,
# chains=Nchains, control=cdic, seed=1002
# )
## Sample with cmdstan
## Delete any old samples first
os.system('rm output_cmdstan_gp_rhostrong_samples*.csv')
stanhelper.stan_rdump(stan_data_rho_strong, 'input_data_rhostrong_final.R')
p = []
for i in range(Nchains):
cmd = """
{0}user-models/gp_model_final \
data file='input_data_rhostrong_final.R' \
sample num_warmup={2} num_samples={2} \
adapt delta={4} \
algorithm=hmc engine=nuts max_depth={3} \
random seed=1002 id={1} \
output file=output_cmdstan_gp_rhostrong_samples{1}.csv
""".format(cmdstan_path, i+1, Niter/2, cdic['max_treedepth'], cdic['adapt_delta'])
p += [subprocess.Popen(cmd, shell=True)]
## Don't move on until sampling is complete.
for i in range(Nchains):
p[i].wait()
## Write out results if using pystan
#stan_model_ext_rho_strong = stan_model_samp_rho_strong.extract()
#with open('stan_model_ext_rho_strong.p','w') as f: pickle.dump(stan_model_ext_rho_strong,f)
## Sample with weak prior on rho
stan_data_rho_weak = copy.copy(stan_data)
stan_data_rho_weak['alpha_rho'] = 1
stan_data_rho_weak['beta_rho'] = 1/100.
## Sample with pystan
#stan_model_samp_rho_weak = stan_model_compiled.sampling(data = stan_data_rho_weak, iter=Niter, chains=Nchains, control=cdic)
## Sample with cmdstan
## Delete any old samples first
os.system('rm output_cmdstan_gp_rhoweak_samples*.csv')
stanhelper.stan_rdump(stan_data_rho_weak, 'input_data_rhoweak_final.R')
p = []
for i in range(Nchains):
cmd = """
{0}user-models/gp_model_final \
data file='input_data_rhoweak_final.R' \
sample num_warmup={2} num_samples={2} \
adapt delta={4} \
algorithm=hmc engine=nuts max_depth={3} \
random seed=1002 id={1} \
output file=output_cmdstan_gp_rhoweak_samples{1}.csv
""".format(cmdstan_path, i+1, Niter/2, cdic['max_treedepth'], cdic['adapt_delta'])
p += [subprocess.Popen(cmd, shell=True)]
## Don't move on until sampling is complete.
for i in range(Nchains):
p[i].wait()
## Write out results if using pystan
#stan_model_ext_rho_weak = stan_model_samp_rho_weak.extract()
#with open('stan_model_ext_rho_weak.p','w') as f: pickle.dump(stan_model_ext_rho_weak,f)
def stan_read_csv_multi(path):
"""
Wrap the stanhelper.stan_read_csv function to load outputs
from multiple chains.
Parameters:
* path: file path for cmdstan output files including wildcard (*)
"""
## Enumerate files
from glob import glob
files = glob(path)
## Read in each file
result = {}
for file in files:
result[file] = stanhelper.stan_read_csv(file)
## Combine dictionaries
result_out = {}
keys = result[files[0]]
for key in keys:
result_out[key] = result[files[0]][key]
for f in files:
result_out[key] = np.append(result_out[key], result[f][key], axis=0)
## Remove extraneous dimension
for key in keys:
if result_out[key].shape[-1] == 1:
result_out[key] = np.squeeze(result_out[key], -1)
return result_out
stan_model_ext_rho_strong = stan_read_csv_multi('output_cmdstan_gp_rhostrong_samples*.csv')
stan_model_ext_rho_weak = stan_read_csv_multi('output_cmdstan_gp_rhoweak_samples*.csv')
###Output
_____no_output_____
###Markdown
The MCMC trace illustrates the high independence of samples achieved after the *NUTS* algorithm warm-up period, and the low variance in sampling distributions between chains.
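As a rough numerical complement to the visual trace inspection (a sketch only; it pools the concatenated draws across chains, so it merely approximates a per-chain autocorrelation):

```python
import numpy as np

samples = stan_model_ext_rho_strong['eta_sq']
lag1 = np.corrcoef(samples[:-1], samples[1:])[0, 1]
print("approximate lag-1 autocorrelation of eta_sq draws: %.3f" % lag1)
```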
###Code
## Traceplot
trace_pars = [('eta_sq','$\\eta^2$'),
('inv_rho','$\\rho^{-1}$'),
('sigma_sq','$\\sigma^2$'),
('mu_0','$\\mu_0$'),
('mu_b','$\\mu_b$'),
('NB_phi_inv','$\\rm{NB}_\\phi^{-1}$')]
fig,axs = plt.subplots(len(trace_pars),2, figsize=(8,8), sharex='all', sharey='row')
exts = [stan_model_ext_rho_strong, stan_model_ext_rho_weak]
exts_names = [r'Strong $\rho$ prior', r'Weak $\rho$ prior']
for j in range(2):
axs[0,j].set_title(exts_names[j])
for i,par in enumerate(trace_pars):
axs[i,j].plot(exts[j][par[0]], color='.5')
if j==0: axs[i,j].set_ylabel(par[1])
for k in range(1, Nchains+1):
axs[i,j].axvline(Niter/2 * k, c='r', zorder=-1)
axs[len(trace_pars) - 1,j].set_xticks(np.arange(0, (Niter/2)*Nchains+1, Niter*2))
###Output
_____no_output_____
###Markdown
We assess MCMC convergence quantitatively using the Gelman-Rubin convergence diagnostic, $\hat{R}$, a comparison of within- to between-chain variance. We find that $\hat{R} \ll 1.05$ for all parameters, indicating a negligible discrepancy in the sampling distributions between chains.
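For reference, a bare-bones sketch of the Gelman-Rubin calculation for a single parameter (illustrative only; below we rely on cmdstan's `stansummary` for the actual values):

```python
import numpy as np

def gelman_rubin(chains):
    """chains: array of shape (n_chains, n_samples) for one parameter."""
    m, n = chains.shape
    W = chains.var(axis=1, ddof=1).mean()      # mean within-chain variance
    B = n * chains.mean(axis=1).var(ddof=1)    # between-chain variance
    var_hat = (n - 1.) / n * W + B / n         # pooled posterior variance estimate
    return np.sqrt(var_hat / W)
```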
###Code
def read_stansummary(path, cmdstan_path=cmdstan_path):
"""
Wrapper for the cmdstan program stan_summary to calculate
sampling summary statistics across multiple MCMC chains.
Args:
path (str): Path, with a wildcard (*) for the id number
of each output chain
cmdstan_path (str): Path to the stan home directory
Returns:
out: A pandas dataframe with the summary statistics provided
by stan_summary. Note that each element of array variables
are provided on separate lines
"""
from StringIO import StringIO
summary_string = subprocess.check_output(cmdstan_path + 'bin/stansummary --sig_figs=5 '+path, shell=1)
out = pd.read_table(StringIO(summary_string), sep='\s+', header=4, skip_footer=6, engine='python')
return out
## Use cmdstan's stansummary command to calculate rhat
stan_model_sum_rho_strong = read_stansummary('output_cmdstan_gp_rhostrong*.csv')
stan_model_sum_rho_weak = read_stansummary('output_cmdstan_gp_rhoweak*.csv')
## Get summary statistics using pystan
#model_summary = stan_model_samp_rho_strong.summary()
#Rhat_vec = model_summary['summary'][:,array(model_summary['summary_colnames'])=='Rhat']
#pars = model_summary['summary_rownames']
## Get summary statistics using cmdstan wrapper
model_summary = stan_model_sum_rho_strong
Rhat_vec = stan_model_sum_rho_strong['R_hat'].values
pars = stan_model_sum_rho_strong.index
## Replace y1, y2 with summaries
sel_pars = ['y1', 'y2', u'eta_sq', u'inv_rho', u'sigma_sq', u'mu_0', u'mu_b', 'NB_phi_inv']
Rhat_dic = {}
for spar in sel_pars:
if spar in ('y1','y2'):
sel = np.where([True if p.startswith(spar) else False for p in pars])
Rhat_dic[spar] = np.percentile(Rhat_vec[sel], [5,50,95])
else:
Rhat_dic[spar] = [Rhat_vec[[pars==spar]],]*3
plt.figure(figsize=(5,6))
plt.errorbar(np.array(Rhat_dic.values())[:,1], np.arange(len(sel_pars)), \
xerr= [np.array(Rhat_dic.values())[:,1] - np.array(Rhat_dic.values())[:,0],\
np.array(Rhat_dic.values())[:,2] - np.array(Rhat_dic.values())[:,1]],\
capsize=0, marker='o', color='k', lw=0)
plt.yticks(np.arange(len(sel_pars)), Rhat_dic.keys(), size=11)
plt.xlabel('$\hat{R}$')
plt.axvline(1.0, color='.5', ls='solid', zorder=-2)
plt.axvline(1.05, color='.5', ls='dashed', zorder=-2)
plt.ylim(-.5, len(sel_pars)-.5)
plt.xlim(0.99, 1.06)
###Output
_____no_output_____
###Markdown
Posterior Simulations and Predictive Checks
To assess goodness of fit, we inspect simulated draws of the Gaussian process from the posterior and perform posterior predictive checks.
Simulated draws
First we perform a posterior predictive check by visualizing the sampled values of $z$, which realize both a draw from the latent Gaussian process for the public mass shooting rate and the overdispersed counting noise of the negative binomial distribution.
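A complementary numeric check (a sketch using variables already defined in this notebook) is to compare the maximum simulated annual count in each replicated dataset against the observed maximum:

```python
import numpy as np

z_rep_obs = stan_model_ext_rho_strong['z_rep'][:, :stan_data['N1']]   # replicated counts over the observed years
p_max = np.mean(z_rep_obs.max(axis=1) >= stan_data['z1'].max())
print("P(max simulated annual count >= observed max): %.2f" % p_max)
```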
###Code
N_samp = Niter / 2
print len(stan_model_ext_rho_strong['z_rep'])
print Niter
fig, axs = plt.subplots(5,5, figsize=(7,7), sharex='all', sharey='all')
po = axs[0,0].plot(data_years, stan_data['z1'], 'o', c='k', mfc='k', label='Observations', zorder=2, lw=1, ms=4)
axs[0,0].legend(numpoints=1, prop={'size':6})
for i in range(1,25):
draw = np.random.randint(0, N_samp)
py = stan_model_ext_rho_strong['z_rep'][draw][:stan_data['N1']]
axs.flatten()[i].plot(data_years, py, mfc='k', marker='o',
lw=.5, mec='none', ms=2, color='.5', label='GP realization')
axs[0,1].legend(numpoints=1, prop={'size':6})
axs[0,0].set_ylim(0,15)
axs[0,0].set_xticks([1980, 1990, 2000, 2010, 2020])
for ax in axs.flatten():
plt.setp(ax.get_xticklabels(), rotation='vertical', fontsize=9)
plt.setp(ax.get_yticklabels(), fontsize=9)
axs[2,0].set_ylabel('public mass shootings per year', size=9)
###Output
84000
8000
###Markdown
Visual inspection suggests that the observations simulated under the model show similar variation over time as the actual observations (first panel). We note that some realizations have annual counts at the later end of the modeled time range that exceed the largest observed annual count (7 public mass shootings). Some exceedence is expected given the counting noise, but this posterior predictive check could guide revision of the prior on the over-dispersion parameter or the choice of the negative binomial likelihood. Because the relative variance in the annualized counting statistics is high (i.e. public mass shootings are generally infrequent on an annual basis), it is also helpful to examine the model for the underlying shooting rate in detail. Next we plot the posterior distribution of the Gaussian process for the annualized mass shooting rate simulated across a grid of timepoints subsampled between years and extending beyond the current year (2016), effectively interpolating and extrapolating from the observations. The mean of the posterior predictive distribution of the Gaussian process is shown with the solid blue line, and the shaded region shows the 16 and 84th percentile intervals of the posterior (i.e. the "$1\sigma$ range").
###Code
def plot_GP(stan_model_ext):
y2_sum = np.percentile(np.exp(stan_model_ext['y2']), [16,50,84], axis=0)
plt.figure(figsize=(7,5))
pfb = plt.fill_between(data_years_samp, y2_sum[0], y2_sum[2], color='b', alpha=.5)
pfg = plt.plot(data_years_samp, y2_sum[1], c='b', lw=2, label='GP model', zorder=0)
po = plt.plot(data_years, stan_data['z1'], 'o', c='k', label='Observations', zorder=2)
plt.xlabel('Year')
plt.ylabel('Annual rate of public mass shootings')
plt.legend(prop={'size':10}, loc=2)
plt.ylim(0,15)
plt.gca().xaxis.set_minor_locator(FixedLocator(np.arange(min(data_years_samp), max(data_years_samp))))
plt.gca().set_xlim(min(data_years_samp) - 1, max(data_years_samp) + 1)
return pfb, pfg, po
pfb, pfg, po = plot_GP(stan_model_ext_rho_strong)
###Output
_____no_output_____
###Markdown
The Gaussian process captures an increase in the mass shooting rate over the decades and some fluctuations against that trend during certain periods, as we will explore in more detail below. The model does not show any visually apparent deviations from the evolution of the observational time series, although comparison to the data highlights several years with substantially outlying mass shooting totals (e.g. 1993 and 1999). The extrapolated period ($>2016$) suggests a range of possible future rates of growth from the 2016 level. We add random draws from the mean function to visualize our inferences on the long-term time evolution of the mass shooting rate.
###Code
def plot_GP_mu_draws(stan_model_ext):
plot_GP(stan_model_ext)
N_samp = len(stan_model_ext['mu_0'])
px = np.linspace(min(data_years_samp), max(data_years_samp), 100)
pfms = []
for i in range(20):
draw = np.random.randint(0, N_samp)
py = np.exp(stan_model_ext['mu_0'][draw] + (px - min(data_years)) * stan_model_ext['mu_b'][draw])
pfms.append(plt.plot(px, py, c='r',
zorder = 1, label = 'Mean function draws' if i==0 else None))
plt.legend(prop={'size':10}, loc=2)
plot_GP_mu_draws(stan_model_ext_rho_strong)
###Output
_____no_output_____
###Markdown
The comparison between draws of the mean functions (red) and the model posterior (blue) suggests that the mean function captures most of the modeled variation in the shooting rate over time. We can understand the behavior of the Gaussian process covariance function by isolating it from the mean function. We do so by subtracting the linear component of the mean function from the simulated Gaussian process rates ($y_2$) and plotting against the observations.
###Code
y2_gp_rho_strong = np.percentile(np.exp(
stan_model_ext_rho_strong['y2'] -
np.dot(stan_model_ext_rho_strong['mu_b'][:,np.newaxis], (data_years_samp[np.newaxis,:] - min(data_years)))
), [16,25,50,75,84], axis=0)
fig, axs = plt.subplots(2, figsize=(7,7), sharex='all')
pfb = axs[1].fill_between(data_years_samp, y2_gp_rho_strong[1], y2_gp_rho_strong[3], color='b', alpha=.25)
pfb2 = axs[1].fill_between(data_years_samp, y2_gp_rho_strong[0], y2_gp_rho_strong[4], color='b', alpha=.25)
pfg = axs[1].plot(data_years_samp, y2_gp_rho_strong[2], c='b', lw=2, label='GP model (covariance only)', zorder=0)
po = axs[0].plot(data_years, stan_data['z1'], 'o', c='k', label='Observations', zorder=2)
axs[1].axhline(np.exp(stan_model_ext_rho_strong['mu_0'].mean()), color='orange', label='$\mu_0$')
axs[0].set_ylabel('Annual rate of \npublic mass shootings\n(observations)')
axs[1].legend(prop={'size':8}, loc=2, ncol=2)
axs[1].set_ylabel('Annual rate of \npublic mass shootings\n(model)')
axs[1].set_ylim(0, 2.2)
axs[1].xaxis.set_minor_locator(FixedLocator(np.arange(min(data_years_samp), max(data_years_samp))))
axs[1].set_xlim(min(data_years_samp) - 1, max(data_years_samp) + 1)
###Output
_____no_output_____
###Markdown
In this plot, the shaded regions show the interquartile and $[16-84]$th percentile ranges. The fact that the interquartile contours never cross the mean ($\mu_0$) indicates that there is never $>75\%$ probability that the annualized trend deviates from the linear mean function. However, there are times when the interquartile range approaches the mean. Perhaps the most salient feature captured by the covariance function of the Gaussian process is a dip in the annualized rate of public mass shootings in the years from about 2000 to 2005. The model has no features that would seek to explain the causal origin of this dip, although many readers may be surprised by its juxtaposition with the Columbine High School massacre (1999), which is understood to have spawned dozens of "copycat" attacks over time (see e.g. Follman & Andrews 2015). The largest positive deviation from the mean function occurs between about 1988 and 1993. During that time, the mean function itself is very small (see previous figure), so this does not represent a large absolute deviation.
Gaussian process with weak $\rho^{-1}$ prior
For comparison, we visualize the latent Gaussian process under a weak prior for $\rho^{-1}$.
###Code
plot_GP(stan_model_ext_rho_weak)
###Output
_____no_output_____
###Markdown
It's clear from this visualization that the Gaussian process does not capture significant short-timescale variations when the timescale prior is loosened. This model also generally expresses lower uncertainty in the annual public mass shootings rate. Consistent with the reliance on the parametric, linear mean function, the extrapolated predictions do not account for any substantial probability of decrease in the rate of public mass shootings after 2016. We can see the dominance of the mean function over the covariance function directly by again visualizing the isolated Gaussian process covariance function, which shows virtually no deviation from the mean:
###Code
y2_gp_rho_weak = np.percentile(np.exp(
stan_model_ext_rho_weak['y2'] -
np.dot(stan_model_ext_rho_weak['mu_b'][:,np.newaxis], (data_years_samp[np.newaxis,:] - min(data_years)))
), [16,25,50,75,84], axis=0)
fig, axs = plt.subplots(1, figsize=(7,5), sharex='all')
pfb = axs.fill_between(data_years_samp, y2_gp_rho_weak[1], y2_gp_rho_weak[3], color='b', alpha=.25)
pfb2 = axs.fill_between(data_years_samp, y2_gp_rho_weak[0], y2_gp_rho_weak[4], color='b', alpha=.25)
pfg = axs.plot(data_years_samp, y2_gp_rho_weak[2], c='b', lw=2, label='GP model (covariance only)', zorder=0)
axs.axhline(np.exp(stan_model_ext_rho_weak['mu_0'].mean()), color='orange', label='$\mu_0$')
axs.legend(prop={'size':8}, loc=2, ncol=2)
axs.set_ylabel('Annual rate of \npublic mass shootings\n(model)')
axs.set_title(r'Weak $\rho$ prior')
axs.set_ylim(0, 2.2)
axs.xaxis.set_minor_locator(FixedLocator(np.arange(min(data_years_samp), max(data_years_samp))))
axs.set_xlim(min(data_years_samp) - 1, max(data_years_samp) + 1)
###Output
_____no_output_____
###Markdown
Inspection of posterior correlations
Before we explore the marginalized posterior distributions of the parameters in our model, we take advantage of the fully Bayesian posterior samples generated by the NUTS simulations to understand the correlations between parameters in the posterior distribution. First we note that the parameters of the linearized mean function are highly correlated:
###Code
plt.figure()
pa = plt.hist2d(stan_model_ext_rho_strong['mu_0'],
stan_model_ext_rho_strong['mu_b'],
bins=100, cmap=cm.Reds, cmin=4)
plt.xlabel(r'$\mu_0$ (log shootings)')
plt.ylabel(r'$\mu_b$ (log shootings per year)')
plt.axvline(0, color='k', ls='dashed')
plt.axhline(0, color='k', ls='dashed')
plt.axis([-1.5,1.5,-0.05,.1])
cb = plt.colorbar()
cb.set_label('Number of posterior samples')
###Output
_____no_output_____
###Markdown
If the mean rate of public mass shootings at the beginning of the time series ($\mu_0$) is inferred to be higher, then the increase in the mean function over time needed to explain the observations ($\mu_b$) would be lower. However, at all probable values of $\mu_0$, the distribution of $\mu_b$ is predominantly positive. We can fit a simple linear model to understand more subtle correlations in the multivariate posterior distribution. Here we fit a model for $\rho^{-1}$ as a function of the other major parameters of the model. We standardize the predictors so that we can directly compare the coefficients on the linear model.
###Code
import statsmodels.api as sm
## Assemble data matrices
y = pd.Series(stan_model_ext_rho_strong['inv_rho']); y.name = 'inv_rho'
X = pd.DataFrame({
'eta':np.sqrt(stan_model_ext_rho_strong['eta_sq']),
'mu_0':stan_model_ext_rho_strong['mu_0'],
'mu_b':stan_model_ext_rho_strong['mu_b'],
'sigma':np.sqrt(stan_model_ext_rho_strong['sigma_sq']),
'NB_phi_inv':np.sqrt(stan_model_ext_rho_strong['NB_phi_inv']),
})
## Standardize
X = X - X.mean()
X = X / X.std()
X = sm.add_constant(X)
y = (y - y.mean()) / y.std()
## Fit linear model using stats models
est = sm.OLS(y, X).fit()
## Print summary
print est.summary2()
###Output
Results: Ordinary least squares
====================================================================
Model: OLS Adj. R-squared: 0.054
Dependent Variable: inv_rho AIC: 233702.4431
Date: 2017-01-14 23:07 BIC: 233758.4745
No. Observations: 84000 Log-Likelihood: -1.1685e+05
Df Model: 5 F-statistic: 964.7
Df Residuals: 83994 Prob (F-statistic): 0.00
R-squared: 0.054 Scale: 0.94575
----------------------------------------------------------------------
Coef. Std.Err. t P>|t| [0.025 0.975]
----------------------------------------------------------------------
const -0.0000 0.0034 -0.0000 1.0000 -0.0066 0.0066
NB_phi_inv 0.0170 0.0034 5.0545 0.0000 0.0104 0.0236
eta 0.2318 0.0034 68.7066 0.0000 0.2252 0.2384
mu_0 0.0585 0.0062 9.4253 0.0000 0.0463 0.0706
mu_b 0.0647 0.0062 10.4458 0.0000 0.0525 0.0768
sigma 0.0172 0.0034 5.0956 0.0000 0.0106 0.0238
--------------------------------------------------------------------
Omnibus: 11670.523 Durbin-Watson: 2.016
Prob(Omnibus): 0.000 Jarque-Bera (JB): 18349.721
Skew: 0.978 Prob(JB): 0.000
Kurtosis: 4.189 Condition No.: 3
====================================================================
###Markdown
We see that the most significant correlation is between $\rho^{-1}$ and $\eta$. When we visualize this correlation, we observe that the level of posterior curvature associated with these two variables is small, though significant.
###Code
plt.figure()
pa = plt.hist2d(np.sqrt(stan_model_ext_rho_strong['eta_sq']),
stan_model_ext_rho_strong['inv_rho'],
bins=40, cmap=cm.Reds, cmin=4,
range = [[0,1],[1,12]])
plt.xlabel(r'$\eta$ (log shootings per year)')
plt.ylabel(r'$\rho^{-1}$ (years)')
sqrt_eta = np.sqrt(stan_model_ext_rho_strong['eta_sq'])
px = np.linspace(min(sqrt_eta), max(sqrt_eta), 10)
px_std = (px - np.mean(sqrt_eta)) / np.std(sqrt_eta)
plt.plot(px,
# Constant term
(est.params[est.model.exog_names.index('const')] +
# Linear term
px * est.params[est.model.exog_names.index('eta')]
# Standardization adjustment
* stan_model_ext_rho_strong['inv_rho'].std()) + stan_model_ext_rho_strong['inv_rho'].mean())
plt.axis()
cb = plt.colorbar()
cb.set_label('Number of posterior samples')
plt.title(r'Strong prior on $\rho^{-1}$')
###Output
_____no_output_____
###Markdown
When we explore the same correlation in the posterior of the model with a weak prior specified on the timescale hyperparameter, we see somewhat different results:
###Code
## Assemble data matrices
y = pd.Series(np.log(stan_model_ext_rho_weak['inv_rho'])); y.name = 'inv_rho'
X = pd.DataFrame({
'eta':np.sqrt(stan_model_ext_rho_weak['eta_sq']),
'mu_0':stan_model_ext_rho_weak['mu_0'],
'mu_b':stan_model_ext_rho_weak['mu_b'],
'sigma':np.sqrt(stan_model_ext_rho_weak['sigma_sq']),
'NB_phi_inv':np.sqrt(stan_model_ext_rho_weak['NB_phi_inv']),
})
## Standardize
X = X - X.mean()
X = X / X.std()
X = sm.add_constant(X)
y = (y - y.mean()) / y.std()
## Fit linear model using stats models
est = sm.OLS(y, X).fit()
## Print summary
print est.summary2()
plt.figure()
pa = plt.hist2d(np.sqrt(stan_model_ext_rho_weak['eta_sq']),
stan_model_ext_rho_weak['inv_rho'],
bins=40, cmap=cm.Reds, cmin=4,
range = [[0,4],[1,300]])
plt.xlabel(r'$\eta$ (log shootings per year)')
plt.ylabel(r'$\rho^{-1}$ (years)')
sqrt_eta = np.sqrt(stan_model_ext_rho_weak['eta_sq'])
px = np.linspace(min(sqrt_eta), max(sqrt_eta), 10)
px_std = (px - np.mean(sqrt_eta)) / np.std(sqrt_eta)
plt.plot(px,
# Constant term
(est.params[est.model.exog_names.index('const')] +
# Linear term
px * est.params[est.model.exog_names.index('eta')]
# Standardization adjustment
* stan_model_ext_rho_weak['inv_rho'].std()) + stan_model_ext_rho_weak['inv_rho'].mean())
plt.axis()
cb = plt.colorbar()
cb.set_label('Number of posterior samples')
plt.title(r'Weak prior on $\rho^{-1}$')
###Output
Results: Ordinary least squares
====================================================================
Model: OLS Adj. R-squared: 0.028
Dependent Variable: inv_rho AIC: 235996.6613
Date: 2017-01-14 23:07 BIC: 236052.6927
No. Observations: 84000 Log-Likelihood: -1.1799e+05
Df Model: 5 F-statistic: 486.1
Df Residuals: 83994 Prob (F-statistic): 0.00
R-squared: 0.028 Scale: 0.97194
----------------------------------------------------------------------
Coef. Std.Err. t P>|t| [0.025 0.975]
----------------------------------------------------------------------
const 0.0000 0.0034 0.0000 1.0000 -0.0067 0.0067
NB_phi_inv -0.0065 0.0034 -1.9194 0.0549 -0.0132 0.0001
eta 0.1635 0.0034 47.9897 0.0000 0.1568 0.1701
mu_0 0.0101 0.0037 2.7591 0.0058 0.0029 0.0172
mu_b -0.0321 0.0037 -8.7932 0.0000 -0.0393 -0.0250
sigma -0.0116 0.0034 -3.4000 0.0007 -0.0182 -0.0049
--------------------------------------------------------------------
Omnibus: 4585.854 Durbin-Watson: 1.816
Prob(Omnibus): 0.000 Jarque-Bera (JB): 5770.812
Skew: -0.547 Prob(JB): 0.000
Kurtosis: 3.673 Condition No.: 1
====================================================================
###Markdown
Again, $\eta$ is the parameter most significantly correlated with $\rho^{-1}$, but now the 2D posterior visualization shows that this correlation is substantially non-linear. In particular for the model with the weak prior on $\rho$, $\eta$ is constrained to much smaller values when the timescale $\rho^{-1}$ is small. In other words, in models that permit variations from the mean function on timescales smaller than the observational range ($\sim35$ years), the amplitude of those variations is constrained to be very small. In any scenario, as we have seen, the importance of the covariance function is minimal under this prior. Parameter inferences Below we show the marginalized posterior distributions of the parameters of the Gaussian process under the strong prior on $\rho$.
###Code
def gt0(y, x, lbound=0, ubound=np.inf):
y[(x<lbound) & (x>ubound)] = 0
return y
def marg_post_plot(stan_model_ext, alpha_rho, beta_rho, Nhist=25):
hyp_dic = {
'eta_sq': ('$\\eta$', np.sqrt, 'log shootings per year', lambda x: sstats.cauchy.pdf(x**2, 0, 1)),
'inv_rho': ('$\\rho^{-1}$', lambda x: x, 'years', lambda x: gt0(sstats.gamma.pdf(x, alpha_rho, scale=beta_rho), x, lbound=1)),
'sigma_sq': ('$\\sigma$', np.sqrt, 'log shootings per year', lambda x: sstats.cauchy.pdf(x**2, 0, 1)),
'NB_phi_inv':('$\\rm{NB}_\\phi^{-1}$', lambda x:x, '', lambda x: sstats.cauchy.pdf(x**2, 0, 0.5)),
}
meanfunc_dic = {
'mu_0': ('$\\mu_0$', lambda x: x, 'log shootings per year, '+str(np.min(data_years)), lambda x: sstats.norm.pdf(x, 0,2)),
'mu_b': ('$\\mu_b$', lambda x: x, 'annual increase in\nlog shootings per year', lambda x: sstats.norm.pdf(x, 0,0.2)),
}
for name,pdic in (('hyper', hyp_dic), ('meanfunc', meanfunc_dic)):
fig,axs = plt.subplots(1,len(pdic), figsize=(2.5*len(pdic), 2.5), sharey='all')
axs[0].set_ylabel('HMC samples ({} total)'.format(N_samp))
for i,hyp in enumerate(pdic.keys()):
samps = pdic[hyp][1](stan_model_ext[hyp])
hn, hb, hp = axs[i].hist(samps, Nhist, edgecolor='none', facecolor='.5', label='Posterior samples')
ppx = np.linspace(np.min(samps), np.max(samps), 10000)
ppy = pdic[hyp][1]( pdic[hyp][3](ppx) )
## Normalize
ppy *= len(samps) / np.sum(ppy) * len(ppy) / len(hn)
axs[i].plot(ppx, ppy, color='b', zorder=2, label='Hyperprior')
axs[i].xaxis.set_major_locator(MaxNLocator(3))
axs[i].xaxis.set_minor_locator(AutoMinorLocator(3))
axs[i].set_xlabel(pdic[hyp][0] + ' ({})'.format(pdic[hyp][2]), ha='center')
axs[i].axvline(0, ls='dashed', color='.2')
axs[-1].legend(prop={'size':9})
print "Strong prior on rho:"
marg_post_plot(stan_model_ext_rho_strong, stan_data_rho_strong['alpha_rho'], 1/stan_data_rho_strong['beta_rho'], Nhist=100)
###Output
Strong prior on rho:
###Markdown
The comparison of the posterior and prior distributions shows strong evidence from the data to identify most hyperparameters. The posterior for $\mu_0$ shows a concentration around a baseline rate of $\exp(-1)\sim0.4$ to $\exp(1)\sim 3$ public mass shootings per year at the start of the dataset, 1982, reflecting a variance much smaller than that of the corresponding prior. The negative binomial overdispersion parameter ($\phi^{-1}$) is concentrated towards very small values $\ll 1$, indicating that the Poisson distribution is a good approximation to the variance in the observations. The amplitude of the Gaussian process covariance function, $\eta$, is strongly shifted from the mode of the prior distribution, to a mean of $\exp(0.5)\sim1.6$ public mass shootings per year. The variance of the Gaussian process covariance function, $\sigma$, has a posterior variance much smaller than the prior distribution. The posterior distribution of $\rho^{-1}$ is a notable exception. It shows no visual deviation from the prior distribution, indicating that this parameter is not identified by the observations. Next we explore the same marginalized posteriors under the weak prior on $\rho$.
###Code
print "Weak prior on rho:"
marg_post_plot(stan_model_ext_rho_weak, stan_data_rho_weak['alpha_rho'], 1/stan_data_rho_weak['beta_rho'], Nhist=100)
###Output
Weak prior on rho:
###Markdown
With the weak prior on $\rho$, most parameters have posterior distributions nearly identical to their distributions under the strong prior on $\rho$. In particular, the conclusions about the mean function parameters ($\mu_0$ and $\mu_b$), $\phi$, and $\sigma$ seem robust to the choice of prior. Importantly, the $\rho$ parameter is again largely non-identified. Its posterior distribution generally follows the weaker prior, although it shows a posterior probability less than the prior for the very smallest values. The consequence is that the models sampled from the Gaussian process have very long timescales for their covariance function. The distribution of the amplitude, $\eta$, is skewed to larger values under the weaker prior, although the amplitude of the mean function has little consequence when the time variation is negligible (as discussed in the previous section).
Model predictions
We calculate the posterior probability that the annualized rate of public mass shootings has increased in the US since 1982 ($\mu_b > 0$).
###Code
print_ext_names = ['...with strong prior on rho: ', '...with weak prior on rho: ']
print 'p(mu_b > 0):'
for i in range(2):
print print_ext_names[i]+'%0.0f'%(np.mean(exts[i]['mu_b'] > 0)*100)+'%'
###Output
p(mu_b > 0):
...with strong prior on rho: 97%
...with weak prior on rho: 97%
###Markdown
This indicates strong statistical evidence for a long term increase in the annualized rate of public mass shootings over the past three decades, regardless of our choice of prior for the timescale parameter, $\rho$. In linear terms, the mean percentage increase in the rate of public mass shootings is found to be,
###Code
zincreaseraw = {}
for i in range(2):
zincreaseraw[i] = (np.exp((2016 - np.min(data_years)) * exts[i]['mu_b']) - 1) * 100
zincrease = np.percentile(zincreaseraw[i], [16,50,84])
print print_ext_names[i]+'%0.0f'%round(zincrease[1], -1)+'^{+%0.0f'%round(zincrease[2]-zincrease[1], -1)+'}_{-%0.0f'%round(zincrease[1]-zincrease[0], -1)+'}'
###Output
...with strong prior on rho: 350^{+500}_{-230}
...with weak prior on rho: 360^{+490}_{-230}
###Markdown
While the uncertainty interval is large, the $1\sigma$ estimate suggests at least a doubling in the annualized rate of public mass shootings over these three decades, and more likely a quadrupling or greater increase. For comparison, the US population has grown from $\sim231$ million to $318$ million residents according to [World Bank data](http://data.worldbank.org/indicator/SP.POP.TOTL?cid=GPD_1), an increase of $38\%$, over that same period. The model posterior suggests that the rate of public mass shootings has surpassed the rate of population growth with high confidence:
###Code
for i in range(2):
print print_ext_names[i]+'%0.0f'%(np.mean(zincreaseraw[i] > 38)*100)+'%'
###Output
...with strong prior on rho: 94%
...with weak prior on rho: 94%
###Markdown
Cohen et al. (2014) reported a tripling in the rate of mass shootings between 2011 and 2014 on the basis of an SPC methodology. Our inference on the mean function of the Gaussian process, because it is parameterized as linear over the full time extent of the modeled period, does not directly address this claim. But the simulated predictions of the Gaussian process, including the covariance component, can generate relevant comparisons.
###Code
i1 = np.argmin(abs(data_years_samp - 2011.5))
i2 = np.argmin(abs(data_years_samp - 2014.5))
py = np.exp(stan_model_ext_rho_strong['y2'][:,i2]) / np.exp(stan_model_ext_rho_strong['y2'][:,i1])
plt.figure()
ph = plt.hist(py, 50, edgecolor='none', facecolor='.5', range=[0,8], normed=1)
plt.xlabel('Relative rate of public mass shootings in 2014 versus 2011')
plt.ylabel('Posterior probability')
plt.axvline(1, color='k', label='Unity')
plt.axvline(np.mean(py), color='b', label='Mean posterior estimate', ls='dashed')
plt.axvline(3, color='g', label='Cohen et al. estimate', lw=2, ls='dotted')
plt.legend()
print "Probability that rate increased: ", '%0.0f'%(np.mean(py > 1) * 100), '%'
print "Mean predicted level of increase: ", '%0.1f'%(np.mean(py)), 'X'
print "Probability of increase by at least 3X: ", '%0.2f'%(np.mean(py > 3)), '%'
###Output
Probability that rate increased: 58 %
Mean predicted level of increase: 1.3 X
Probability of increase by at least 3X:  3 %
|
week02_autodiff/other_frameworks/seminar_tensorflow.ipynb | ###Markdown
Going deeper with Tensorflow In this seminar, we're going to play with [Tensorflow](https://www.tensorflow.org/) and see how it helps us build deep learning models. If you're running this notebook outside the course environment, you'll need to install tensorflow:* `pip install tensorflow` should install cpu-only TF on Linux & Mac OS* If you want GPU support from the outset, see [TF install page](https://www.tensorflow.org/install/)
###Code
import tensorflow as tf
gpu_options = tf.GPUOptions(allow_growth=True, per_process_gpu_memory_fraction=0.1)
s = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))
###Output
_____no_output_____
###Markdown
Warming up For starters, let's implement a python function that computes the sum of squares of numbers from 0 to N-1.* Use numpy or python* An array of numbers 0 to N - numpy.arange(N)
###Code
import numpy as np
def sum_squares(N):
return <student.Implement_me()>
%%time
sum_squares(10**8)
###Output
_____no_output_____
###Markdown
Tensorflow teaser Doing the very same thing
###Code
#I gonna be your function parameter
N = tf.placeholder('int64', name="input_to_your_function")
#i am a recipe on how to produce sum of squares of arange of N given N
result = tf.reduce_sum((tf.range(N)**2))
%%time
#example of computing the same as sum_squares
print(result.eval({N:10**8}))
###Output
_____no_output_____
###Markdown
How does it work? 1. define placeholders where you'll send inputs;2. make symbolic graph: a recipe for mathematical transformation of those placeholders;3. compute outputs of your graph with particular values for each placeholder * output.eval({placeholder:value}) * s.run(output, {placeholder:value})* So far there are two main entities: "placeholder" and "transformation"* Both can be numbers, vectors, matrices, tensors, etc.* Both can be int32/64, floats or booleans (uint8) of various sizes.* You can define new transformations as an arbitrary operation on placeholders and other transformations * tf.reduce_sum(tf.range(N)\**2) are 3 sequential transformations of placeholder N * There's a tensorflow symbolic version for every numpy function * `a+b, a/b, a**b, ...` behave just like in numpy * np.mean -> tf.reduce_mean * np.arange -> tf.range * np.cumsum -> tf.cumsum * If you can't find the op you need, see the [docs](https://www.tensorflow.org/api_docs/python). Still confused? We gonna fix that.
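For instance, the two calling conventions are interchangeable for the sum-of-squares graph we already built:
###Code
# both ways of evaluating the graph from the teaser above give the same number
print(result.eval({N: 10**4}))
print(s.run(result, {N: 10**4}))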
###Code
#Default placeholder that can be arbitrary float32 scalar, vertor, matrix, etc.
arbitrary_input = tf.placeholder('float32')
#Input vector of arbitrary length
input_vector = tf.placeholder('float32',shape=(None,))
#Input vector that _must_ have 10 elements and integer type
fixed_vector = tf.placeholder('int32',shape=(10,))
#Matrix of arbitrary n_rows and 15 columns (e.g. a minibatch your data table)
input_matrix = tf.placeholder('float32',shape=(None,15))
#You can generally use None whenever you don't need a specific shape
input1 = tf.placeholder('float64',shape=(None,100,None))
input2 = tf.placeholder('int32',shape=(None,None,3,224,224))
#elementwise multiplication
double_the_vector = input_vector*2
#elementwise cosine
elementwise_cosine = tf.cos(input_vector)
#difference between squared vector and vector itself
vector_squares = input_vector**2 - input_vector
#Practice time: create two vectors of type float32
my_vector = <student.init_float32_vector()>
my_vector2 = <student.init_one_more_such_vector()>
#Write a transformation(recipe):
#(vec1)*(vec2) / (sin(vec1) +1)
my_transformation = <student.implementwhatwaswrittenabove()>
print(my_transformation)
#it's okay, it's a symbolic graph
#
dummy = np.arange(5).astype('float32')
my_transformation.eval({my_vector:dummy,my_vector2:dummy[::-1]})
###Output
_____no_output_____
###Markdown
Visualizing graphs It's often useful to visualize the computation graph when debugging or optimizing. Interactive visualization is where tensorflow really shines as compared to other frameworks. There's a special instrument for that, called Tensorboard. You can launch it from console:```tensorboard --logdir=/tmp/tboard --port=7007```If you're pathologically afraid of consoles, try this:```os.system("tensorboard --logdir=/tmp/tboard --port=7007 &")```_(but don't tell anyone we taught you that)_
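Besides the computation graph, the _scalars_ tab is handy for learning curves. Below is a minimal sketch of logging a scalar over a few steps; the loss values are made up just to show the mechanics, and the logdir is arbitrary.
###Code
# sketch: log a scalar so it shows up in tensorboard's scalars tab
loss_to_log = tf.placeholder('float32', name='loss_to_log')
loss_summary = tf.summary.scalar('toy_loss', loss_to_log)
scalar_writer = tf.summary.FileWriter("/tmp/tboard/toy_run")
for step, fake_loss in enumerate([1.0, 0.5, 0.25, 0.125]):
    summary_str = s.run(loss_summary, {loss_to_log: fake_loss})
    scalar_writer.add_summary(summary_str, step)
scalar_writer.close()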
###Code
# launch tensorflow the ugly way, uncomment if you need that
import os
port = 6000 + os.getuid()
print("Port: %d" % port)
#!killall tensorboard
os.system("tensorboard --logdir=./tboard --port=%d &" % port)
# show graph to tensorboard
writer = tf.summary.FileWriter("./tboard", graph=tf.get_default_graph())
writer.close()
###Output
_____no_output_____
###Markdown
One basic functionality of tensorboard is drawing graphs. Once you've run the cell above, go to `localhost:7007` in your browser and switch to the _graphs_ tab in the topbar. Here's what you should see: Tensorboard also allows you to draw graphs (e.g. learning curves), record images & audio ~~and play flash games~~. This is useful when monitoring learning progress and catching some training issues. One researcher said:```If you spent last four hours of your worktime watching as your algorithm prints numbers and draws figures, you're probably doing deep learning wrong.``` You can read more on tensorboard usage [here](https://www.tensorflow.org/get_started/graph_viz) Do It Yourself __[2 points max]__
###Code
# Quest #1 - implement a function that computes a mean squared error of two input vectors
# Your function has to take 2 vectors and return a single number
<student.define_inputs_and_transformations()>
mse =<student.define_transformation()>
compute_mse = lambda vector1, vector2: <how to run you graph?>
# Tests
from sklearn.metrics import mean_squared_error
for n in [1,5,10,10**3]:
elems = [np.arange(n),np.arange(n,0,-1), np.zeros(n),
np.ones(n),np.random.random(n),np.random.randint(100,size=n)]
for el in elems:
for el_2 in elems:
true_mse = np.array(mean_squared_error(el,el_2))
my_mse = compute_mse(el,el_2)
if not np.allclose(true_mse,my_mse):
print('Wrong result:')
print('mse(%s,%s)' % (el,el_2))
print("should be: %f, but your function returned %f" % (true_mse,my_mse))
                raise ValueError("Something is wrong")
print("All tests passed")
###Output
_____no_output_____
###Markdown
variables The inputs and transformations have no value outside a function call. This isn't too comfortable if you want your model to have parameters (e.g. network weights) that are always present, but can change their value over time. Tensorflow solves this with `tf.Variable` objects.* You can assign a variable a value at any time in your graph* Unlike placeholders, there's no need to explicitly pass values to variables when `s.run(...)`-ing* You can use variables the same way you use transformations
###Code
#creating shared variable
shared_vector_1 = tf.Variable(initial_value=np.ones(5))
#initialize variable(s) with initial values
s.run(tf.global_variables_initializer())
#evaluating shared variable (outside symbolicd graph)
print("initial value", s.run(shared_vector_1))
# within the symbolic graph you use them just like any other input or transformation, no "get value" needed
#setting new value
s.run(shared_vector_1.assign(np.arange(5)))
#getting that new value
print("new value", s.run(shared_vector_1))
###Output
_____no_output_____
###Markdown
tf.gradients - why graphs matter* Tensorflow can compute derivatives and gradients automatically using the computation graph* Gradients are computed as a product of elementary derivatives via chain rule:$$ {\partial f(g(x)) \over \partial x} = {\partial f(g(x)) \over \partial g(x)}\cdot {\partial g(x) \over \partial x} $$It can get you the derivative of any graph as long as it knows how to differentiate elementary operations
###Code
my_scalar = tf.placeholder('float32')
scalar_squared = my_scalar**2
#a derivative of scalar_squared by my_scalar
derivative = tf.gradients(scalar_squared, my_scalar)[0]
import matplotlib.pyplot as plt
%matplotlib inline
x = np.linspace(-3,3)
x_squared, x_squared_der = s.run([scalar_squared,derivative],
{my_scalar:x})
plt.plot(x, x_squared,label="x^2")
plt.plot(x, x_squared_der, label="derivative")
plt.legend();
###Output
_____no_output_____
###Markdown
Why that rocks
###Code
my_vector = tf.placeholder('float32',[None])
#Compute the gradient of the next weird function over my_scalar and my_vector
#warning! Trying to understand the meaning of that function may result in permanent brain damage
weird_psychotic_function = tf.reduce_mean((my_vector+my_scalar)**(1+tf.nn.moments(my_vector,[0])[1]) + 1./ tf.atan(my_scalar))/(my_scalar**2 + 1) + 0.01*tf.sin(2*my_scalar**1.5)*(tf.reduce_sum(my_vector)* my_scalar**2)*tf.exp((my_scalar-4)**2)/(1+tf.exp((my_scalar-4)**2))*(1.-(tf.exp(-(my_scalar-4)**2))/(1+tf.exp(-(my_scalar-4)**2)))**2
der_by_scalar = <student.compute_grad_over_scalar()>
der_by_vector = <student.compute_grad_over_vector()>
#Plotting your derivative
scalar_space = np.linspace(1, 7, 100)
y = [s.run(weird_psychotic_function, {my_scalar:x, my_vector:[1, 2, 3]})
for x in scalar_space]
plt.plot(scalar_space, y, label='function')
y_der_by_scalar = [s.run(der_by_scalar, {my_scalar:x, my_vector:[1, 2, 3]})
for x in scalar_space]
plt.plot(scalar_space, y_der_by_scalar, label='derivative')
plt.grid()
plt.legend();
###Output
_____no_output_____
###Markdown
Almost done - optimizers While you can perform gradient descent by hand with automatic grads from above, tensorflow also has some optimization methods implemented for you. Recall momentum & rmsprop?
###Code
y_guess = tf.Variable(np.zeros(2,dtype='float32'))
y_true = tf.range(1,3,dtype='float32')
loss = tf.reduce_mean((y_guess - y_true + tf.random_normal([2]))**2)
optimizer = tf.train.MomentumOptimizer(0.01,0.9).minimize(loss,var_list=y_guess)
#same, but more detailed:
#updates = [[tf.gradients(loss,y_guess)[0], y_guess]]
#optimizer = tf.train.MomentumOptimizer(0.01,0.9).apply_gradients(updates)
from IPython.display import clear_output
s.run(tf.global_variables_initializer())
guesses = [s.run(y_guess)]
for _ in range(100):
s.run(optimizer)
guesses.append(s.run(y_guess))
clear_output(True)
plt.plot(*zip(*guesses),marker='.')
plt.scatter(*s.run(y_true),c='red')
plt.show()
###Output
_____no_output_____
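###Markdown
Swapping in a different built-in optimizer is a one-line change. A quick sketch, reusing the `loss` and `y_guess` defined above (remember to re-run the variables initializer, since optimizers like Adam create extra slot variables):
###Code
# sketch: the same minimization with other tf.train optimizers
optimizer_rmsprop = tf.train.RMSPropOptimizer(learning_rate=0.01).minimize(loss, var_list=y_guess)
optimizer_adam = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss, var_list=y_guess)
s.run(tf.global_variables_initializer())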
###Markdown
Logistic regression example Implement the regular logistic regression training algorithm Tips:* Use a shared variable for weights* X and y are potential inputs* Compile 2 functions: * `train_function(X, y)` - returns error and computes weights' new values __(through updates)__ * `predict_fun(X)` - just computes probabilities ("y") given data We shall train on a two-class MNIST dataset* please note that target `y` are `{0,1}` and not `{-1,1}` as in some formulae
###Code
from sklearn.datasets import load_digits
mnist = load_digits(2)
X,y = mnist.data, mnist.target
print("y [shape - %s]:" % (str(y.shape)), y[:10])
print("X [shape - %s]:" % (str(X.shape)))
print('X:\n',X[:3,:10])
print('y:\n',y[:10])
plt.imshow(X[0].reshape([8,8]))
# inputs and shareds
weights = <student.code_variable()>
input_X = <student.code_placeholder()>
input_y = <student.code_placeholder()>
predicted_y = <predicted probabilities for input_X>
loss = <logistic loss (scalar, mean over sample)>
optimizer = <optimizer that minimizes loss>
train_function = <compile function that takes X and y, returns log loss and updates weights>
predict_function = <compile function that takes X and computes probabilities of y>
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
from sklearn.metrics import roc_auc_score
for i in range(5):
<run optimizer operation>
loss_i = <compute loss at iteration i>
print("loss at iter %i:%.4f" % (i, loss_i))
print("train auc:",roc_auc_score(y_train, predict_function(X_train)))
print("test auc:",roc_auc_score(y_test, predict_function(X_test)))
print ("resulting weights:")
plt.imshow(s.run(weights).reshape(8, -1))
plt.colorbar();
###Output
_____no_output_____
###Markdown
Bonus: my1stNN Your ultimate task for this week is to build your first neural network [almost] from scratch in pure tensorflow. This time you will solve the same digit recognition problem, but at a larger scale* images are now 28x28* 10 different digits* 50k samples Note that you are not required to build 152-layer monsters here. A 2-layer (one hidden, one output) NN should already give you an edge over logistic regression. __[bonus score]__ If you've already beaten logistic regression with a two-layer net, but enthusiasm still ain't gone, you can try improving the test accuracy even further! The milestones would be 95%/97.5%/98.5% accuracy on the test set. __SPOILER!__ At the end of the notebook you will find a few tips and frequently made mistakes. If you feel enough might to shoot yourself in the foot without external assistance, we encourage you to do so, but if you encounter any unsurpassable issues, please do look there before mailing us.
###Code
from mnist import load_dataset
#[down]loading the original MNIST dataset.
#Please note that you should only train your NN on _train sample,
# _val can be used to evaluate out-of-sample error, compare models or perform early-stopping
# _test should be hidden under a rock until final evaluation... But we both know it is near impossible to catch you evaluating on it.
X_train,y_train,X_val,y_val,X_test,y_test = load_dataset()
print (X_train.shape,y_train.shape)
plt.imshow(X_train[0,0])
<here you could just as well create computation graph>
<this may or may not be a good place to evaluating loss and optimizer>
<this may be a perfect cell to write a training&evaluation loop in>
<predict & evaluate on test here, right? No cheating pls.>
###Output
_____no_output_____
###Markdown
Down the rabbit hole with Tensorflow In this seminar, we're going to play with [Tensorflow](https://www.tensorflow.org/) and see how it helps you build deep learning models. If you're running this notebook outside the course environment, you'll need to install tensorflow:* `pip install tensorflow` should install cpu-only TF on Linux & Mac OS* If you want GPU support from the outset, see [TF install page](https://www.tensorflow.org/install/)
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import tensorflow as tf
# session is main tensorflow object. You ask session to compute stuff for you.
sess = tf.InteractiveSession()
###Output
_____no_output_____
###Markdown
Warming up For starters, let's implement a python function that computes the sum of squares of numbers from 0 to N-1.* Use numpy or python* An array of numbers 0 to N - numpy.arange(N)
###Code
def sum_squares(N):
return <student.implement_me()>
%%time
sum_squares(10**8)
###Output
_____no_output_____
###Markdown
__Same with tensorflow__
###Code
# "i will insert N here later"
N = tf.placeholder('int64', name="input_to_your_function")
# a recipe on how to produce {sum of squares of arange of N} given N
result = tf.reduce_sum((tf.range(N)**2))
%%time
# dear session, compute the result please. Here's your N.
print(sess.run(result, {N: 10**8}))
# hint: run it several times to let tensorflow "warm up"
###Output
_____no_output_____
###Markdown
How it works: computation graphs 1. create placeholders for future inputs;2. define symbolic graph: a recipe for mathematical transformation of those placeholders;3. compute outputs of your graph with particular values for each placeholder * ```sess.run(outputs, {placeholder1:value1, placeholder2:value2})``` * OR output.eval({placeholder:value}) Still confused? We gonna fix that. __Placeholders and constants__
###Code
# placeholder that can be an arbitrary float32 scalar, vector, matrix, etc.
arbitrary_input = tf.placeholder('float32')
# input vector of arbitrary length
input_vector = tf.placeholder('float32',shape=(None,))
# input vector that _must_ have 10 elements and integer type
fixed_vector = tf.placeholder('int32',shape=(10,))
# you can generally use None whenever you don't need a specific shape
input1 = tf.placeholder('float64',shape=(None, 100, None))
input2 = tf.placeholder('int32',shape=(None, None, 3, 224, 224))
###Output
_____no_output_____
###Markdown
You can create new __tensors__ with arbitrary operations on placeholders, constants and other tensors.* tf.reduce_sum(tf.range(N)\**2) are 3 sequential transformations of placeholder N* there's a tensorflow symbolic version for every numpy function * `a + b, a / b, a ** b, ...` behave just like in numpy * np.zeros -> tf.zeros * np.sin -> tf.sin * np.mean -> tf.reduce_mean * np.arange -> tf.range There is plenty of other stuff in tensorflow, see the [docs](https://www.tensorflow.org/api_docs/python) or learn as you go with __shift+tab__.
###Code
# elementwise multiplication
double_the_vector = input_vector * 2
# elementwise cosine
elementwise_cosine = tf.cos(input_vector)
# elementwise difference between squared vector and its mean - with some random salt
vector_squares = input_vector ** 2 - tf.reduce_mean(input_vector) + tf.random_normal(tf.shape(input_vector))
###Output
_____no_output_____
###Markdown
Practice 1: polar pretzels _inspired by [this post](https://www.quora.com/What-are-the-most-interesting-equation-plots)_ There are some simple mathematical functions with cool plots. For one, consider this:$$ x(t) = t - 1.5 * \cos( 15 t) $$$$ y(t) = t - 1.5 * \sin( 16 t) $$
###Code
t = tf.placeholder('float32')
# compute x(t) and y(t) as defined above.
x = ###YOUR CODE
y = ###YOUR CODE
x_points, y_points = sess.run([x, y], {t: np.linspace(-10, 10, num=10000)})
plt.plot(x_points, y_points);
###Output
_____no_output_____
###Markdown
Visualizing graphs with Tensorboard It's often useful to visualize the computation graph when debugging or optimizing. Interactive visualization is where tensorflow really shines as compared to other frameworks. There's a special instrument for that, called Tensorboard. You can launch it from console:__```tensorboard --logdir=/tmp/tboard --port=7007```__If you're pathologically afraid of consoles, try this:__```import os; os.system("tensorboard --logdir=/tmp/tboard --port=7007 &")```___(but don't tell anyone we taught you that)_ One basic functionality of tensorboard is drawing graphs. Once you've run the cell above, go to `localhost:7007` in your browser and switch to the _graphs_ tab in the topbar. Here's what you should see: Tensorboard also allows you to draw graphs (e.g. learning curves), record images & audio ~~and play flash games~~. This is useful when monitoring learning progress and catching some training issues. One researcher said:```If you spent last four hours of your worktime watching as your algorithm prints numbers and draws figures, you're probably doing deep learning wrong.``` You can read more on tensorboard usage [here](https://www.tensorflow.org/get_started/graph_viz) Practice 2: mean squared error
###Code
# Quest #1 - implement a function that computes a mean squared error of two input vectors
# Your function has to take 2 vectors and return a single number
<student.define_inputs_and_transformations()>
mse =<student.define_transformation()>
compute_mse = lambda vector1, vector2: sess.run(<how to run you graph?>, {})
# Tests
from sklearn.metrics import mean_squared_error
for n in [1, 5, 10, 10 ** 3]:
elems = [np.arange(n),np.arange(n,0,-1), np.zeros(n),
np.ones(n),np.random.random(n),np.random.randint(100,size=n)]
for el in elems:
for el_2 in elems:
true_mse = np.array(mean_squared_error(el,el_2))
my_mse = compute_mse(el,el_2)
if not np.allclose(true_mse,my_mse):
print('Wrong result:')
print('mse(%s,%s)' % (el,el_2))
print("should be: %f, but your function returned %f" % (true_mse,my_mse))
                raise ValueError("Something is wrong")
print("All tests passed")
###Output
_____no_output_____
###Markdown
Tensorflow variables The inputs and transformations have no value outside a function call. That's a bit unnatural if you want your model to have parameters (e.g. network weights) that are always present, but can change their value over time. Tensorflow solves this with `tf.Variable` objects.* You can assign a variable a value at any time in your graph* Unlike placeholders, there's no need to explicitly pass values to variables when `s.run(...)`-ing* You can use variables the same way you use transformations
###Code
# creating shared variable
shared_vector_1 = tf.Variable(initial_value=np.ones(5))
# initialize all variables with initial values
sess.run(tf.global_variables_initializer())
# evaluating shared variable (outside symbolicd graph)
print("initial value", sess.run(shared_vector_1))
# within the symbolic graph you use them just like any other input or transformation, no "get value" needed
# setting new value manually
sess.run(shared_vector_1.assign(np.arange(5)))
#getting that new value
print("new value", sess.run(shared_vector_1))
###Output
_____no_output_____
###Markdown
tf.gradients - why graphs matter* Tensorflow can compute derivatives and gradients automatically using the computation graph* Gradients are computed as a product of elementary derivatives via chain rule:$$ {\partial f(g(x)) \over \partial x} = {\partial f(g(x)) \over \partial g(x)}\cdot {\partial g(x) \over \partial x} $$It can get you the derivative of any graph as long as it knows how to differentiate elementary operations
###Code
my_scalar = tf.placeholder('float32')
scalar_squared = my_scalar ** 2
#a derivative of scalar_squared by my_scalar
derivative = tf.gradients(scalar_squared, [my_scalar])[0]
x = np.linspace(-3,3)
x_squared, x_squared_der = sess.run([scalar_squared, derivative], {my_scalar:x})
plt.plot(x, x_squared,label="x^2")
plt.plot(x, x_squared_der, label="derivative")
plt.legend();
###Output
_____no_output_____
###Markdown
Why autograd is cool
###Code
my_vector = tf.placeholder('float32',[None])
#Compute the gradient of the next weird function over my_scalar and my_vector
#warning! Trying to understand the meaning of that function may result in permanent brain damage
weird_psychotic_function = tf.reduce_mean((my_vector+my_scalar)**(1+tf.nn.moments(my_vector,[0])[1]) + 1./ tf.atan(my_scalar))/(my_scalar**2 + 1) + 0.01*tf.sin(2*my_scalar**1.5)*(tf.reduce_sum(my_vector)* my_scalar**2)*tf.exp((my_scalar-4)**2)/(1+tf.exp((my_scalar-4)**2))*(1.-(tf.exp(-(my_scalar-4)**2))/(1+tf.exp(-(my_scalar-4)**2)))**2
der_by_scalar = <student.compute_grad_over_scalar()>
der_by_vector = <student.compute_grad_over_vector()>
#Plotting your derivative
scalar_space = np.linspace(1, 7, 100)
y = [sess.run(weird_psychotic_function, {my_scalar:x, my_vector:[1, 2, 3]})
for x in scalar_space]
plt.plot(scalar_space, y, label='function')
y_der_by_scalar = [sess.run(der_by_scalar, {my_scalar:x, my_vector:[1, 2, 3]})
for x in scalar_space]
plt.plot(scalar_space, y_der_by_scalar, label='derivative')
plt.grid()
plt.legend();
###Output
_____no_output_____
###Markdown
Almost done - optimizers While you can perform gradient descent by hand with automatic grads from above, tensorflow also has some optimization methods implemented for you. Recall momentum & rmsprop?
###Code
y_guess = tf.Variable(np.zeros(2,dtype='float32'))
y_true = tf.range(1,3,dtype='float32')
loss = tf.reduce_mean((y_guess - y_true + tf.random_normal([2]))**2)
optimizer = tf.train.MomentumOptimizer(0.01,0.9).minimize(loss,var_list=y_guess)
# same, but more detailed:
# updates = [[tf.gradients(loss,y_guess)[0], y_guess]]
# optimizer = tf.train.MomentumOptimizer(0.01,0.9).apply_gradients(updates)
from IPython.display import clear_output
sess.run(tf.global_variables_initializer())
guesses = [sess.run(y_guess)]
for _ in range(100):
sess.run(optimizer)
guesses.append(sess.run(y_guess))
clear_output(True)
plt.plot(*zip(*guesses), marker='.')
plt.scatter(*sess.run(y_true), c='red')
plt.show()
###Output
_____no_output_____
###Markdown
Logistic regression example Implement the regular logistic regression training algorithm We shall train on a two-class MNIST dataset. This is a binary classification problem, so we'll train a __Logistic Regression with sigmoid__.$$P(y_i | X_i) = \sigma(W \cdot X_i + b) ={ 1 \over {1+e^{- [W \cdot X_i + b]}} }$$The natural choice of loss function is to use binary crossentropy (aka logloss, negative llh):$$ L = {1 \over N} \sum_{X_i,y_i} - [ y_i \cdot \log P(y_i | X_i) + (1-y_i) \cdot \log (1-P(y_i | X_i)) ]$$Mind the minus :)
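To get a feel for the loss before coding it in tensorflow, here is a quick numpy illustration (not the exercise solution): three predicted probabilities scored against their true labels.
###Code
# numpy-only illustration of binary crossentropy on toy values
p = np.array([0.9, 0.2, 0.6])   # predicted P(y=1|x)
y = np.array([1, 0, 1])         # true labels
logloss = -np.mean(y * np.log(p) + (1 - y) * np.log(1 - p))
print(logloss)  # small when predictions agree with labels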
###Code
from sklearn.datasets import load_digits
X, y = load_digits(2, return_X_y=True)
print("y [shape - %s]:" % (str(y.shape)), y[:10])
print("X [shape - %s]:" % (str(X.shape)))
print('X:\n', X[:3,:10])
print('y:\n', y[:10])
plt.imshow(X[0].reshape([8,8]))
# inputs and shareds
weights = <student.create_variable()>
input_X = <student.create_placeholder_matrix()>
input_y = <student.code_placeholder_vector()>
predicted_y_proba = <predicted probabilities for input_X using weights>
loss = <logistic loss (scalar, mean over sample) between predicted_y_proba and input_y>
train_step = <operator that minimizes loss>
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
from sklearn.metrics import roc_auc_score
for i in range(5):
loss_i, _ = sess.run([loss, train_step], ###<YOUR CODE: feed values to placeholders>)
print("loss at iter %i: %.4f" % (i, loss_i))
print("train auc:", roc_auc_score(y_train, sess.run(predicted_y_proba, {input_X: X_train})))
print("test auc:", roc_auc_score(y_test, sess.run(predicted_y_proba, {input_X: X_test})))
print ("resulting weights:")
plt.imshow(sess.run(weights).reshape(8, -1))
plt.colorbar();
###Output
_____no_output_____
###Markdown
Practice 3: my first tensorflow network Your ultimate task for this week is to build your first neural network [almost] from scratch in pure tensorflow. This time you will solve the same digit recognition problem, but at a larger scale* images are now 28x28* 10 different digits* 50k samples Note that you are not required to build 152-layer monsters here. A 2-layer (one hidden, one output) NN should already give you an edge over logistic regression. __[bonus score]__ If you've already beaten logistic regression with a two-layer net, but enthusiasm still ain't gone, you can try improving the test accuracy even further! The milestones would be 95%/97.5%/98.5% accuracy on the test set. __SPOILER!__ At the end of the notebook you will find a few tips and frequently made mistakes. If you feel enough might to shoot yourself in the foot without external assistance, we encourage you to do so, but if you encounter any unsurpassable issues, please do look there before mailing us.
###Code
from mnist import load_dataset
# [down]loading the original MNIST dataset.
# Please note that you should only train your NN on _train sample,
# _val can be used to evaluate out-of-sample error, compare models or perform early-stopping
# _test should be hidden under a rock until final evaluation... But we both know it is near impossible to catch you evaluating on it.
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
print (X_train.shape,y_train.shape)
plt.imshow(X_train[0,0])
<this cell looks as if it wants you to create variables here>
<you could just as well create a computation graph here - loss, optimizers, all that stuff>
<this may or may not be a good place to run optimizer in a loop>
<this may be a perfect cell to write a training & evaluation loop in>
<predict & evaluate on test here, right? No cheating pls.>
###Output
_____no_output_____ |
process.ipynb | ###Markdown
Building a chatbot
###Code
$ mkdir backend
$ cd backend
$ virtualenv env
$ source env/bin/activate
$ pip install django djangorestframework
$ django-admin.py startproject chatbot_api
$ cd chatbot_api
$ python manage.py startapp message
###Output
_____no_output_____
###Markdown
When it's done, we need to add Django Rest Framework to the installed apps parameter of our project, in chatbot_api/chatbot_api/settings.py :
###Code
# chatbot_api/chatbot_api/settings.py
INSTALLED_APPS = (
...
'rest_framework',
    'message.apps.MessageConfig',
)
$ python manage.py migrate
$ python manage.py runserver
###Output
_____no_output_____
###Markdown
Now we can create a model; it's the class that represents our object. Here we want to store messages, including some text, the firstname of the user, and the date of creation.
###Code
# chatbot_api/message/models.py
from django.db import models
class Message(models.Model):
created = models.DateTimeField(auto_now=True)
text = models.TextField()
firstname = models.CharField(max_length=128, default="anonymous")
$ python manage.py migrate
###Output
_____no_output_____
###Markdown
When you receive data on the server, you want to make sure that it's correct. Django lets us create a serializer: it will read the data received from a request and translate it into a model instance. It will also be able to check whether the data is valid, and to serialize objects coming from the database to be sent back to the frontend.
###Code
# chatbot_api/message/serializers.py
from rest_framework import serializers
from message.models import Message
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = ('text', 'firstname')
###Output
_____no_output_____
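###Markdown
To see the serializer in action, you can poke at it from a Django shell (`python manage.py shell`). This is only an illustrative session; the values are arbitrary.
###Code
# illustrative shell session (not part of the project files)
from message.serializers import MessageSerializer

serializer = MessageSerializer(data={"text": "Bonjour !", "firstname": "Alice"})
print(serializer.is_valid())      # expected: True
print(serializer.validated_data)  # the cleaned data, ready to be saved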
###Markdown
Good news: we can now create an API view. It's the API interface of a model. Ours will be fairly simple; we only want to be able to receive messages, one by one, from the front-end.
###Code
# chatbot_api/message/views.py
from message.models import Message
from message.serializers import MessageSerializer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
class MessageView(APIView):
def post(self, request, format=None):
serializer = MessageSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response({"speech_answer": "J'ai bien reçu ton message."}, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
###Output
_____no_output_____
###Markdown
Once we have our view, we want to actually be able to interact with it, so we have to register it by adding it to our list of URLs.
###Code
# chatbot_api/chatbot_api/urls.py
...
from message.views import MessageView
urlpatterns = [
...
url(r'^message/$', MessageView.as_view()),
]
###Output
_____no_output_____
###Markdown
We're almost done. If you try to run the server, you'll see that it doesn't respond with anything interesting. There's no chatbot right now. Let's start an agent on DialogFlow: https://console.dialogflow.com/ Next, we need to build a method that is going to call Dialogflow whenever you receive something. Use your own client key in that method.
###Code
# chatbot_api/message/helpers/dialogflow.py
import requests
def query_dialogflow(message):
client_key = "<CLIENT_KEY>"
headers = {"Authorization": "Bearer " + client_key}
payload = {"lang": "fr", "query": message, "v":20150910, "sessionId":1}
r = requests.get('https://api.dialogflow.com/v1/query', params=payload, headers=headers)
return r
###Output
_____no_output_____
###Markdown
Let's update our view to use that method.
###Code
# chatbot_api/message/views.py
from message.models import Message
from message.serializers import MessageSerializer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from message.helpers.dialogflow import query_dialogflow
class MessageView(APIView):
def post(self, request, format=None):
serializer = MessageSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
r = query_dialogflow(serializer.data.get('text'))
json_response = r.json()
response = json_response.get('result', {}).get("fulfillment", {}).get("speech", "Je n'ai pas compris...")
return Response({"speech_answer": response}, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
###Output
_____no_output_____
###Markdown
Et voilà! Let's run the server one last time.
###Code
$ python manage.py runserver
###Output
_____no_output_____
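###Markdown
With the server running, you can sanity-check the endpoint from another terminal. The request below is only an example (it assumes the Django development server on its default port 8000); the reply should contain the `speech_answer` returned by Dialogflow.
###Code
$ curl -X POST http://127.0.0.1:8000/message/ \
    -H "Content-Type: application/json" \
    -d '{"text": "Salut, comment ça va ?", "firstname": "Alice"}'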
###Markdown
This notebook takes the combined data and references produced from get_references and performs post-processing of the recorded data, including: 1. Cleans numerical value strings. 2. Normalizes recorded compositions. 3. Generates classifiers used for visualizations. 4. Calculates density and Young modulus.
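For orientation, two of the cleaning helpers imported from `utils` in the next cell are assumed to behave roughly like the sketches below. These are hypothetical re-implementations for illustration only; the actual definitions live in `utils.py`.
###Code
# hypothetical sketches of the cleaning helpers (illustration only, not the real utils.py code)
def remove_uncertainty_sketch(value):
    # e.g. "7.9 ± 0.2" -> "7.9": keep only the part before the uncertainty symbol
    if isinstance(value, str) and '±' in value:
        return value.split('±')[0].strip()
    return value

def average_range_sketch(value):
    # e.g. "10-20" -> 15.0: replace a reported range by its midpoint
    if isinstance(value, str) and '-' in value:
        lo, hi = value.split('-')[:2]
        return (float(lo) + float(hi)) / 2
    return value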
###Code
import os
import re
import pandas as pd
import numpy as np
import pymatgen as mg
from utils import *
# import combined data, add refs
df = pd.read_csv('combined_data.csv')
df_refs = pd.read_csv('references/references.csv')
df = df.merge(df_refs, on='IDENTIFIER: Reference ID', how='left')
df['REFERENCE: doi'] = df['REFERENCE: doi_y']
df.head()
# remove uncertainties / clean numerical values
df = df.applymap(remove_uncertainty)
df['PROPERTY: grain size ($\mu$m)'] = df['PROPERTY: grain size ($\mu$m)'].apply(average_range)
df['PROPERTY: Elongation (%)'] = df['PROPERTY: Elongation (%)'].apply(average_range)
# process columns to ensure data types are accurate
numeric_props = ['PROPERTY: grain size ($\mu$m)', 'PROPERTY: ROM Density (g/cm$^3$)', 'PROPERTY: HV', 'PROPERTY: Test temperature ($^\circ$C)',
'PROPERTY: YS (MPa)', 'PROPERTY: UTS (MPa)', 'PROPERTY: Elongation (%)', 'PROPERTY: Exp. Young modulus (GPa)']
df[numeric_props] = df[numeric_props].apply(pd.to_numeric)
df.head()
# ensure formulas are valid
df['FORMULA'] = df['FORMULA'].apply(normalize_and_alphabetize_formula)
# add classifiers
df['PROPERTY: BCC/FCC/other'] = df.apply(categorize_phases, axis=1)
df['PROPERTY: Processing method'] = df['PROPERTY: synthesis method'].apply(standardize_synthesis_method)
df['PROPERTY: Processing method'] = df['PROPERTY: Processing method'].apply(classify_processing_method)
df['PROPERTY: Microstructure'] = df['PROPERTY: Type of phases'].apply(classify_microstructure)
# calculate props
df['PROPERTY: Calculated Density (g/cm$^3$)'] = df['FORMULA'].apply(calculate_density)
df['PROPERTY: Calculated Young modulus (GPa)'] = df.apply(lambda x: calculate_youngs_modulus(x['FORMULA']) if x['PROPERTY: BCC/FCC/other'] != 'other' else '', axis=1)
df.to_csv(os.path.abspath('stats_and_tables/MPEA_dataset_for_stats.csv'), index=False)
df.head()
# define columns of interest, output dataset file
cols = ['IDENTIFIER: Reference ID', 'FORMULA', 'PROPERTY: Microstructure', 'PROPERTY: Processing method', 'PROPERTY: BCC/FCC/other', 'PROPERTY: grain size ($\\mu$m)',
'PROPERTY: Exp. Density (g/cm$^3$)', 'PROPERTY: Calculated Density (g/cm$^3$)', 'PROPERTY: HV', 'PROPERTY: Type of test', 'PROPERTY: Test temperature ($^\\circ$C)', 'PROPERTY: YS (MPa)', 'PROPERTY: UTS (MPa)',
'PROPERTY: Elongation (%)', 'PROPERTY: Elongation plastic (%)', 'PROPERTY: Exp. Young modulus (GPa)', 'PROPERTY: Calculated Young modulus (GPa)', 'PROPERTY: O content (wppm)', 'PROPERTY: N content (wppm)',
'PROPERTY: C content (wppm)', 'REFERENCE: doi', 'REFERENCE: year', 'REFERENCE: title']
df_output = df[cols]
df_output.to_csv(os.path.abspath('MPEA_dataset.csv'), index=False)
df_output
###Output
_____no_output_____
###Markdown
test
###Code
stat = pd.DataFrame()
stat["start"] = mutate.index
stat
persent = mutate/sum(mutate) * 100
stat["count"] = persent
df2["start"].value_counts(normalize=True)
persent
stat
###Output
_____no_output_____
###Markdown
table gen
###Code
df = pd.read_csv("ori_table.csv")
type(df["start"][0])
df["start"] = df["start"].astype(str)
df = df.merge(stat1, on="start")
df
df.to_csv("demo.csv", index=None)
###Output
_____no_output_____
###Markdown
Wine Data Processing This notebook does some CSV manipulation on my CSV data from [Vivino](https://www.vivino.com).
###Code
import os
import pandas as pd
import qgrid
vivino = pd.read_csv('data/tmp/vivino.csv')
vivino.rename(columns={
'Wine name': 'Name',
'Your rating': 'Rating',
'Regional wine style': 'Style',
'Your review': 'Notes',
'Wine type': 'Type',
'Link to wine': 'Link',
'Scan date': 'Date',
}, inplace=True)
vivino.loc[vivino.Style.isnull(), 'Style'] = ''
vivino.loc[vivino.Rating == 'Not given', 'Rating'] = float('nan')
vivino.loc[vivino.Notes.isnull(), 'Notes'] = ''
vivino.Date = pd.to_datetime(vivino.Date)
columns = [
'Winery',
'Name',
'Vintage',
'Region',
'Country',
'Style',
'Date',
'Rating',
'Notes',
'Type',
'Link',
]
vivino = pd.concat([
vivino[columns],
pd.DataFrame(columns=['Winery_Long', 'Winery_Lat'])
], sort=False).sort_values(by='Date')
###Output
_____no_output_____
###Markdown
Make sure to make this a code cell:
pending_wines = 6
###Code
edit_my_wines = qgrid.show_grid(vivino.iloc[-pending_wines:])
edit_my_wines
with open(os.path.expanduser('data/wines/data.csv'), 'a') as f:
edit_my_wines.get_changed_df().to_csv(f, index=False, header=False)
###Output
_____no_output_____
###Markdown
Import
###Code
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt

df = pd.read_excel("./input/PlacardProperties_19Aug0930.xlsx")
df.info()
df.head()
###Output
_____no_output_____
###Markdown
Trim
###Code
columns = dict(
Address="address",
CLASSDSCRP="class"
)
trimmed = df.rename(columns=columns).sort_values("address")
trimmed.head()
trimmed['city'] = "Cedar Rapids"
trimmed['state'] = 'IA'
trimmed.to_csv("./output/placards.csv", index=False)
###Output
_____no_output_____
###Markdown
Geocode Records were geocoded at geocod.io
###Code
mapped = pd.read_csv("./input/geocoded.csv")
gdf = gpd.GeoDataFrame(
mapped,
geometry=gpd.points_from_xy(mapped.longitude, mapped.latitude)
)
fig, ax = plt.subplots()
gdf.plot(ax=ax, color="red");
###Output
_____no_output_____
###Markdown
Export
###Code
gdf.to_file("./output/placards.geojson", driver="GeoJSON")
gdf.to_csv("./output/placards.csv", index=False)
###Output
_____no_output_____
###Markdown
Motivation The motivation of this project is to replicate the results of the [repository of Buffalo Capital Management](https://github.com/wzchen/stock_market_prediction) from 2013 and, in the future, see if markets are still predictable in 2021. The original Kaggle competition is [here](https://www.kaggle.com/c/boston-data-festival-hackathon). - Given the opening, closing, min, max, and volume of a stock in the previous 9 days (and given the opening price of the stock on day 10), can we predict the directional movement of the stock on day 10? **Steps**- Get the data- Exploratory analysis- Model the time series data using a machine learning model- Apply the strategy to the backtest data to see how the strategy would perform **Files** We only use the training dataset of the original Kaggle competition, since the Close price of the stocks is in the dataset, so we can measure the accuracy of our model. The training dataset is then split into train, test and backtest datasets.- **process.ipynb**: A Jupyter Notebook describing the process.- **training.csv**: A csv file containing the stock ID (StId), opening, closing, min, max, and volume of 94 stocks over 500 days. The first opening-day price is scaled to 1. Get the data
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data = pd.read_csv('datasets/ohlc_data.csv')
data
data.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 94 entries, 0 to 93
Columns: 2501 entries, StId to V500
dtypes: float64(2499), int64(2)
memory usage: 1.8 MB
###Markdown
Exploratory Data Analysis Now let's see if the change of price in the $n$ previous days gives predictive power for the change of price on the $(n+1)$th day. We set $n=9$. We only need the open and close of every day:
###Code
import re
# we use regular expression to select MA, MI and V columns in data and drop them
data_cols = data.columns
r = re.compile("^[MA, MI, V]")
drop_cols = filter(r.match, data_cols)
data.drop(columns = drop_cols, axis=1, inplace=True)
data.set_index('StId', inplace=True)
###Output
_____no_output_____
###Markdown
What does the closing price of the 94 stocks look like over 500 days?
###Code
# we use regular expression to select Close columns in data
%matplotlib inline
r = re.compile("^C")
close_cols = filter(r.match, data_cols)
date_dis = np.linspace(1,500,6, dtype=int)
ax = data[close_cols].T.plot(figsize=(10,6), legend=False,
xticks=date_dis,
xlabel='Day', ylabel='Scaled close price of the stock', title='Scaled close price of the 94 stocks in the dataset over 500 days')
ax.set_xticklabels(date_dis)
plt.savefig('images/stocks_closePrice.png')
r = re.compile("^C")
close_cols = filter(r.match, data_cols)
close = data.loc[5, close_cols].T
close_renamed = close.rename(lambda s: s.strip("C"))#.plot(figsize=(10,6), legend=False)
close_renamed.index = close_renamed.index.astype('int64')
close_renamed
###Output
_____no_output_____
###Markdown
There are very big jumps on some days that are not explained in the Kaggle competition or in the Buffalo Capital Management repository. We put 10 days of data for each stock into a single dataframe to feed it into the machine learning algorithms.
###Code
memory_days = 10
num_cols = 2 # open and close columns
start_day = 1
last_day = 440 # from start_day to last_day, make 10 day windows. So 450 days for training and 50 days for backtesting
X_windows = [0] # X_windows are stored as days starts at day=1 (workaround)
# scale a dataframe so the first column (opening price) is 1.0
def scaler(df):
    base = df.iloc[:, 0].copy()  # keep the day-1 opening price before it gets rescaled to 1
    for column in df.columns:
        df[column] = df[column] / base
    return df
for day in range(start_day, last_day+1):
# select the 10 day window
rolling_window = data.iloc[:, day*num_cols-num_cols:(day+memory_days)*num_cols-num_cols]
# scale data to 'O1' column
rolling_window_scaled = scaler(rolling_window)
X_windows.append(rolling_window_scaled)
# set equal column names for all dataframe to concatenate
X_windows[day].columns = X_windows[start_day].columns
# concatenate all 10 day windows in one dataframe
X_windows_list = [X_windows[day] for day in range(start_day, last_day+1)]
X_windows_con = pd.concat(X_windows_list)
# Split the data into train, test and backtest data. The train data is used in machine learning to fit the model and the test data is to check the model on one single 10 day window.
# The backtest data is used to check the strategy by using the prediction of the model.
n_stocks = 94
train_data = X_windows_con.iloc[:(last_day-1)*n_stocks, :]
test_data = X_windows_con.iloc[(last_day-1)*n_stocks:, :]
backtest_data = data.iloc[:, num_cols*(last_day+memory_days):]
train_data.head()
test_data.head()
backtest_data.head()
# remove 10th day close column of test data
X_test = test_data.drop(columns = ['C10'])
# set the upward change in price as 1 and the downward as 0
y_test = (test_data.loc[:,'C10'] > test_data.loc[:,'O10']).astype('uint8')
# We set 9 days data to be the predictor of the 10th day
X_train = train_data.drop(columns = ['C10'])
# set the upward change in price as 1 and the downward as 0
y_train = (train_data.loc[:,'C10'] > train_data.loc[:,'O10']).astype('uint8')
# find the correlation between the interday and intraday stock movement
x = train_data.loc[:,'O10']/train_data.loc[:,'C9']
y = train_data.loc[:,'C10']/train_data.loc[:,'O10']
m, b = np.polyfit(x, y, 1)
r = y.corr(x)
plt.scatter(x, y)
plt.plot(x, m*x+b, '-r')
plt.text(1,1.15, 'r={:.2f}'.format(r), fontsize=16)
plt.xlim((.7,1.3))
plt.ylim((.7,1.3))
plt.xlabel("O10 / C9")
plt.ylabel("C10 / O10")
plt.title("Correlation between interday and intraday stock movement")
plt.savefig('images/correlation_interday_intraday.png')
plt.show()
###Output
_____no_output_____
###Markdown
The big jumps on some days, as we saw in the first figure, cause the correlation (r) to be less negative. We can see the outliers if we zoom out.
###Code
plt.scatter(x, y)
plt.xlim((.2,4.3))
plt.ylim((.2,4.3))
###Output
_____no_output_____
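###Markdown
If one wanted to screen these outliers, a simple (hypothetical) cut would be to drop the 10-day windows whose day-10 intraday move is extreme. The sketch below is illustrative only and is not applied in this analysis.
###Code
# hypothetical outlier screen (not used below): flag windows whose day-10
# intraday ratio C10/O10 falls outside +/-50%
jump = train_data['C10'] / train_data['O10']
mask = jump.between(0.5, 1.5)
print("windows that would be dropped:", (~mask).sum(), "out of", len(train_data))
# train_data_screened = train_data[mask]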
###Markdown
In an ideal case, we should find the reason and correct or clean the dataset. At this phase, we let them stay in the dataset. Model We simply choose the random forest model at this stage of the project.
###Code
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn import metrics,preprocessing
from sklearn.model_selection import cross_val_predict
forest_clf = RandomForestClassifier(random_state=42)
params = forest_clf.get_params()
print(params)
y_predict = cross_val_predict(forest_clf, X_train, y_train, cv=5)
###Output
{'bootstrap': True, 'ccp_alpha': 0.0, 'class_weight': None, 'criterion': 'gini', 'max_depth': None, 'max_features': 'auto', 'max_leaf_nodes': None, 'max_samples': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 100, 'n_jobs': None, 'oob_score': False, 'random_state': 42, 'verbose': 0, 'warm_start': False}
###Markdown
Let's look at the confusion matrix.
###Code
from sklearn.metrics import confusion_matrix
conf_mx = confusion_matrix(y_train, y_predict)
# normalize confusion matrix
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
norm_conf_mx
plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
plt.show()
from sklearn.metrics import f1_score
f1_score(y_train, y_predict)
###Output
_____no_output_____
###Markdown
Another metric would be AUC.
###Code
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
roc_measure = False
if roc_measure == True:
y_probas_forest = cross_val_predict(forest_clf, X_train, y_train, cv=5, method="predict_proba")
y_scores_forest = y_probas_forest[:, 1] # score = proba of positive class
fpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train, y_scores_forest)
# Now you are ready to plot the ROC curve. It is useful to plot the first ROC curve as well to see how they compare
plt.plot(fpr_forest, tpr_forest, label="Random Forest")
plt.legend(loc="lower right")
plt.show()
roc_auc_score(y_train, y_scores_forest)
###Output
_____no_output_____
###Markdown
And finally we fit the model.
###Code
forest_clf.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
And finally saving the model.
###Code
import pickle
pickle_out = open("classifier.pkl", mode = "wb")
pickle.dump(forest_clf, pickle_out)
pickle_out.close()
###Output
_____no_output_____
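###Markdown
For reference, the pickled classifier can later be restored like this (assuming the file written above):
###Code
# reload the saved model when needed
pickle_in = open("classifier.pkl", mode="rb")
loaded_clf = pickle.load(pickle_in)
pickle_in.close()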
###Markdown
Predict on the test set
###Code
y_pred = forest_clf.predict(X_test)
conf_mx_test = confusion_matrix(y_test, y_pred)
conf_mx_test
# normalize confusion matrix
row_sums = conf_mx_test.sum(axis=1, keepdims=True)
norm_conf_mx_test = conf_mx_test / row_sums
norm_conf_mx_test
plt.matshow(norm_conf_mx_test, cmap=plt.cm.gray)
plt.show()
f1_score(y_test, y_pred)
y_pred
StID = range(1,len(y_pred)+1)
predictions = pd.DataFrame(y_pred, columns=['movement'], index=StID)
predictions.head(10)
predictions.to_csv('predictions/predictions.csv')
###Output
_____no_output_____
###Markdown
Applying the strategy Now we test our model on the backtest dataset. To draw the profit/loss over time, we use the `trade_stat_logger` library (source: [here](https://github.com/shilewenuw/trade_stat_logger)). Each day, if the model predicts an increase in price, we buy at the open and sell at the close.
###Code
from trade_stat_logger.logger import SimpleLogger
logger = SimpleLogger()
backtest_days = np.arange(memory_days, len(backtest_data.columns)/2, 1, dtype=int)
for day in backtest_days:
# get the close and open of 9 days and the 10th day open
X_backtest = backtest_data.iloc[:, 2*(day-memory_days):2*day-1]
# predict the 10th close
y_predict_10 = forest_clf.predict(X_backtest)
# apply the strategy to all tickers in dataset in that specific day
for index, rows in X_backtest.iterrows():
share_price_open = backtest_data.iloc[index-1, 2*day-2]
share_price_close = backtest_data.iloc[index-1, 2*day-1]
# if the 10th close is higher than open, then buy. if it's lower, do nothing (no short selling for now)
if y_predict_10[index-1] == 1:
logger.log(security = index, shares = 1, share_price = share_price_open)
logger.log(security = index, shares = -1, share_price = share_price_close)
# Analyze the performance of the strategy
logger.graph_statistics(show_window=False)
plt.savefig('./images/graph_statistics.png')
###Output
/home/motare/.local/lib/python3.9/site-packages/trade_stat_logger/logger.py:115: UserWarning: For most accurate performance results, please clear all holdings, which you can do with clear_all_positions()
warn('For most accurate performance results, please clear all holdings, which you can do with clear_all_positions()')
###Markdown
-----------------------------
###Code
import re
import string
import json
from typing import Union, Dict, List, Tuple
from dataclasses import dataclass
import numpy as np
import pandas as pd
from tqdm import tqdm
from transformers import AutoConfig, AutoTokenizer, AutoModel
from summarizer import Summarizer
from gensim.models.word2vec import Word2Vec
from nltk.tokenize import sent_tokenize
from sklearn.cluster import KMeans
from rouge import Rouge
import nltk
nltk.download('punkt')
###Output
[nltk_data] Downloading package punkt to /root/nltk_data...
[nltk_data] Unzipping tokenizers/punkt.zip.
###Markdown
Set Colab Directory
###Code
from google.colab import drive
drive.mount('/content/drive/')
import os
os.chdir('/content/drive/My Drive/Colab Notebooks/IndoSum')
os.getcwd()
###Output
_____no_output_____
###Markdown
Processing dataset
###Code
data = []
with open('datasets/test.01.jsonl') as file:
for line in file.readlines():
data.append(json.loads(line))
# create a function to flatten the tokens in the 'paragraphs' key
def flatten_paragraphs(list_paragraphs):
list_sentences = []
for paragraph in list_paragraphs:
for sentence in paragraph:
sent = ' '.join(sentence)
list_sentences.append(sent)
return list_sentences
def flatten(nested_list):
final_list = []
for list1 in nested_list:
for list2 in list1:
final_list.append(list2)
return final_list
def flatten_summaries(list_sentences):
final_sentences = []
for sentence in list_sentences:
sent = ' '.join(sentence)
final_sentences.append(sent)
return final_sentences
flatten(data[0]['gold_labels'])
flatten_paragraphs(data[0]['paragraphs'])
flatten_summaries(data[0]['summary'])
for datum in tqdm(data):
datum['flatten_article'] = flatten_paragraphs(datum['paragraphs'])
datum['flatten_summary'] = flatten_summaries(datum['summary'])
with open('datasets/test_01.json', 'w') as file:
file.write(json.dumps(data))
###Output
_____no_output_____
###Markdown
Collect Data
###Code
data = []
with open('datasets/test_01.json', 'r') as file:
data = json.loads(file.read())
X_test = [' '.join(datum['flatten_article']) for datum in data]
y_test = [' '.join(datum['flatten_summary']) for datum in data]
###Output
_____no_output_____
###Markdown
Define Text Preprocessing
###Code
REGEX_URL = r'((http|https)\:\/\/)[a-zA-Z0-9\.\/\?\:@\-_=#]+\.([a-zA-Z]){2,6}([a-zA-Z0-9\.\&\/\?\:@\-_=#])*'
clear_url = lambda text: re.sub(REGEX_URL, ' ', text)
DOT_REGEX = r"(?<!\w)(?:[A-Z][A-Za-z]{,3}|[a-z]{1,2})\."
############################################################################
@dataclass(frozen=True)
class Preprocessing:
"""Preprocessing class used to preprocess news text before Text
Summarization is applied.
- Usage:
```
>>> preprocessor = Preprocessing()
>>> text = "any news text"
>>> site_name = "media site"
>>> clean_text = preprocessor(text, site_name)
```
"""
def _clear_content_head(self, content: str, site_name: str,
head_pattern: str=r"\s\-+\s") -> str:
"""used to clear any head in given news content"""
match = re.search(head_pattern, content)
if match:
idx_end = match.end()
site_name = site_name.split()[0]
if site_name.lower() in content[:idx_end].lower():
content = content[idx_end:]
return content
#################################
def _clear_abbreviation_dot(self, text: str) -> str:
"""used to rip off abbreviation dot in given text"""
# replace any matched abbr with empty string
text_list = list(text)
for i, match in enumerate(re.finditer(DOT_REGEX, text)):
no_dot = match.group().replace('.', '')
idx = match.span()
text_list[idx[0]-i: idx[1]-i] = no_dot
# join list text and clear multiple whitespaces
text = ''.join(text_list)
        text = re.sub(' +', ' ', text)
        return text
#################################
def __call__(self, content: str, site_name: str) -> Union[str, bool]:
"""the method is used to:
- clear any content head
        - clear any leading/trailing whitespace & punct
- clear any abbreviation dot
Args:
- content (str): news content
- site_name (str): news site name
Return:
preprocessed content
"""
        content = self._clear_content_head(content, site_name)
        content = clear_url(content)
        content = self._clear_abbreviation_dot(content)
        # clear leading/trailing whitespaces & puncts
content = content.strip(string.punctuation)
content = content.strip()
# change multiple whitespaces to single one
content = re.sub(' +', ' ', content)
# clear whitespace before dot
content = re.sub(r'\s+([?,.!"])', r'\1', content)
return content
sample_text = """Bisnis.com , JAKARTA - Emiten barang konsumen PT Unilever Indonesia Tbk. memutuskan untuk membagikan dividen interim kepada pemegang saham pada akhir tahun ini. Berdasarkan pengumuman perseroan di harian Bisnis Indonesia hari ini, Senin (22/11/2021), emiten dengan kode saham UNVR ini akan membagikan dividen interim senilai total Rp2,51 triliun. Keputusan pembagian dividen ini diambil dalam Rapat Direksi Unilever Indonesia pada 19 November 2021. Dividen interim itu akan diambil dari laba bersih perseroan untuk periode yang berakhir pada 30 Juni 2021. Dengan jumlah pemegang saham UNVR sebanyak 38,15 miliar saham, artinya satu saham UNVR akan mendapat dividen senilai Rp66. Berikut jadwal pelaksanaan dividen interim UNVR: Berdasarkan laporan keuangan per 30 Juni 2021, UNVR membukukan pendapatan senilai Rp20,17 triliun atau turun 7,32 persen dibandingkan periode yang sama tahun sebelumnya Rp21,77 triliun. Laba perseroan terkoreksi 15,85 persen menjadi Rp3,04 triliun dari sebelumnya Rp3,61 triliun. Laba sebelum bunga, pajak, penyusutan, dan amortisasi (EBITDA) turun 13,91 persen menjadi Rp4,55 triliun dari sebelumnya Rp5,29 triliun. Sebelumnya, UNVR tercatat membagikan dividen final tahun buku 2020 senilai Rp3,81 triliun atau Rp100 per saham pada Juni 2021. Dividen tersebut berasal dari laba bersih tahun penuh 2020. Dengan adanya dividen interim yang akan dibayar pada Desember 2021, maka UNVR membagikan total dividen Rp6,31 triliun sepanjang 2021."""
sample_text
preprocessor = Preprocessing()
preprocessor(sample_text, "Bisnis")
###Output
_____no_output_____
###Markdown
Define Summarizers BERT Extractive Summarizer
###Code
# Load model, model config and tokenizer via Transformers
custom_config = AutoConfig.from_pretrained(pretrained_model_name_or_path="indobenchmark/indobert-base-p1")
custom_config.output_hidden_states = True
custom_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="indobenchmark/indobert-base-p1")
custom_model = AutoModel.from_pretrained("indobenchmark/indobert-base-p1", config=custom_config)
# instantiate model
model = Summarizer(custom_model=custom_model, custom_tokenizer=custom_tokenizer)
summary = model(sample_text)
summary
###Output
_____no_output_____
###Markdown
Word2Vec Extractive Summarizer - define `Embedder()` class for word embeddings process
###Code
@dataclass(frozen=True)
class Embedder:
"""This class is used to create word embeddings from given sentence.
The processes implemented are the following:
- convert each token of given sentence to its representative vector;
- calculate mean of all tokens in given sentence in order to get a
sentence embedding.
Arg:
- model: a gensim Word2Vec model
"""
model: Word2Vec
######################
def __get_vector(self, token: str) -> np.ndarray:
"""used to convert given token to its representative vector"""
try:
return self.model.wv.get_vector(token)
except KeyError:
return False
######################
def __averaging(self, token_matrix: np.ndarray) -> np.ndarray:
"""used to calculate mean of an array of vectors in order to get a
sentence embedding"""
return np.mean(token_matrix, axis=0)
######################
def embed(self, sentence: str, return_oov: bool=False) -> np.ndarray:
"""combine all other methods to execute the embedding process.
Args:
- sentence (str): a sentence to be process to get its embedding
- return_oov(bool): indicate if you'd like to return the OOV
(out-of-vocabulary) tokens
Returns:
If all tokens in given sentence are OOV tokens, return False (and with
list of OOVs if 'return_oov' set to True).
else, return the sentence embedding (and with list of OOVs if
'return_oov' set to True).
"""
# make the given sentence lower and collect only words
list_tok = re.findall(r"\w+", sentence.lower())
# buffers
list_vec = []
OOV_tokens = []
# loop through each token of given sentence
for token in list_tok:
tokvec = self.__get_vector(token) # convert to vector
# check if no OOV token produced
if isinstance(tokvec, np.ndarray):
list_vec.append(tokvec)
else:
OOV_tokens.append(token)
# if all tokens in given sentence are OOV tokens
if not list_vec:
if return_oov:
return False, OOV_tokens
return False
# if not
list_vec = np.array(list_vec)
if return_oov:
return (self.__averaging(list_vec), OOV_tokens)
return self.__averaging(list_vec)
###Output
_____no_output_____
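###Markdown
A small self-contained smoke test of `Embedder` (added for illustration, not part of the original pipeline): it trains a tiny throwaway Word2Vec model instead of the pre-trained `models/model_wv` used later, and assumes gensim >= 4 (`vector_size` keyword).
###Code
# hypothetical toy corpus, just to exercise Embedder without the full news model
toy_model = Word2Vec(sentences=[["unilever", "membagikan", "dividen"],
                                ["dividen", "interim", "dibayar", "desember"]],
                     vector_size=16, min_count=1, seed=1)
toy_embedder = Embedder(toy_model)
vec, oov = toy_embedder.embed("unilever membagikan dividen spesial", return_oov=True)
print(vec.shape)  # (16,): the mean of the in-vocabulary token vectors
print(oov)        # ['spesial']: the token missing from the toy vocabulary
###Output
_____no_output_____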
###Markdown
- define `Clustering()` class for clustering model process
###Code
@dataclass(frozen=True)
class Clustering:
"""This class is used to cluster sentence embeddings in order to execute
    text summarization. The processes implemented are the following:
    - define a KMeans clustering model;
- train the model;
- find sentences closest to the cluster's center.
Args:
- features (np.ndarray): sentence embeddings
- random_state (int - optional): random state for random seed
"""
features: np.ndarray
random_state: int = 1
######################
def __define_model(self, k: int) -> None:
"""used to define KNN clustering model"""
model = KMeans(n_clusters=k, random_state=self.random_state)
object.__setattr__(self, 'model', model)
######################
def __find_closest_sents(self, centroids: np.ndarray) -> Dict:
"""
Find the closest arguments to centroid.
- centroids: Centroids to find closest.
- return: Closest arguments.
"""
centroid_min = 1e10
cur_arg = -1
args = {}
used_idx = []
for j, centroid in enumerate(centroids):
for i, feature in enumerate(self.features):
value = np.linalg.norm(feature - centroid)
if value < centroid_min and i not in used_idx:
cur_arg = i
centroid_min = value
used_idx.append(cur_arg)
args[j] = cur_arg
centroid_min = 1e10
cur_arg = -1
return args
######################
def cluster(self, ratio: float = 0.2,
num_sentences: int = None) -> List[int]:
"""
Clusters sentences based on the ratio.
- ratio: Ratio to use for clustering.
- num_sentences: Number of sentences. Overrides ratio.
return: Sentences index that qualify for summary.
"""
# set k value
if num_sentences is not None:
if num_sentences == 0:
return []
k = min(num_sentences, len(self.features))
else:
k = max(int(len(self.features) * ratio), 1)
        # define and train the model
self.__define_model(k)
self.model.fit(self.features)
# find the closest embeddings to the center
centroids = self.model.cluster_centers_
cluster_args = self.__find_closest_sents(centroids)
sorted_values = sorted(cluster_args.values())
return sorted_values
###Output
_____no_output_____
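###Markdown
A quick check of `Clustering` on random "sentence embeddings" (illustrative only, not part of the original pipeline; it relies on numpy and scikit-learn's KMeans already imported at the top of the notebook): with 10 embeddings and ratio=0.2 we expect k = max(int(10 * 0.2), 1) = 2 centres, so two sentence indices come back in ascending order.
###Code
rng = np.random.RandomState(0)
toy_features = rng.rand(10, 16)  # 10 fake sentence embeddings of dimension 16
toy_clusterer = Clustering(toy_features, random_state=1)
print(toy_clusterer.cluster(ratio=0.2))  # indices of the 2 sentences closest to the centres
###Output
_____no_output_____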
###Markdown
- define `Word2VecSummarizer()` class for the main class of Word2Vec Extractive Summarizer
###Code
@dataclass(frozen=True)
class Word2VecSummarizer:
"""The main class for Word2Vec Summarizer
Args:
- model: A gensim Word2Vec model (optional)
- random_state: state for random seed (optional)
"""
def __init__(self, model: Word2Vec, random_state: int=1):
object.__setattr__(self, 'model', model)
object.__setattr__(self, 'random_state', random_state)
######################
def __split_sentence(self, text: str) -> List[str]:
"""used to split given text into sentences"""
sentences = sent_tokenize(text)
return [sent for sent in sentences if len(sent) >= 5]
######################
def __set_embedder(self) -> None:
"""used to instantiate Embedder object"""
embedder = Embedder(self.model)
object.__setattr__(self, 'embedder', embedder)
######################
def __set_clusterer(self, features: np.ndarray,
random_state: int) -> None:
"""used to instantiate Clustering object"""
clusterer = Clustering(features, random_state)
object.__setattr__(self, 'clusterer', clusterer)
######################
def summarize(self, text: str,
use_first: bool = True,
num_sentences: int = None,
ratio: float = 0.2,
return_oov: bool = False) -> Tuple[List[str], np.ndarray]:
"""
This method executes the summarization part.
Args:
- text (str): text to be processed
        - use_first (bool-default True): indicate if the first sentence of the text is used
- num_sentences (int): whether you'd like to return certain number of summarized sentences (optional)
- ratio (float-default 0.2): ratio of sentences to use
- return_oov(bool-default False): indicate if you'd like to return the OOV
(out-of-vocabulary) tokens
Returns: tuple of sentences and related embeddings (and OOV list if return_oov set to True)
"""
list_sentence = self.__split_sentence(text)
self.__set_embedder()
# set buffers
sent_vecs = []
oov_list = []
# loop through each sentence to create each embeddings
for sentence in list_sentence:
if return_oov:
vec, oov = self.embedder.embed(sentence, return_oov)
oov_list.extend(oov)
else:
vec = self.embedder.embed(sentence, return_oov)
# check if no OOV returned
if isinstance(vec, np.ndarray):
sent_vecs.append(vec)
sent_vecs = np.array(sent_vecs) # create array of all embeddings
# instantiate clustering & process
self.__set_clusterer(sent_vecs, self.random_state)
summary_idx = self.clusterer.cluster(ratio, num_sentences)
if use_first:
if not summary_idx:
summary_idx.append(0)
elif summary_idx[0] != 0:
summary_idx.insert(0, 0)
sentences = [list_sentence[idx] for idx in summary_idx]
embeddings = np.asarray([sent_vecs[idx] for idx in summary_idx])
if return_oov:
return sentences, oov_list
return sentences
MODEL_PATH = "models/model_wv"
MODEL_WORD2VEC = Word2Vec.load(MODEL_PATH)
word2vecsum = Word2VecSummarizer(MODEL_WORD2VEC)
' '.join(word2vecsum.summarize(sample_text))
###Output
_____no_output_____
###Markdown
Evaluation
###Code
list_sitenames = [datum['source'] for datum in data]
# summarize!
summaries = []
for article, site in tqdm(zip(X_test[:500], list_sitenames[:500])):
clean_article = preprocessor(article, site)
sum_bert = model(clean_article)
sum_word2vec = ' '.join(word2vecsum.summarize(clean_article))
summaries.append((sum_bert, sum_word2vec))
###Output
500it [00:15, 32.72it/s]
###Markdown
Evaluate ROUGE
###Code
rouge = Rouge()
rouge_scores_bert = rouge.get_scores(hyps=[summ[0] for summ in summaries],
refs=y_test[:500], avg=True)
rouge_scores_word2vec = rouge.get_scores(hyps=[summ[1] for summ in summaries],
refs=y_test[:500], avg=True)
rouge_scores_bert
rouge_scores_word2vec
# ROUGE metrics for BERT
pd.DataFrame(rouge_scores_bert)
# ROUGE metrics for Word2Vec
pd.DataFrame(rouge_scores_word2vec)
###Output
_____no_output_____
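###Markdown
A convenience view of the scores above (hypothetical helper cell, not in the original analysis): it assumes the rouge package's usual `{'rouge-1'|'rouge-2'|'rouge-l': {'f', 'p', 'r'}}` structure and puts the F1 values of both summarizers side by side.
###Code
pd.DataFrame({
    'BERT': {metric: scores['f'] for metric, scores in rouge_scores_bert.items()},
    'Word2Vec': {metric: scores['f'] for metric, scores in rouge_scores_word2vec.items()},
})
###Output
_____no_output_____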
###Markdown
Evaluate Processing Speed - test processing speed of BERT in CPU environment
###Code
%%timeit
for article, site in zip(X_test[:5], list_sitenames[:5]):
clean_article = preprocessor(article, site)
sum_bert = model(clean_article)
###Output
1 loop, best of 5: 11 s per loop
###Markdown
- test processing speed of BERT in GPU environment
###Code
%%timeit
for article, site in zip(X_test[:5], list_sitenames[:5]):
clean_article = preprocessor(article, site)
sum_bert = model(clean_article)
###Output
1 loop, best of 5: 1.63 s per loop
###Markdown
- test processing speed of Word2Vec in CPU environment
###Code
%%timeit
for article, site in zip(X_test[:5], list_sitenames[:5]):
clean_article = preprocessor(article, site)
sum_word2vec = ' '.join(word2vecsum.summarize(clean_article))
pd.DataFrame({'Type': ['BERT-CPU', 'BERT-GPU', 'Word2Vec'],
'Result': ['1 loop, best of 5: 11 s per loop',
'1 loop, best of 5: 1.63 s per loop',
'10 loops, best of 5: 84.4 ms per loop']})
len(data)
###Output
_____no_output_____
###Markdown
Resources: This depth model architecture is from https://arxiv.org/abs/1809.04766. We will make changes in the model but the APIs will remain the same. All model-related files will be in the src folder. For preprocessing, we can all use this notebook or add a Python file additionally.
###Code
img_path = 'examples/ExpKITTI_joint/278_org.png'
img_org = np.array(Image.open(img_path))
depth_y_path = 'examples/ExpKITTI_joint/278_depth.png'
depth_y = np.array(Image.open(depth_y_path))
# processing orignal depth images
assert(np.max(depth_y) > 255)
depth_y = depth_y.astype(np.float) / 256.
###Output
_____no_output_____
###Markdown
PCA
###Code
pca_img = img_org.reshape(375, -1 )
# Increasing n_components will increase explained variance but will decrease our accuracy benefits.
pca = PCA(n_components = 64, svd_solver='randomized').fit(pca_img)
pca_img = pca.transform(pca_img)
print(pca_img.shape )
print("Retained variance", np.sum(pca.explained_variance_ratio_))
img = pca.inverse_transform(pca_img)
img = img.reshape(375, 1242, 3)
###Output
(375, 64)
Retained variance 0.9622597384383695
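###Markdown
A hypothetical helper (not part of the original pipeline): instead of hard-coding `n_components=64`, choose the smallest number of principal components whose cumulative explained variance reaches a target ratio.
###Code
def smallest_n_components(image_2d, target_variance=0.95):
    """Smallest number of components whose cumulative explained variance reaches the target."""
    cumulative = np.cumsum(PCA().fit(image_2d).explained_variance_ratio_)
    k = int(np.searchsorted(cumulative, target_variance) + 1)
    return min(k, len(cumulative))
# e.g. smallest_n_components(img_org.reshape(375, -1), 0.95)
###Output
_____no_output_____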
###Markdown
Evaluation
###Code
def RMSE(target, prediction):
return np.sqrt(np.mean((target - prediction)**2))
def measure_duration(img, times = 10):
durations = []
for _ in range(times):
start = time.time()
depth = run(img)
end = time.time()
durations.append(end - start)
return depth, (sum(durations) / times)
depth_pca, duration_pca = measure_duration(img, 10)
depth_no_pca, duration_no_pca = measure_duration(img_org, 10)
diff = duration_no_pca - duration_pca
print("Run time diff ", round(diff, 4))
###Output
Run time diff -0.0016
###Markdown
Improved accuracy but no benefit in processing time :(
###Code
rmse = RMSE(depth_y, depth_no_pca)
pca_rmse = RMSE(depth_y, depth_pca)
print('accuracy change %: ',(rmse - pca_rmse)*100/rmse )
###Output
accuracy change %: 19.899622033804242
###Markdown
RMSE and time taken with frame skipping
###Code
def runDepthEstimator(start_frame=275, end_frame=300, num_iter=100, isFrameSkippingEnabled=True):
prevPredDepth = depth_no_pca
prevDepthExists = False
runModel = True
totalRmse = 0
start = time.time()
for img in range(start_frame,end_frame):
imgPath = 'examples/kitti_car/0000000'+str(img)+'.png'
depthPath = 'examples/kitti_car/depth/0000000'+str(img)+'.png'
depth_y = np.array(Image.open(depthPath))
# processing orignal depth images
assert(np.max(depth_y) > 255)
depth_y = depth_y.astype(np.float) / 256.
    #Run the model every time if frame skipping is disabled
if(not isFrameSkippingEnabled):
runModel = True
if(runModel):
#Debug statement to identify progress of function
print("Running DL model for frame: ", imgPath)
imgMap = np.array(Image.open(imgPath))
#Run model and obtain depth of image
currDepth, _ = measure_duration(imgMap, num_iter)
#Calculate difference in depth maps
#If difference is below a threshold, skip the next frame and set flag accordingly
if(prevDepthExists and isFrameSkippingEnabled):
depthDiff = RMSE(prevPredDepth, currDepth)
if(depthDiff <= 1.5):
runModel = False
else:
prevDepthExists = True
modelRmse = RMSE(currDepth, depth_y)
prevPredDepth = currDepth
else:
print("Skipping frame ", imgPath)
#Calculate rmse with assumed depth(previous frame depth) instead of calculated depth
#since frame was skipped
modelRmse = RMSE(prevPredDepth, depth_y)
runModel = True
totalRmse += modelRmse
end = time.time()
totalTime = end - start
totalRmse /= (end_frame - start_frame + 1)
return totalTime, totalRmse
# Test model time and accuracy with frame skipping enabled
timeWithSkip, rmseWithSkip = runDepthEstimator(275, 300, 3, True)
###Output
Running DL model for frame: examples/kitti_car/0000000275.png
Running DL model for frame: examples/kitti_car/0000000276.png
Skipping frame examples/kitti_car/0000000277.png
Running DL model for frame: examples/kitti_car/0000000278.png
Skipping frame examples/kitti_car/0000000279.png
Running DL model for frame: examples/kitti_car/0000000280.png
Skipping frame examples/kitti_car/0000000281.png
Running DL model for frame: examples/kitti_car/0000000282.png
Skipping frame examples/kitti_car/0000000283.png
Running DL model for frame: examples/kitti_car/0000000284.png
Skipping frame examples/kitti_car/0000000285.png
Running DL model for frame: examples/kitti_car/0000000286.png
Skipping frame examples/kitti_car/0000000287.png
Running DL model for frame: examples/kitti_car/0000000288.png
Skipping frame examples/kitti_car/0000000289.png
Running DL model for frame: examples/kitti_car/0000000290.png
Skipping frame examples/kitti_car/0000000291.png
Running DL model for frame: examples/kitti_car/0000000292.png
Running DL model for frame: examples/kitti_car/0000000293.png
Running DL model for frame: examples/kitti_car/0000000294.png
Skipping frame examples/kitti_car/0000000295.png
Running DL model for frame: examples/kitti_car/0000000296.png
Running DL model for frame: examples/kitti_car/0000000297.png
Running DL model for frame: examples/kitti_car/0000000298.png
Running DL model for frame: examples/kitti_car/0000000299.png
###Markdown
RMSE and time taken without frame skipping
###Code
timeWithoutSkip, rmseWithoutSkip = runDepthEstimator(275, 300, 3, False)
###Output
Running DL model for frame: examples/kitti_car/0000000275.png
Running DL model for frame: examples/kitti_car/0000000276.png
Running DL model for frame: examples/kitti_car/0000000277.png
Running DL model for frame: examples/kitti_car/0000000278.png
Running DL model for frame: examples/kitti_car/0000000279.png
Running DL model for frame: examples/kitti_car/0000000280.png
Running DL model for frame: examples/kitti_car/0000000281.png
Running DL model for frame: examples/kitti_car/0000000282.png
Running DL model for frame: examples/kitti_car/0000000283.png
Running DL model for frame: examples/kitti_car/0000000284.png
Running DL model for frame: examples/kitti_car/0000000285.png
Running DL model for frame: examples/kitti_car/0000000286.png
Running DL model for frame: examples/kitti_car/0000000287.png
Running DL model for frame: examples/kitti_car/0000000288.png
Running DL model for frame: examples/kitti_car/0000000289.png
Running DL model for frame: examples/kitti_car/0000000290.png
Running DL model for frame: examples/kitti_car/0000000291.png
Running DL model for frame: examples/kitti_car/0000000292.png
Running DL model for frame: examples/kitti_car/0000000293.png
Running DL model for frame: examples/kitti_car/0000000294.png
Running DL model for frame: examples/kitti_car/0000000295.png
Running DL model for frame: examples/kitti_car/0000000296.png
Running DL model for frame: examples/kitti_car/0000000297.png
Running DL model for frame: examples/kitti_car/0000000298.png
Running DL model for frame: examples/kitti_car/0000000299.png
###Markdown
Comparing performance of Depth Estimator with and without skipping
###Code
print("Time and RMSE values for depth estimator with frame skipping:\n")
print("Total time taken in seconds: ", timeWithSkip)
print("Root mean squared error value: ", rmseWithSkip)
print("\n")
print("Time and RMSE values for depth estimator without frame skipping:\n")
print("Total time taken in seconds: ", timeWithoutSkip)
print("Root mean squared error value: ", rmseWithoutSkip)
print("\n")
timeSaved = (float(timeWithoutSkip - timeWithSkip)/timeWithoutSkip) * 100
print("Improvement in time with frame skipping (in percentage):", timeSaved)
rmseChange = (float(rmseWithSkip - rmseWithoutSkip)/rmseWithSkip) * 100
print("RMSE change between with and without frame skipping (in percentage):", rmseChange)
###Output
Time and RMSE values for depth estimator with frame skipping:
Total time taken in seconds: 6.588036775588989
Root mean squared error value: 25.119838235318262
Time and RMSE values for depth estimator without frame skipping:
Total time taken in seconds: 10.0713210105896
Root mean squared error value: 25.115423202954666
Improvement in time with frame skipping (in percentage): 34.58617028826778
RMSE change between with and without frame skipping (in percentage): 0.01757587896162718
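###Markdown
The skip decision inside `runDepthEstimator` can be factored out into a small predicate; a sketch (hypothetical helper, with the 1.5 threshold taken from the function above):
###Code
def should_skip_next_frame(prev_depth, curr_depth, threshold=1.5):
    """Skip the next frame when two consecutive depth maps barely changed."""
    return prev_depth is not None and RMSE(prev_depth, curr_depth) <= threshold
###Output
_____no_output_____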
###Markdown
Plot
###Code
plt.figure(figsize=(36, 24))
plt.subplot(131)
plt.imshow(img_org)
plt.title('orig img')
plt.axis('off')
plt.subplot(132)
plt.imshow(depth_y, cmap='plasma', vmin=MIN_DEPTH, vmax=MAX_DEPTH)
plt.title('KITTI Orignal depth map')
plt.axis('off')
plt.subplot(133)
plt.imshow(depth_pca, cmap='plasma', vmin=MIN_DEPTH, vmax=MAX_DEPTH)
plt.title('Predicted depth')
plt.axis('off');
###Output
_____no_output_____
###Markdown
Create a symbol to ID mapping that includes both approved symbols and synonyms
###Code
# Synonyms that uniquely map to GeneIDs
symbol_map = {k: v[0] for k, v in synonym_to_ids.items() if len(v) == 1}
# Override synonyms with symbols
symbol_map.update(symbol_to_id)
with open('data/symbol-map.json', 'w') as write_file:
json.dump(symbol_map, write_file, indent=2, sort_keys=True)
###Output
_____no_output_____
###Markdown
dis-cover debian packages analysis
###Code
import pickle
import matplotlib.pyplot as plt
import numpy as np
import requests
from dis_cover.analysis import CppClass
extracted = pickle.load(open("extracted.pickle", "rb"))
data = [data for (data, source) in extracted]
source = [source for (data, source) in extracted]
total_classes = 0
packages_where_class = 0
unique_classes = set()
classes_in_packages = []
distribution = {}
no_rtti_but_classes = 0
no_flag_no_classes = 0
print("The total number of packages that have libgcc1 as a dependency is %d." % len(data))
print("The algorithm that extracted this data focused on binaries in four directories : \n- usr/bin\n- usr/sbin\n- usr/lib\n- usr/games\n")
print("There are a few malformed packages we are not able to analyze.")
for package in data:
files = data[package]
classes_in_this_package = set()
classes = 0
for file in files:
if file in ["failed", "no_file_found", "could_not_extract"]:
print("%17s : %s" % (file, package))
continue
total_classes += len(data[package][file])
for c in data[package][file]:
if c in classes_in_this_package:
continue
classes_in_this_package.add(c)
unique_classes.add(c)
classes += 1
classes_in_packages.append((package, classes))
if distribution.get(classes) == None:
distribution[classes] = 1
else:
distribution[classes] += 1
if classes > 0:
packages_where_class += 1
if len(source[package]):
no_rtti_but_classes += 1
else:
if not len(source[package]):
no_flag_no_classes += 1
print("The number of packages where we were able to extract classes is %d out of %d (%d%%)." % (
packages_where_class,
len(data),
packages_where_class / (len(data) or 1) * 100,
))
print("The total number of classes found is %d, and of those, %d%% were unique across all packages." % (
total_classes,
round(100 * len(unique_classes) / total_classes)
))
print(
"The mean number of unique classes in packages where there are classes is %d." % (total_classes / (packages_where_class or 1)),
)
fig, ax = plt.subplots()
size = 0.3
vals = np.array([
[packages_where_class - no_rtti_but_classes, no_rtti_but_classes],
[len(source) - packages_where_class - no_flag_no_classes, no_flag_no_classes]
])
cmap = plt.get_cmap("tab20c")
outer_colors = cmap(np.arange(3)*4)
inner_colors = cmap([12, 9, 9, 12])
wedges, texts = ax.pie(vals.sum(axis=1), radius=1, colors=outer_colors,
wedgeprops=dict(width=size, edgecolor='w'))
wedges2, texts2 = ax.pie(vals.flatten(), radius=1-size, colors=inner_colors,
wedgeprops=dict(width=size, edgecolor='w'))
ax.set(aspect="equal", title='Repartition of the packages by classes and -fno-rtti')
ax.legend(wedges + wedges2,
[
'classes extracted',
'no classes extracted',
'no mention of -fno-rtti',
'mentions -fno-rtti',
],
title="legend",
loc="center left",
bbox_to_anchor=(1, 0, 0.5, 1))
plt.show()
print("A \"Mention\" of `-fno-rtti` means that there were results when running a global grep for `-fno-rtti` in the package's source code")
print("Usually, we see that in a same project, two binaries can be compiled with different flags, which leads to this result.")
classes_in_packages.sort(key=lambda p: p[1], reverse=True)
n = 20
print("Top %d packages by number of unique classes:" % n)
for (p, c) in classes_in_packages[:n]:
print(" - %s has %d classes" % (p ,c))
x = range(classes_in_packages[20][1] + 1)
y = [distribution.get(v) or 0 for v in x]
plt.figure(figsize=(15,4))
plt.plot(x, y)
plt.yscale('log')
plt.title("Class quantity distribution excluding the top 20 packages by number of classes")
plt.xlabel("Number of classes")
plt.ylabel("Number of packages - log scale")
plt.show()
###Output
_____no_output_____
###Markdown
By popularity. We now use data from [popcon](https://popcon.debian.org), the official debian popularity contest, in order to sort and isolate packages by **installation** and **usage**.
###Code
res = requests.get("https://popcon.debian.org/by_inst")
popularity = res.text
classes_dict = dict(classes_in_packages)
packages_with_votes = []
# each data row of popcon's by_inst report looks like "<rank> <name> <inst> <vote> ...";
# lines starting with '#' are comments and the last few lines form a footer (hence [:-3])
for line in popularity.split("\n")[:-3]:
if line[0] == "#":
continue
(name, inst, vote) = line.split()[1:4]
classes = classes_dict.get(name)
if classes != None:
packages_with_votes.append((name, int(inst), int(vote), classes))
by_inst = sorted(packages_with_votes, key=lambda x: x[1], reverse=True)
by_vote = sorted(packages_with_votes, key=lambda x: x[2], reverse=True)
def print_stats(sorted_list, n):
mean = 0
no_classes = 0
filtered_list = sorted_list[:n]
for v in filtered_list:
mean += v[-1]
if v[-1] == 0:
no_classes += 1
mean /= n
print("There are %d packages with no classes (either -fno-rtti or no virtual methods)." % no_classes)
print("There is a mean of %d unique classes per package" % mean)
n = 100
print("In the top %d C++ packages, sorted by installations:" % n)
print_stats(by_inst, n)
print()
print("In the top %d C++ packages, sorted by usage:" % n)
print_stats(by_vote, n)
n = 1000
print()
print("In the top %d C++ packages, sorted by installations:" % n)
print_stats(by_inst, n)
print()
print("In the top %d C++ packages, sorted by usage:" % n)
print_stats(by_vote, n)
###Output
In the top 100 C++ packages, sorted by installations:
There are 39 packages with no classes (either -fno-rtti or no virtual methods).
There is a mean of 217 unique classes per package
In the top 100 C++ packages, sorted by usage:
There are 29 packages with no classes (either -fno-rtti or no virtual methods).
There is a mean of 234 unique classes per package
In the top 1000 C++ packages, sorted by installations:
There are 390 packages with no classes (either -fno-rtti or no virtual methods).
There is a mean of 103 unique classes per package
In the top 1000 C++ packages, sorted by usage:
There are 360 packages with no classes (either -fno-rtti or no virtual methods).
There is a mean of 110 unique classes per package
###Markdown
Process inputs.
###Code
display(ProcessInputsWidget(process))
###Output
_____no_output_____
###Markdown
Process outputs.
###Code
display(ProcessOutputsWidget(process))
follower = ProcessFollowerWidget(
process,
followers=[ProgressBarWidget(), ProcessReportWidget(), ProcessCallStackWidget(), RunningCalcJobOutputWidget()], path_to_root='../',
update_interval=2)
display(follower)
follower.follow(detach=True)
###Output
_____no_output_____
###Markdown
BCDP CORDEX Example: Quick Overview. Here we will do a quick walkthrough of using BCDP to process some regional climate simulations from CORDEX, the Coordinated Regional Climate Downscaling Experiment.
###Code
import os
import glob
import numpy as np
import bcdp
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Create a file conventions template. Here we only need the model and variable names so the rest of the file template is filled in by wildcards.
###Code
variable = 'clt'
project = 'CORDEX-Africa'
template = '*_{model}_*_{variable}.nc'
bcdp.build_extractor(project, template, name_field='model', index=[1, 6])
###Output
_____no_output_____
###Markdown
Load the data. Because we have loaded the template, the loader now knows exactly how to extract the required information from the filenames.
###Code
paths = f'in/*{variable}*'
ens = bcdp.load_local(paths=paths, project=project)
print(ens.size)
###Output
297.91MB
###Markdown
The loader returns an `Ensemble` object, which is essentially a collection of datasets and applies preprocessing operations to each of them. Here we will regrid the data to a coarser (0.88 degree) grid using bilinear interpolation, and consider only the winter months (DJF).
###Code
output_grid = bcdp.utils.grid_from_res((0.88, 0.88), ens.overlap)
ens_u = ens.homogenize(backend='scipy', method='linear',
output_grid=output_grid, clean=False)
###Output
_____no_output_____
###Markdown
Now that the underlying data structures are homogeneous (same grid and time step), we can convert the ensemble to an xarray DataArray with dimensions (names, time, lat, lon).
###Code
def show_ens(ens):
da = ens.bundle('CORDEX').add_mean('CORDEX').first
da.to_netcdf('CORDEX_Africa_clt.nc')
plt.figure()
da.mean('time').plot(x='x', y='y', col='names', col_wrap=3)
plt.savefig(f'cordex_africa.png')
###Output
_____no_output_____
###Markdown
We can easily visualize the annual climatology with xarray's built-in plotting methods.
###Code
show_ens(ens_u)
###Output
_____no_output_____
###Markdown
Motor curve generator
###Code
from pathlib import Path
import pandas as pd
from motor_parser import *
gear_info = []
working_dir = Path()
motors_dir = working_dir.joinpath('motors')
for path in motors_dir.glob("**/*.yaml"):
motor = parse_config(path)
gear_min_for_high_speed, gear_max_for_high_speed = gearing_for_motor(motor, 212, 6.5, 302.1)
gear_min_for_low_speed, gear_max_for_low_speed = gearing_for_motor(motor, 212, 13.20, 152.79)
    # keep the tighter of the two ranges: the gearing has to satisfy both the high-speed
    # and the low-speed targets, so take the larger minimum and the smaller maximum ratio
    if (gear_min_for_high_speed.gear.ratio < gear_min_for_low_speed.gear.ratio):
gear_min = gear_min_for_low_speed
else:
gear_min = gear_min_for_high_speed
if (gear_max_for_high_speed.gear.ratio < gear_max_for_low_speed.gear.ratio):
gear_max = gear_max_for_high_speed
else:
gear_max = gear_max_for_low_speed
motor_gear_specifics = [ motor.name, motor.manufacturer.name, gear_min.gear.ratio, gear_max.gear.ratio]
gear_info.append(motor_gear_specifics)
pd.DataFrame(gear_info, columns=['motor', 'manufacturer', 'minimum gear ratio', 'maximum gear ratio'])
###Output
_____no_output_____
###Markdown
Download and process the Uberon ontology
###Code
import collections
import re
import pandas
import obo
# Download most recent uberon release
! wget --no-verbose --timestamping --directory-prefix download/ http://purl.obolibrary.org/obo/uberon/ext.obo
! wget --no-verbose --timestamping --directory-prefix download/ http://purl.obolibrary.org/obo/uberon/basic.obo
###Output
_____no_output_____
###Markdown
Read and process the ontology
###Code
# Read obo into graph
with open('download/basic.obo') as read_file:
basic = obo.read_obo(read_file)
dict(collections.Counter(key for u, v, key in basic.edges(keys=True)))
# Extract information from the graph
term_rows = []
xref_rows = []
subset_rows = []
for node, data in basic.nodes(data=True):
term_rows.append((node, data['name']))
for xref in data.get('xref', []):
xref_rows.append((node, xref))
for subset in data.get('subset', []):
subset_rows.append((node, subset))
term_df = pandas.DataFrame(term_rows, columns=['uberon_id', 'uberon_name']).sort_values(['uberon_id', 'uberon_name'])
xref_df = pandas.DataFrame(xref_rows, columns=['uberon_id', 'xref']).sort_values(['uberon_id', 'xref'])
subset_df = pandas.DataFrame(subset_rows, columns=['uberon_id', 'subset']).sort_values(['uberon_id', 'subset'])
# Create a dataframe of Uberon terms
term_df.to_csv('data/terms.tsv', sep='\t', index=False)
term_df.head()
# Update MESH IDs that are tree numbers
url = 'https://raw.githubusercontent.com/dhimmel/mesh/b6893d6502deeaa0f702128d9c8bbddff6b4c755/data/tree-numbers.tsv'
tree_number_df = pandas.read_table(url)
tn_to_id = dict(zip(tree_number_df.mesh_tree_number, tree_number_df.mesh_id))
def update_xref(x):
vocab, identifier = x.split(':', 1)
if vocab == 'MESH':
if re.search('D[0-9]{6}', identifier):
return x
return tn_to_id.get(identifier) or x
return x
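# quick sanity checks (hypothetical identifiers, not taken from the xref table):
# descriptor-style MeSH IDs and non-MeSH xrefs pass through unchanged,
# only MeSH tree numbers get remapped via tn_to_id
assert update_xref('MESH:D001921') == 'MESH:D001921'
assert update_xref('FMA:50801') == 'FMA:50801'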
xref_df.xref = xref_df.xref.map(update_xref)
# Create a dataframe of cross-references
xref_df.to_csv('data/xref.tsv', sep='\t', index=False)
xref_df.head()
# Create a dataframe of term subsets
subset_df.to_csv('data/subset.tsv', sep='\t', index=False)
subset_dict = {subset: set(df.uberon_id) for subset, df in subset_df.groupby('subset')}
subset_df.head()
###Output
_____no_output_____
###Markdown
Create `hetio-slim`
`hetio-slim` is a subset of terms created for our [specific project](https://dx.doi.org/10.15363/thinklab.4):
+ potentially human-relevant (definitively non-human terms are removed)
+ in `uberon_slim`
+ not in `non_informative`, `upper_level`, `grouping_class`
+ have a MeSH cross-reference
###Code
human_df = pandas.read_table('data/human-constraint.tsv')
human_ids = set(human_df.query('no_negative_evidence == 1').uberon_id)
merged_df = term_df[term_df.uberon_id.isin(human_ids)].merge(xref_df)
merged_df['mesh_id'] = merged_df.xref.map(lambda x: x.split(':', 1)[1] if x and x.startswith('MESH:') else '')
merged_df = merged_df[merged_df.mesh_id != ''].drop('xref', 1)
exclude = subset_dict['non_informative'] | subset_dict['upper_level'] | subset_dict['grouping_class']
merged_df = merged_df[-merged_df.uberon_id.isin(exclude)]
merged_df = merged_df[merged_df.uberon_id.isin(subset_dict['uberon_slim'])]
merged_df.head()
len(merged_df)
# Add mesh_name column
url = 'https://raw.githubusercontent.com/dhimmel/mesh/b6893d6502deeaa0f702128d9c8bbddff6b4c755/data/terms.tsv'
mesh_df = pandas.read_table(url)
merged_df = merged_df.merge(mesh_df)
assert not any(merged_df.uberon_id.duplicated())
# Add BTO cross-references. Assumes that uberon-to-bto relationships are one-to-one, which is occasionally not true.
bto_df = xref_df[xref_df.xref.str.startswith('BTO:').fillna(False)]
bto_df = bto_df.rename(columns={'xref': 'bto_id'})
bto_df = bto_df[bto_df.uberon_id.isin(merged_df.uberon_id)]
merged_df = merged_df.merge(bto_df, how='left').drop_duplicates('uberon_id')
# Save hetio-slim as a tsv
merged_df.to_csv('data/hetio-slim.tsv', index=False, sep='\t')
###Output
_____no_output_____
###Markdown
Subcorrelation matrix
###Code
with open("biomart.json") as b:
eid_to_type = json.load(b)
eid_to_type
def biomart_id_to_type(key):
if (key in eid_to_type):
return eid_to_type[key]
else:
return None
biomart_gene_types = np.transpose([ biomart_id_to_type(str(key)[2:-1]) for key in eids ])
biomart_filtered_types = biomart_gene_types[biomart_gene_types!=None]
counts = np.unique(biomart_filtered_types, return_counts=True)
counts
g_types = {
"ig": ['IG_C_gene', 'IG_C_pseudogene', 'IG_D_gene', 'IG_J_gene',
'IG_J_pseudogene', 'IG_V_gene', 'IG_V_pseudogene'],
"mito": ['Mt_rRNA','Mt_tRNA'],
"tr": ['TR_C_gene', 'TR_J_gene', 'TR_V_gene','TR_V_pseudogene'],
"pseudo": ['pseudogene', 'transcribed_processed_pseudogene',
'transcribed_unitary_pseudogene',
'transcribed_unprocessed_pseudogene',
'translated_processed_pseudogene',
'translated_unprocessed_pseudogene',
'unitary_pseudogene', 'unprocessed_pseudogene',
'polymorphic_pseudogene', 'processed_pseudogene'
],
"rRNA": ["rRNA", 'rRNA_pseudogene'],
"other_RNA": ['scRNA', 'scaRNA','snRNA', 'snoRNA', 'misc_RNA', 'vaultRNA'],
"protein_coding": ["protein_coding"],
"TEC": ["TEC"], "lncRNA": ["lncRNA"], "miRNA": ["miRNA"]
}
flipped = {}
keys = list(g_types.keys())
for i in range(len(g_types)):
key = keys[i]
val = g_types[key]
for j in val:
flipped[j] = key
print(len(flipped))
print(len(counts[0]))
gene_exp = pd.DataFrame(np.matrix(gene_exp))
def subcorrelation(gtypes):
"""
Calculate subcorrelation by looping through the
gene types in order, putting their indices into
categories and then getting the correlation matrix.
"""
g_idx_dict = {"None": []}
for idx in range(len(gtypes)):
gtype = gtypes[idx]
if gtype == None: continue
category = flipped[gtype]
if category not in g_idx_dict:
g_idx_dict[category] = []
g_idx_dict[category].append(idx)
print("The dict has {} keys".format(len(g_idx_dict)))
for key in g_idx_dict:
rows = g_idx_dict[key]
print("{} has {} values".format(key, len(rows)))
gene_matrix = gene_exp.iloc[rows]
corr = np.triu(np.corrcoef(gene_matrix))
np.fill_diagonal(corr, 0)
subcor = data.create_dataset("{}_corr".format(key), data=corr)
gene_order = meta.create_dataset("{}_genes".format(key), data=rows)
subcorrelation(biomart_gene_types)
print(18463+6706+9901+283+1116+279+85+125+18+688)
print(sum(counts[1]))
print(list(data.keys()))
print(list(meta.keys()))
f.close()
###Output
['None_corr', 'TEC_corr', 'expression', 'ig_corr', 'lncRNA_corr', 'miRNA_corr', 'mito_corr', 'other_RNA_corr', 'processed_expression', 'protein_coding_corr', 'pseudo_corr', 'rRNA_corr', 'tr_corr']
['None_genes', 'TEC_genes', 'ensembl_id', 'genes', 'ig_genes', 'lncRNA_genes', 'miRNA_genes', 'mito_genes', 'other_RNA_genes', 'protein_coding_genes', 'pseudo_genes', 'rRNA_genes', 'tr_genes']
###Markdown
Correlation matrix
###Code
# get the upper triangle and make the diagonal zero of the correlation matrix
cor = np.triu(np.corrcoef(exp))
np.fill_diagonal(cor,0)
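# tiny sanity check of the masking above (toy 2x3 array, not the expression data):
# two perfectly correlated rows give an all-ones correlation matrix, and after
# np.triu + fill_diagonal only the single upper-triangle entry survives
toy = np.triu(np.corrcoef(np.array([[1., 2., 3.], [2., 4., 6.]])))
np.fill_diagonal(toy, 0)
# toy is now [[0., 1.], [0., 0.]]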
# correlation matrix
cor
correlation_matrix = f.create_dataset("correlation_matrix", data=cor)
cor = f['correlation_matrix']
# flatten the matrix to get the counts
flat = np.matrix(cor).flatten()
# remove the zero values in the matrix
flat = flat[flat != 0]
# get the counts for the unique letters in the matrix
counts = np.unique(flat, return_counts=True)
counts
# Number of 1 values
counts[1][-1]
# Number of counts total (total number of correlations)
sum(counts[1])
counts
# scan the 4900 largest unique correlation values for the first one that reaches 0.99
# (the position is printed with a +4900 offset)
high = counts[0][-4900:]
for index in range(len(high)):
    val = high[index]
    if val >= 0.99:
        print(index+4900)
        break
# total number of correlation pairs with value >= 0.99, using the position (75) found above
sum(counts[1][(-4900+75):])
###Output
_____no_output_____
###Markdown
Global
###Code
%matplotlib inline
importlib.reload(ut)
plt.rcParams['text.usetex'] = True
plt.style.use(['dark_background'])
plt.style.use(['default'])
###Output
_____no_output_____
###Markdown
Data
###Code
# load data
data = ut.load_data(os.path.join('data', 'v8-ortho', 'forest-23*', '*.zip'))
# simulation
index = 0
simulation = list(data.keys())[index]
print('simulations:\n', '\n '.join([f'{i}: ' + (f'{x} [x]' if x == simulation else x) for i, x in enumerate(list(data.keys()))]))
# load images
df = data[simulation]['images']
df = df[df['type'] == 'monochrome']
df = df.reset_index(drop=True)
print('images:', df.shape[0])
# load parameters
parameters = data[simulation]['parameters']
parameters['images'] = df.shape[0]
print('\nparameters:', json.dumps(parameters, indent=4))
###Output
_____no_output_____
###Markdown
Plots
###Code
fig, ax = plt.subplots(figsize=(16, 16))
# plot stage image
ut.plot_image(ax, data[simulation]['stage'], 'stage')
# plot sample images
ut.plot_images(df['data'], df['name'])
df_integrate = df[:18]
# integrate images
integrated = ut.integrate_image(df_integrate, parameters, N=30)
# grayscale images
grayscaled = np.array([ut.grayscale_image(x) for x in df_integrate['data']])
image_vs_integrated = np.hstack([grayscaled, integrated]).reshape(integrated.shape * np.array([2, 1, 1]))
image_vs_integrated_labels = np.dstack([df_integrate['name'], df_integrate['name'] + '-integral']).flatten()
# plot integrated images
ut.plot_images(image_vs_integrated, image_vs_integrated_labels, rows=6, cols=6)
# integrate ground
ground, alphas = ut.integrate_ground(df, parameters)
# aggregate alphas
df_alpha = ut.aggregate_alphas(alphas, sample=None)
# calculate ground visibility
scanned = np.count_nonzero(ground[:, :, 0])
captured = np.count_nonzero(ground[:, :, 1])
visibility = captured / scanned
fig, axs = plt.subplots(1, 3, figsize=(24, 6))
# plot ground
ut.plot_heatmap(axs[0], ground[:, :, 0], 'scanned pixels (count)')
ut.plot_heatmap(axs[1], ground[:, :, 1], 'visible pixels (count)')
ut.plot_heatmap(axs[2], ut.normalize_image(ground[:, :, 1] > 0), f'visibility ({visibility:.2f})')
fig, axs = plt.subplots(1, 3, figsize=(24, 6))
# plot alpha
for i, column in enumerate(['scanned', 'visible', 'ratio']):
df_alpha.plot(kind='line', x='alpha', y=column, label=f'{column} (mean)', ax=axs[i])
# density
density = 1 - visibility
mean = 1 - (ground[:, :, 1] > 0).mean()
print('density:', density)
print('mean:', mean)
###Output
_____no_output_____ |
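###Markdown
A minimal numpy illustration of the visibility/density bookkeeping used above (toy array, not the simulation data; assumes numpy is imported as in the rest of the notebook).
###Code
# channel 0 counts how often a ground pixel was scanned, channel 1 how often it was
# actually visible; visibility is the fraction of scanned pixels seen at least once,
# and density is its complement
toy_ground = np.zeros((4, 4, 2))
toy_ground[:, :, 0] = 1   # every pixel scanned
toy_ground[:2, :, 1] = 1  # only the top half ever visible
toy_visibility = np.count_nonzero(toy_ground[:, :, 1]) / np.count_nonzero(toy_ground[:, :, 0])
print(toy_visibility)      # 0.5
print(1 - toy_visibility)  # toy density estimate: 0.5
###Output
_____no_output_____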