Model backlog/Models/26-openvaccine-lstm-wave-net.ipynb
###Markdown
Dependencies
###Code
import warnings, json, random, os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.metrics import mean_squared_error
import tensorflow as tf
import tensorflow.keras.layers as L
import tensorflow.keras.backend as K
from tensorflow.keras import optimizers, losses, Model
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
def seed_everything(seed=0):
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
SEED = 0
seed_everything(SEED)
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Model parameters
###Code
config = {
"BATCH_SIZE": 64,
"EPOCHS": 120,
"LEARNING_RATE": 1e-3,
"ES_PATIENCE": 10,
"N_FOLDS": 5,
"N_USED_FOLDS": 5,
"PB_SEQ_LEN": 107,
"PV_SEQ_LEN": 130,
}
with open('config.json', 'w') as json_file:
    json.dump(config, json_file)
config
###Output
_____no_output_____
###Markdown
Load data
###Code
database_base_path = '/kaggle/input/stanford-covid-vaccine/'
train = pd.read_json(database_base_path + 'train.json', lines=True)
test = pd.read_json(database_base_path + 'test.json', lines=True)
print(f'Train samples: {len(train)}')
display(train.head())
print(f'Test samples: {len(test)}')
display(test.head())
###Output
Train samples: 2400
###Markdown
Auxiliary functions
###Code
# Shared vocabulary covering all three input features; the per-feature
# vocabularies below are kept for reference but the model uses token2int throughout
token2int = {x: i for i, x in enumerate('().ACGUBEHIMSX')}
token2int_seq = {x: i for i, x in enumerate('ACGU')}
token2int_struct = {x: i for i, x in enumerate('().')}
token2int_loop = {x: i for i, x in enumerate('BEHIMSX')}
def plot_metrics(history):
    # The first half of the keys are train metrics, the second half their 'val_' counterparts
    metric_list = [m for m in history.keys() if m != 'lr']
    size = len(metric_list)//2
    fig, axes = plt.subplots(size, 1, sharex='col', figsize=(20, size * 5))
    axes = axes.flatten() if size > 1 else [axes]
    for index in range(size):
        metric_name = metric_list[index]
        val_metric_name = metric_list[index+size]
        axes[index].plot(history[metric_name], label='Train %s' % metric_name)
        axes[index].plot(history[val_metric_name], label='Validation %s' % metric_name)
        axes[index].legend(loc='best', fontsize=16)
        axes[index].set_title(metric_name)
        axes[index].axvline(np.argmin(history[metric_name]), linestyle='dashed')
        axes[index].axvline(np.argmin(history[val_metric_name]), linestyle='dashed', color='orange')
    plt.xlabel('Epochs', fontsize=16)
    sns.despine()
    plt.show()
def preprocess_inputs(df, encoder, cols=['sequence', 'structure', 'predicted_loop_type']):
input_lists = df[cols].applymap(lambda seq: [encoder[x] for x in seq]).values.tolist()
return np.transpose(np.array(input_lists), (0, 2, 1))
def evaluate_model(df, y_true, y_pred, target_cols):
    # Column-wise RMSE, optionally restricted to a subset of rows
    def rmse_per_target(idxs=slice(None)):
        return [np.sqrt(np.mean((y_true[idxs, :, i] - y_pred[idxs, :, i])**2))
                for i in range(len(target_cols))]
    metrics = rmse_per_target()                                              # complete data
    metrics_clean_sn = rmse_per_target(df[df['SN_filter'] == 1].index)       # SN_filter == 1
    metrics_noisy_sn = rmse_per_target(df[df['SN_filter'] == 0].index)       # SN_filter == 0
    metrics_clean_sig = rmse_per_target(df[df['signal_to_noise'] > 1].index)   # signal_to_noise > 1
    metrics_noisy_sig = rmse_per_target(df[df['signal_to_noise'] <= 1].index)  # signal_to_noise <= 1
    # Prepend the mean across targets as the overall score for each subset
    target_cols = ['Overall'] + target_cols
    metrics = [np.mean(metrics)] + metrics
    metrics_clean_sn = [np.mean(metrics_clean_sn)] + metrics_clean_sn
    metrics_noisy_sn = [np.mean(metrics_noisy_sn)] + metrics_noisy_sn
    metrics_clean_sig = [np.mean(metrics_clean_sig)] + metrics_clean_sig
    metrics_noisy_sig = [np.mean(metrics_noisy_sig)] + metrics_noisy_sig
    metrics_df = pd.DataFrame({'Metric/MCRMSE': target_cols, 'Complete': metrics,
                               'Clean (SN)': metrics_clean_sn, 'Noisy (SN)': metrics_noisy_sn,
                               'Clean (signal)': metrics_clean_sig, 'Noisy (signal)': metrics_noisy_sig})
    return metrics_df
def get_dataset(x, y=None, labeled=True, shuffled=True, batch_size=32, buffer_size=-1, seed=0):
    inputs = {'inputs_seq': x[:, 0, :, :],
              'inputs_struct': x[:, 1, :, :],
              'inputs_loop': x[:, 2, :, :]}
    if labeled:
        dataset = tf.data.Dataset.from_tensor_slices((inputs, {'outputs': y}))
    else:
        dataset = tf.data.Dataset.from_tensor_slices(inputs)
    if shuffled:
        dataset = dataset.shuffle(2048, seed=seed)
    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(buffer_size)  # buffer_size=-1 corresponds to tf.data AUTOTUNE
    return dataset
def get_dataset_sampling(x, y=None, shuffled=True, seed=0):
dataset = tf.data.Dataset.from_tensor_slices(({'inputs_seq': x[:, 0, :, :],
'inputs_struct': x[:, 1, :, :],
'inputs_loop': x[:, 2, :, :]},
{'outputs': y}))
if shuffled:
dataset = dataset.shuffle(2048, seed=seed)
return dataset
###Output
_____no_output_____
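###Markdown
A quick sanity check of the shared token encoding and the `preprocess_inputs` layout (illustrative only, on a hypothetical two-row frame; not part of the original pipeline):
###Code
# Encode a toy example with the shared vocabulary and verify the (samples, positions, features) layout.
_demo = pd.DataFrame({'sequence': ['GGAC', 'AUCG'],
                      'structure': ['(..)', '.().'],
                      'predicted_loop_type': ['SHHS', 'ESSE']})
_demo_inputs = preprocess_inputs(_demo, token2int)
assert _demo_inputs.shape == (2, 4, 3)  # (samples, positions, [sequence, structure, loop])
assert _demo_inputs[0, 0, 0] == token2int['G']  # first base of the first sequence
###Output
_____no_output_____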
###Markdown
Model
###Code
def MCRMSE(y_true, y_pred):
colwise_mse = tf.reduce_mean(tf.square(y_true - y_pred), axis=1)
return tf.reduce_mean(tf.sqrt(colwise_mse), axis=1)
def wave_block(x, filters, kernel_size, n):
dilation_rates = [2 ** i for i in range(n)]
    x = L.Conv1D(filters=filters,
                 kernel_size=1,
                 padding='same')(x)
res_x = x
for dilation_rate in dilation_rates:
tanh_out = L.Conv1D(filters=filters,
kernel_size=kernel_size,
padding='same',
activation='tanh',
dilation_rate=dilation_rate)(x)
sigm_out = L.Conv1D(filters=filters,
kernel_size=kernel_size,
padding='same',
activation='sigmoid',
dilation_rate=dilation_rate)(x)
x = L.Multiply()([tanh_out, sigm_out])
x = L.Conv1D(filters=filters,
kernel_size=1,
padding='same')(x)
res_x = L.Add()([res_x, x])
return res_x
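# Note: wave_block(x, filters, kernel_size, n) stacks n gated dilated convolutions with
# dilation rates 1, 2, ..., 2**(n-1) (WaveNet-style tanh * sigmoid gating plus a residual
# connection), so its receptive field grows to roughly 1 + (kernel_size - 1) * (2**n - 1) steps.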
def model_fn(embed_dim=160, hidden_dim=384, dropout=.5, sp_dropout=.2, pred_len=68, n_outputs=5):
inputs_seq = L.Input(shape=(None, 1), name='inputs_seq')
inputs_struct = L.Input(shape=(None, 1), name='inputs_struct')
inputs_loop = L.Input(shape=(None, 1), name='inputs_loop')
shared_embed = L.Embedding(input_dim=len(token2int), output_dim=embed_dim, name='shared_embedding')
embed_seq = shared_embed(inputs_seq)
embed_struct = shared_embed(inputs_struct)
embed_loop = shared_embed(inputs_loop)
x_concat = L.concatenate([embed_seq, embed_struct, embed_loop], axis=2, name='embedding_concatenate')
x_reshaped = L.Reshape((-1, x_concat.shape[2]*x_concat.shape[3]))(x_concat)
x = L.SpatialDropout1D(sp_dropout)(x_reshaped)
x = L.Bidirectional(L.LSTM(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))(x)
x_1 = L.Bidirectional(L.LSTM(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))(x)
x_1 = L.Add()([x_1, x])
# Wave net
x = wave_block(x_1, 16, 3, 12)
x = L.BatchNormalization()(x)
x = L.Dropout(.1)(x)
x = wave_block(x, 32, 3, 8)
x = L.BatchNormalization()(x)
x = L.Dropout(.1)(x)
x = wave_block(x, 64, 3, 4)
x = L.BatchNormalization()(x)
x = L.Dropout(.1)(x)
x = wave_block(x, 128, 3, 1)
x = L.BatchNormalization()(x)
x = L.Dropout(.1)(x)
x_2 = L.Bidirectional(L.LSTM(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))(x)
x = L.Add()([x_2, x_1])
    # Only the first pred_len positions of each sequence are scored, so truncate before the output head
    x_truncated = x[:, :pred_len]
outputs = L.Dense(n_outputs, activation='linear', name='outputs')(x_truncated)
model = Model(inputs=[inputs_seq, inputs_struct, inputs_loop], outputs=outputs)
opt = optimizers.Adam(learning_rate=config['LEARNING_RATE'])
model.compile(optimizer=opt, loss=MCRMSE)
return model
model = model_fn()
model.summary()
###Output
Model: "functional_1"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
inputs_seq (InputLayer) [(None, None, 1)] 0
__________________________________________________________________________________________________
inputs_struct (InputLayer) [(None, None, 1)] 0
__________________________________________________________________________________________________
inputs_loop (InputLayer) [(None, None, 1)] 0
__________________________________________________________________________________________________
shared_embedding (Embedding) (None, None, 1, 160) 2240 inputs_seq[0][0]
inputs_struct[0][0]
inputs_loop[0][0]
__________________________________________________________________________________________________
embedding_concatenate (Concaten (None, None, 3, 160) 0 shared_embedding[0][0]
shared_embedding[1][0]
shared_embedding[2][0]
__________________________________________________________________________________________________
reshape (Reshape) (None, None, 480) 0 embedding_concatenate[0][0]
__________________________________________________________________________________________________
spatial_dropout1d (SpatialDropo (None, None, 480) 0 reshape[0][0]
__________________________________________________________________________________________________
bidirectional (Bidirectional) (None, None, 768) 2657280 spatial_dropout1d[0][0]
__________________________________________________________________________________________________
bidirectional_1 (Bidirectional) (None, None, 768) 3542016 bidirectional[0][0]
__________________________________________________________________________________________________
add (Add) (None, None, 768) 0 bidirectional_1[0][0]
bidirectional[0][0]
__________________________________________________________________________________________________
conv1d (Conv1D) (None, None, 16) 12304 add[0][0]
__________________________________________________________________________________________________
conv1d_1 (Conv1D) (None, None, 16) 784 conv1d[0][0]
__________________________________________________________________________________________________
conv1d_2 (Conv1D) (None, None, 16) 784 conv1d[0][0]
__________________________________________________________________________________________________
multiply (Multiply) (None, None, 16) 0 conv1d_1[0][0]
conv1d_2[0][0]
__________________________________________________________________________________________________
conv1d_3 (Conv1D) (None, None, 16) 272 multiply[0][0]
__________________________________________________________________________________________________
conv1d_4 (Conv1D) (None, None, 16) 784 conv1d_3[0][0]
__________________________________________________________________________________________________
conv1d_5 (Conv1D) (None, None, 16) 784 conv1d_3[0][0]
__________________________________________________________________________________________________
multiply_1 (Multiply) (None, None, 16) 0 conv1d_4[0][0]
conv1d_5[0][0]
__________________________________________________________________________________________________
conv1d_6 (Conv1D) (None, None, 16) 272 multiply_1[0][0]
__________________________________________________________________________________________________
conv1d_7 (Conv1D) (None, None, 16) 784 conv1d_6[0][0]
__________________________________________________________________________________________________
conv1d_8 (Conv1D) (None, None, 16) 784 conv1d_6[0][0]
__________________________________________________________________________________________________
multiply_2 (Multiply) (None, None, 16) 0 conv1d_7[0][0]
conv1d_8[0][0]
__________________________________________________________________________________________________
conv1d_9 (Conv1D) (None, None, 16) 272 multiply_2[0][0]
__________________________________________________________________________________________________
conv1d_10 (Conv1D) (None, None, 16) 784 conv1d_9[0][0]
__________________________________________________________________________________________________
conv1d_11 (Conv1D) (None, None, 16) 784 conv1d_9[0][0]
__________________________________________________________________________________________________
multiply_3 (Multiply) (None, None, 16) 0 conv1d_10[0][0]
conv1d_11[0][0]
__________________________________________________________________________________________________
conv1d_12 (Conv1D) (None, None, 16) 272 multiply_3[0][0]
__________________________________________________________________________________________________
conv1d_13 (Conv1D) (None, None, 16) 784 conv1d_12[0][0]
__________________________________________________________________________________________________
conv1d_14 (Conv1D) (None, None, 16) 784 conv1d_12[0][0]
__________________________________________________________________________________________________
multiply_4 (Multiply) (None, None, 16) 0 conv1d_13[0][0]
conv1d_14[0][0]
__________________________________________________________________________________________________
conv1d_15 (Conv1D) (None, None, 16) 272 multiply_4[0][0]
__________________________________________________________________________________________________
conv1d_16 (Conv1D) (None, None, 16) 784 conv1d_15[0][0]
__________________________________________________________________________________________________
conv1d_17 (Conv1D) (None, None, 16) 784 conv1d_15[0][0]
__________________________________________________________________________________________________
multiply_5 (Multiply) (None, None, 16) 0 conv1d_16[0][0]
conv1d_17[0][0]
__________________________________________________________________________________________________
conv1d_18 (Conv1D) (None, None, 16) 272 multiply_5[0][0]
__________________________________________________________________________________________________
conv1d_19 (Conv1D) (None, None, 16) 784 conv1d_18[0][0]
__________________________________________________________________________________________________
conv1d_20 (Conv1D) (None, None, 16) 784 conv1d_18[0][0]
__________________________________________________________________________________________________
multiply_6 (Multiply) (None, None, 16) 0 conv1d_19[0][0]
conv1d_20[0][0]
__________________________________________________________________________________________________
conv1d_21 (Conv1D) (None, None, 16) 272 multiply_6[0][0]
__________________________________________________________________________________________________
conv1d_22 (Conv1D) (None, None, 16) 784 conv1d_21[0][0]
__________________________________________________________________________________________________
conv1d_23 (Conv1D) (None, None, 16) 784 conv1d_21[0][0]
__________________________________________________________________________________________________
multiply_7 (Multiply) (None, None, 16) 0 conv1d_22[0][0]
conv1d_23[0][0]
__________________________________________________________________________________________________
conv1d_24 (Conv1D) (None, None, 16) 272 multiply_7[0][0]
__________________________________________________________________________________________________
conv1d_25 (Conv1D) (None, None, 16) 784 conv1d_24[0][0]
__________________________________________________________________________________________________
conv1d_26 (Conv1D) (None, None, 16) 784 conv1d_24[0][0]
__________________________________________________________________________________________________
add_1 (Add) (None, None, 16) 0 conv1d[0][0]
conv1d_3[0][0]
__________________________________________________________________________________________________
multiply_8 (Multiply) (None, None, 16) 0 conv1d_25[0][0]
conv1d_26[0][0]
__________________________________________________________________________________________________
add_2 (Add) (None, None, 16) 0 add_1[0][0]
conv1d_6[0][0]
__________________________________________________________________________________________________
conv1d_27 (Conv1D) (None, None, 16) 272 multiply_8[0][0]
__________________________________________________________________________________________________
add_3 (Add) (None, None, 16) 0 add_2[0][0]
conv1d_9[0][0]
__________________________________________________________________________________________________
conv1d_28 (Conv1D) (None, None, 16) 784 conv1d_27[0][0]
__________________________________________________________________________________________________
conv1d_29 (Conv1D) (None, None, 16) 784 conv1d_27[0][0]
__________________________________________________________________________________________________
add_4 (Add) (None, None, 16) 0 add_3[0][0]
conv1d_12[0][0]
__________________________________________________________________________________________________
multiply_9 (Multiply) (None, None, 16) 0 conv1d_28[0][0]
conv1d_29[0][0]
__________________________________________________________________________________________________
add_5 (Add) (None, None, 16) 0 add_4[0][0]
conv1d_15[0][0]
__________________________________________________________________________________________________
conv1d_30 (Conv1D) (None, None, 16) 272 multiply_9[0][0]
__________________________________________________________________________________________________
add_6 (Add) (None, None, 16) 0 add_5[0][0]
conv1d_18[0][0]
__________________________________________________________________________________________________
conv1d_31 (Conv1D) (None, None, 16) 784 conv1d_30[0][0]
__________________________________________________________________________________________________
conv1d_32 (Conv1D) (None, None, 16) 784 conv1d_30[0][0]
__________________________________________________________________________________________________
add_7 (Add) (None, None, 16) 0 add_6[0][0]
conv1d_21[0][0]
__________________________________________________________________________________________________
multiply_10 (Multiply) (None, None, 16) 0 conv1d_31[0][0]
conv1d_32[0][0]
__________________________________________________________________________________________________
add_8 (Add) (None, None, 16) 0 add_7[0][0]
conv1d_24[0][0]
__________________________________________________________________________________________________
conv1d_33 (Conv1D) (None, None, 16) 272 multiply_10[0][0]
__________________________________________________________________________________________________
add_9 (Add) (None, None, 16) 0 add_8[0][0]
conv1d_27[0][0]
__________________________________________________________________________________________________
conv1d_34 (Conv1D) (None, None, 16) 784 conv1d_33[0][0]
__________________________________________________________________________________________________
conv1d_35 (Conv1D) (None, None, 16) 784 conv1d_33[0][0]
__________________________________________________________________________________________________
add_10 (Add) (None, None, 16) 0 add_9[0][0]
conv1d_30[0][0]
__________________________________________________________________________________________________
multiply_11 (Multiply) (None, None, 16) 0 conv1d_34[0][0]
conv1d_35[0][0]
__________________________________________________________________________________________________
add_11 (Add) (None, None, 16) 0 add_10[0][0]
conv1d_33[0][0]
__________________________________________________________________________________________________
conv1d_36 (Conv1D) (None, None, 16) 272 multiply_11[0][0]
__________________________________________________________________________________________________
add_12 (Add) (None, None, 16) 0 add_11[0][0]
conv1d_36[0][0]
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, None, 16) 64 add_12[0][0]
__________________________________________________________________________________________________
dropout (Dropout) (None, None, 16) 0 batch_normalization[0][0]
__________________________________________________________________________________________________
conv1d_37 (Conv1D) (None, None, 32) 544 dropout[0][0]
__________________________________________________________________________________________________
conv1d_38 (Conv1D) (None, None, 32) 3104 conv1d_37[0][0]
__________________________________________________________________________________________________
conv1d_39 (Conv1D) (None, None, 32) 3104 conv1d_37[0][0]
__________________________________________________________________________________________________
multiply_12 (Multiply) (None, None, 32) 0 conv1d_38[0][0]
conv1d_39[0][0]
__________________________________________________________________________________________________
conv1d_40 (Conv1D) (None, None, 32) 1056 multiply_12[0][0]
__________________________________________________________________________________________________
conv1d_41 (Conv1D) (None, None, 32) 3104 conv1d_40[0][0]
__________________________________________________________________________________________________
conv1d_42 (Conv1D) (None, None, 32) 3104 conv1d_40[0][0]
__________________________________________________________________________________________________
multiply_13 (Multiply) (None, None, 32) 0 conv1d_41[0][0]
conv1d_42[0][0]
__________________________________________________________________________________________________
conv1d_43 (Conv1D) (None, None, 32) 1056 multiply_13[0][0]
__________________________________________________________________________________________________
conv1d_44 (Conv1D) (None, None, 32) 3104 conv1d_43[0][0]
__________________________________________________________________________________________________
conv1d_45 (Conv1D) (None, None, 32) 3104 conv1d_43[0][0]
__________________________________________________________________________________________________
multiply_14 (Multiply) (None, None, 32) 0 conv1d_44[0][0]
conv1d_45[0][0]
__________________________________________________________________________________________________
conv1d_46 (Conv1D) (None, None, 32) 1056 multiply_14[0][0]
__________________________________________________________________________________________________
conv1d_47 (Conv1D) (None, None, 32) 3104 conv1d_46[0][0]
__________________________________________________________________________________________________
conv1d_48 (Conv1D) (None, None, 32) 3104 conv1d_46[0][0]
__________________________________________________________________________________________________
multiply_15 (Multiply) (None, None, 32) 0 conv1d_47[0][0]
conv1d_48[0][0]
__________________________________________________________________________________________________
conv1d_49 (Conv1D) (None, None, 32) 1056 multiply_15[0][0]
__________________________________________________________________________________________________
conv1d_50 (Conv1D) (None, None, 32) 3104 conv1d_49[0][0]
__________________________________________________________________________________________________
conv1d_51 (Conv1D) (None, None, 32) 3104 conv1d_49[0][0]
__________________________________________________________________________________________________
multiply_16 (Multiply) (None, None, 32) 0 conv1d_50[0][0]
conv1d_51[0][0]
__________________________________________________________________________________________________
conv1d_52 (Conv1D) (None, None, 32) 1056 multiply_16[0][0]
__________________________________________________________________________________________________
conv1d_53 (Conv1D) (None, None, 32) 3104 conv1d_52[0][0]
__________________________________________________________________________________________________
conv1d_54 (Conv1D) (None, None, 32) 3104 conv1d_52[0][0]
__________________________________________________________________________________________________
multiply_17 (Multiply) (None, None, 32) 0 conv1d_53[0][0]
conv1d_54[0][0]
__________________________________________________________________________________________________
add_13 (Add) (None, None, 32) 0 conv1d_37[0][0]
conv1d_40[0][0]
__________________________________________________________________________________________________
conv1d_55 (Conv1D) (None, None, 32) 1056 multiply_17[0][0]
__________________________________________________________________________________________________
add_14 (Add) (None, None, 32) 0 add_13[0][0]
conv1d_43[0][0]
__________________________________________________________________________________________________
conv1d_56 (Conv1D) (None, None, 32) 3104 conv1d_55[0][0]
__________________________________________________________________________________________________
conv1d_57 (Conv1D) (None, None, 32) 3104 conv1d_55[0][0]
__________________________________________________________________________________________________
add_15 (Add) (None, None, 32) 0 add_14[0][0]
conv1d_46[0][0]
__________________________________________________________________________________________________
multiply_18 (Multiply) (None, None, 32) 0 conv1d_56[0][0]
conv1d_57[0][0]
__________________________________________________________________________________________________
add_16 (Add) (None, None, 32) 0 add_15[0][0]
conv1d_49[0][0]
__________________________________________________________________________________________________
conv1d_58 (Conv1D) (None, None, 32) 1056 multiply_18[0][0]
__________________________________________________________________________________________________
add_17 (Add) (None, None, 32) 0 add_16[0][0]
conv1d_52[0][0]
__________________________________________________________________________________________________
conv1d_59 (Conv1D) (None, None, 32) 3104 conv1d_58[0][0]
__________________________________________________________________________________________________
conv1d_60 (Conv1D) (None, None, 32) 3104 conv1d_58[0][0]
__________________________________________________________________________________________________
add_18 (Add) (None, None, 32) 0 add_17[0][0]
conv1d_55[0][0]
__________________________________________________________________________________________________
multiply_19 (Multiply) (None, None, 32) 0 conv1d_59[0][0]
conv1d_60[0][0]
__________________________________________________________________________________________________
add_19 (Add) (None, None, 32) 0 add_18[0][0]
conv1d_58[0][0]
__________________________________________________________________________________________________
conv1d_61 (Conv1D) (None, None, 32) 1056 multiply_19[0][0]
__________________________________________________________________________________________________
add_20 (Add) (None, None, 32) 0 add_19[0][0]
conv1d_61[0][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, None, 32) 128 add_20[0][0]
__________________________________________________________________________________________________
dropout_1 (Dropout) (None, None, 32) 0 batch_normalization_1[0][0]
__________________________________________________________________________________________________
conv1d_62 (Conv1D) (None, None, 64) 2112 dropout_1[0][0]
__________________________________________________________________________________________________
conv1d_63 (Conv1D) (None, None, 64) 12352 conv1d_62[0][0]
__________________________________________________________________________________________________
conv1d_64 (Conv1D) (None, None, 64) 12352 conv1d_62[0][0]
__________________________________________________________________________________________________
multiply_20 (Multiply) (None, None, 64) 0 conv1d_63[0][0]
conv1d_64[0][0]
__________________________________________________________________________________________________
conv1d_65 (Conv1D) (None, None, 64) 4160 multiply_20[0][0]
__________________________________________________________________________________________________
conv1d_66 (Conv1D) (None, None, 64) 12352 conv1d_65[0][0]
__________________________________________________________________________________________________
conv1d_67 (Conv1D) (None, None, 64) 12352 conv1d_65[0][0]
__________________________________________________________________________________________________
multiply_21 (Multiply) (None, None, 64) 0 conv1d_66[0][0]
conv1d_67[0][0]
__________________________________________________________________________________________________
conv1d_68 (Conv1D) (None, None, 64) 4160 multiply_21[0][0]
__________________________________________________________________________________________________
conv1d_69 (Conv1D) (None, None, 64) 12352 conv1d_68[0][0]
__________________________________________________________________________________________________
conv1d_70 (Conv1D) (None, None, 64) 12352 conv1d_68[0][0]
__________________________________________________________________________________________________
multiply_22 (Multiply) (None, None, 64) 0 conv1d_69[0][0]
conv1d_70[0][0]
__________________________________________________________________________________________________
conv1d_71 (Conv1D) (None, None, 64) 4160 multiply_22[0][0]
__________________________________________________________________________________________________
add_21 (Add) (None, None, 64) 0 conv1d_62[0][0]
conv1d_65[0][0]
__________________________________________________________________________________________________
conv1d_72 (Conv1D) (None, None, 64) 12352 conv1d_71[0][0]
__________________________________________________________________________________________________
conv1d_73 (Conv1D) (None, None, 64) 12352 conv1d_71[0][0]
__________________________________________________________________________________________________
add_22 (Add) (None, None, 64) 0 add_21[0][0]
conv1d_68[0][0]
__________________________________________________________________________________________________
multiply_23 (Multiply) (None, None, 64) 0 conv1d_72[0][0]
conv1d_73[0][0]
__________________________________________________________________________________________________
add_23 (Add) (None, None, 64) 0 add_22[0][0]
conv1d_71[0][0]
__________________________________________________________________________________________________
conv1d_74 (Conv1D) (None, None, 64) 4160 multiply_23[0][0]
__________________________________________________________________________________________________
add_24 (Add) (None, None, 64) 0 add_23[0][0]
conv1d_74[0][0]
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, None, 64) 256 add_24[0][0]
__________________________________________________________________________________________________
dropout_2 (Dropout) (None, None, 64) 0 batch_normalization_2[0][0]
__________________________________________________________________________________________________
conv1d_75 (Conv1D) (None, None, 128) 8320 dropout_2[0][0]
__________________________________________________________________________________________________
conv1d_76 (Conv1D) (None, None, 128) 49280 conv1d_75[0][0]
__________________________________________________________________________________________________
conv1d_77 (Conv1D) (None, None, 128) 49280 conv1d_75[0][0]
__________________________________________________________________________________________________
multiply_24 (Multiply) (None, None, 128) 0 conv1d_76[0][0]
conv1d_77[0][0]
__________________________________________________________________________________________________
conv1d_78 (Conv1D) (None, None, 128) 16512 multiply_24[0][0]
__________________________________________________________________________________________________
add_25 (Add) (None, None, 128) 0 conv1d_75[0][0]
conv1d_78[0][0]
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, None, 128) 512 add_25[0][0]
__________________________________________________________________________________________________
dropout_3 (Dropout) (None, None, 128) 0 batch_normalization_3[0][0]
__________________________________________________________________________________________________
bidirectional_2 (Bidirectional) (None, None, 768) 1575936 dropout_3[0][0]
__________________________________________________________________________________________________
add_26 (Add) (None, None, 768) 0 bidirectional_2[0][0]
add[0][0]
__________________________________________________________________________________________________
tf_op_layer_strided_slice (Tens [(None, None, 768)] 0 add_26[0][0]
__________________________________________________________________________________________________
outputs (Dense) (None, None, 5) 3845 tf_op_layer_strided_slice[0][0]
==================================================================================================
Total params: 8,116,277
Trainable params: 8,115,797
Non-trainable params: 480
__________________________________________________________________________________________________
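###Markdown
A minimal numeric check of the MCRMSE loss against a NumPy reference (illustrative; random tensors, not competition data):
###Code
# MCRMSE = mean over targets of the per-target RMSE across sequence positions.
_rng = np.random.RandomState(SEED)
_yt = _rng.rand(4, 68, 5).astype('float32')
_yp = _rng.rand(4, 68, 5).astype('float32')
_np_ref = np.sqrt(((_yt - _yp)**2).mean(axis=1)).mean(axis=1)
assert np.allclose(MCRMSE(tf.constant(_yt), tf.constant(_yp)).numpy(), _np_ref, atol=1e-6)
###Output
_____no_output_____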
###Markdown
Pre-process
###Code
feature_cols = ['sequence', 'structure', 'predicted_loop_type']
pred_cols = ['reactivity', 'deg_Mg_pH10', 'deg_pH10', 'deg_Mg_50C', 'deg_50C']
encoder_list = [token2int, token2int, token2int]
train_features = np.array([preprocess_inputs(train, encoder_list[idx], [col]) for idx, col in enumerate(feature_cols)]).transpose((1, 0, 2, 3))
train_labels = np.array(train[pred_cols].values.tolist()).transpose((0, 2, 1))
public_test = test.query("seq_length == 107").copy()
private_test = test.query("seq_length == 130").copy()
x_test_public = np.array([preprocess_inputs(public_test, encoder_list[idx], [col]) for idx, col in enumerate(feature_cols)]).transpose((1, 0, 2, 3))
x_test_private = np.array([preprocess_inputs(private_test, encoder_list[idx], [col]) for idx, col in enumerate(feature_cols)]).transpose((1, 0, 2, 3))
# Integer-binned signal_to_noise, used as the stratification column for the CV splits
train['signal_to_noise_int'] = train['signal_to_noise'].astype(int)
###Output
_____no_output_____
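###Markdown
Expected array layouts after preprocessing (a sanity check; the shapes assume the standard competition data with 2400 training samples, 107-base public and 130-base private test sequences, and 68 scored positions):
###Code
# (samples, feature [sequence/structure/loop], positions, channel) for the inputs,
# (samples, scored positions, targets) for the labels.
assert train_features.shape == (len(train), 3, 107, 1)
assert train_labels.shape == (len(train), 68, 5)
assert x_test_public.shape[1:] == (3, 107, 1) and x_test_private.shape[1:] == (3, 130, 1)
###Output
_____no_output_____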
###Markdown
Training
###Code
AUTO = tf.data.experimental.AUTOTUNE
skf = StratifiedKFold(n_splits=config['N_USED_FOLDS'], shuffle=True, random_state=SEED)
history_list = []
oof = train[['id']].copy()
oof_preds = np.zeros(train_labels.shape)
test_public_preds = np.zeros((x_test_public.shape[0], config['PB_SEQ_LEN'], len(pred_cols)))
test_private_preds = np.zeros((x_test_private.shape[0], config['PV_SEQ_LEN'], len(pred_cols)))
for fold,(train_idx, valid_idx) in enumerate(skf.split(train_labels, train['signal_to_noise_int'])):
if fold >= config['N_USED_FOLDS']:
break
print(f'\nFOLD: {fold+1}')
### Create datasets
x_train = train_features[train_idx]
y_train = train_labels[train_idx]
x_valid = train_features[valid_idx]
y_valid = train_labels[valid_idx]
train_ds = get_dataset(x_train, y_train, labeled=True, shuffled=True, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)
valid_ds = get_dataset(x_valid, y_valid, labeled=True, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)
oof_ds = get_dataset(train_features[valid_idx], labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)
test_public_ds = get_dataset(x_test_public, labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)
test_private_ds = get_dataset(x_test_private, labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)
### Model
K.clear_session()
model = model_fn()
model_path = f'model_{fold}.h5'
es = EarlyStopping(monitor='val_loss', mode='min', patience=config['ES_PATIENCE'], restore_best_weights=True, verbose=1)
rlrp = ReduceLROnPlateau(monitor='val_loss', mode='min', factor=0.1, patience=5, verbose=1)
### Train
history = model.fit(train_ds,
validation_data=valid_ds,
callbacks=[es, rlrp],
epochs=config['EPOCHS'],
batch_size=config['BATCH_SIZE'],
verbose=2).history
history_list.append(history)
# Save last model weights
model.save_weights(model_path)
### Inference
oof_preds[valid_idx] = model.predict(oof_ds)
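    # The network is built on variable-length inputs (shape (None, 1)) and pred_len only
    # changes the final slice, so each fold's weights can be reloaded into models with
    # longer prediction lengths for the two test sequence sizes below.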
# Short sequence (public test)
    model = model_fn(pred_len=config['PB_SEQ_LEN'])
model.load_weights(model_path)
test_public_preds += model.predict(test_public_ds) * (1 / config['N_USED_FOLDS'])
# Long sequence (private test)
    model = model_fn(pred_len=config['PV_SEQ_LEN'])
model.load_weights(model_path)
test_private_preds += model.predict(test_private_ds) * (1 / config['N_USED_FOLDS'])
###Output
FOLD: 1
Epoch 1/120
30/30 - 14s - loss: 0.5858 - val_loss: 0.6858
Epoch 2/120
30/30 - 11s - loss: 0.5031 - val_loss: 0.6689
Epoch 3/120
30/30 - 11s - loss: 0.4856 - val_loss: 0.6335
Epoch 4/120
30/30 - 11s - loss: 0.4732 - val_loss: 0.6132
Epoch 5/120
30/30 - 11s - loss: 0.4640 - val_loss: 0.5835
Epoch 6/120
30/30 - 11s - loss: 0.4588 - val_loss: 0.5596
Epoch 7/120
30/30 - 11s - loss: 0.4495 - val_loss: 0.5399
Epoch 8/120
30/30 - 11s - loss: 0.4424 - val_loss: 0.5253
Epoch 9/120
30/30 - 11s - loss: 0.4346 - val_loss: 0.5219
Epoch 10/120
30/30 - 11s - loss: 0.4299 - val_loss: 0.5088
Epoch 11/120
30/30 - 11s - loss: 0.4234 - val_loss: 0.5024
Epoch 12/120
30/30 - 11s - loss: 0.4184 - val_loss: 0.5052
Epoch 13/120
30/30 - 11s - loss: 0.4140 - val_loss: 0.4950
Epoch 14/120
30/30 - 11s - loss: 0.4104 - val_loss: 0.4955
Epoch 15/120
30/30 - 11s - loss: 0.4090 - val_loss: 0.4939
Epoch 16/120
30/30 - 11s - loss: 0.4064 - val_loss: 0.4916
Epoch 17/120
30/30 - 11s - loss: 0.4023 - val_loss: 0.4857
Epoch 18/120
30/30 - 11s - loss: 0.3995 - val_loss: 0.4844
Epoch 19/120
30/30 - 11s - loss: 0.3958 - val_loss: 0.4835
Epoch 20/120
30/30 - 11s - loss: 0.3924 - val_loss: 0.4817
Epoch 21/120
30/30 - 11s - loss: 0.3897 - val_loss: 0.4787
Epoch 22/120
30/30 - 11s - loss: 0.3892 - val_loss: 0.4730
Epoch 23/120
30/30 - 11s - loss: 0.3864 - val_loss: 0.4785
Epoch 24/120
30/30 - 11s - loss: 0.3835 - val_loss: 0.4796
Epoch 25/120
30/30 - 11s - loss: 0.3836 - val_loss: 0.4761
Epoch 26/120
30/30 - 11s - loss: 0.3777 - val_loss: 0.4697
Epoch 27/120
30/30 - 11s - loss: 0.3755 - val_loss: 0.4717
Epoch 28/120
30/30 - 11s - loss: 0.3738 - val_loss: 0.4685
Epoch 29/120
30/30 - 11s - loss: 0.3724 - val_loss: 0.4666
Epoch 30/120
30/30 - 11s - loss: 0.3705 - val_loss: 0.4682
Epoch 31/120
30/30 - 11s - loss: 0.3689 - val_loss: 0.4642
Epoch 32/120
30/30 - 11s - loss: 0.3664 - val_loss: 0.4636
Epoch 33/120
30/30 - 11s - loss: 0.3651 - val_loss: 0.4644
Epoch 34/120
30/30 - 11s - loss: 0.3615 - val_loss: 0.4623
Epoch 35/120
30/30 - 11s - loss: 0.3606 - val_loss: 0.4632
Epoch 36/120
30/30 - 11s - loss: 0.3590 - val_loss: 0.4626
Epoch 37/120
30/30 - 11s - loss: 0.3563 - val_loss: 0.4618
Epoch 38/120
30/30 - 11s - loss: 0.3553 - val_loss: 0.4612
Epoch 39/120
30/30 - 11s - loss: 0.3534 - val_loss: 0.4617
Epoch 40/120
30/30 - 11s - loss: 0.3508 - val_loss: 0.4592
Epoch 41/120
30/30 - 11s - loss: 0.3498 - val_loss: 0.4599
Epoch 42/120
30/30 - 11s - loss: 0.3481 - val_loss: 0.4584
Epoch 43/120
30/30 - 11s - loss: 0.3454 - val_loss: 0.4562
Epoch 44/120
30/30 - 11s - loss: 0.3459 - val_loss: 0.4586
Epoch 45/120
30/30 - 11s - loss: 0.3439 - val_loss: 0.4587
Epoch 46/120
30/30 - 11s - loss: 0.3411 - val_loss: 0.4571
Epoch 47/120
30/30 - 11s - loss: 0.3393 - val_loss: 0.4582
Epoch 48/120
Epoch 00048: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.
30/30 - 11s - loss: 0.3377 - val_loss: 0.4566
Epoch 49/120
30/30 - 11s - loss: 0.3314 - val_loss: 0.4539
Epoch 50/120
30/30 - 11s - loss: 0.3283 - val_loss: 0.4534
Epoch 51/120
30/30 - 11s - loss: 0.3272 - val_loss: 0.4529
Epoch 52/120
30/30 - 11s - loss: 0.3261 - val_loss: 0.4532
Epoch 53/120
30/30 - 11s - loss: 0.3260 - val_loss: 0.4528
Epoch 54/120
30/30 - 11s - loss: 0.3255 - val_loss: 0.4531
Epoch 55/120
30/30 - 11s - loss: 0.3250 - val_loss: 0.4534
Epoch 56/120
30/30 - 11s - loss: 0.3242 - val_loss: 0.4533
Epoch 57/120
30/30 - 11s - loss: 0.3243 - val_loss: 0.4530
Epoch 58/120
Epoch 00058: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.
30/30 - 11s - loss: 0.3232 - val_loss: 0.4529
Epoch 59/120
30/30 - 11s - loss: 0.3227 - val_loss: 0.4530
Epoch 60/120
30/30 - 11s - loss: 0.3227 - val_loss: 0.4530
Epoch 61/120
30/30 - 11s - loss: 0.3223 - val_loss: 0.4529
Epoch 62/120
30/30 - 11s - loss: 0.3225 - val_loss: 0.4528
Epoch 63/120
Epoch 00063: ReduceLROnPlateau reducing learning rate to 1.0000000656873453e-06.
30/30 - 11s - loss: 0.3221 - val_loss: 0.4532
Epoch 64/120
30/30 - 11s - loss: 0.3223 - val_loss: 0.4531
Epoch 65/120
30/30 - 11s - loss: 0.3218 - val_loss: 0.4531
Epoch 66/120
30/30 - 11s - loss: 0.3224 - val_loss: 0.4531
Epoch 67/120
30/30 - 11s - loss: 0.3222 - val_loss: 0.4531
Epoch 68/120
Epoch 00068: ReduceLROnPlateau reducing learning rate to 1.0000001111620805e-07.
30/30 - 11s - loss: 0.3223 - val_loss: 0.4531
Epoch 69/120
30/30 - 11s - loss: 0.3224 - val_loss: 0.4531
Epoch 70/120
30/30 - 11s - loss: 0.3223 - val_loss: 0.4531
Epoch 71/120
30/30 - 11s - loss: 0.3220 - val_loss: 0.4531
Epoch 72/120
Restoring model weights from the end of the best epoch.
30/30 - 11s - loss: 0.3219 - val_loss: 0.4531
Epoch 00072: early stopping
FOLD: 2
Epoch 1/120
30/30 - 14s - loss: 0.6098 - val_loss: 0.5799
Epoch 2/120
30/30 - 11s - loss: 0.5312 - val_loss: 0.5746
Epoch 3/120
30/30 - 11s - loss: 0.5091 - val_loss: 0.5344
Epoch 4/120
30/30 - 11s - loss: 0.4980 - val_loss: 0.4890
Epoch 5/120
30/30 - 11s - loss: 0.4895 - val_loss: 0.4722
Epoch 6/120
30/30 - 11s - loss: 0.4800 - val_loss: 0.4414
Epoch 7/120
30/30 - 11s - loss: 0.4714 - val_loss: 0.4343
Epoch 8/120
30/30 - 11s - loss: 0.4640 - val_loss: 0.4298
Epoch 9/120
30/30 - 11s - loss: 0.4585 - val_loss: 0.4077
Epoch 10/120
30/30 - 11s - loss: 0.4529 - val_loss: 0.4062
Epoch 11/120
30/30 - 11s - loss: 0.4486 - val_loss: 0.4374
Epoch 12/120
30/30 - 11s - loss: 0.4446 - val_loss: 0.4042
Epoch 13/120
30/30 - 11s - loss: 0.4387 - val_loss: 0.4048
Epoch 14/120
30/30 - 11s - loss: 0.4350 - val_loss: 0.4035
Epoch 15/120
30/30 - 11s - loss: 0.4321 - val_loss: 0.3923
Epoch 16/120
30/30 - 11s - loss: 0.4281 - val_loss: 0.3924
Epoch 17/120
30/30 - 11s - loss: 0.4256 - val_loss: 0.3889
Epoch 18/120
30/30 - 11s - loss: 0.4227 - val_loss: 0.3877
Epoch 19/120
30/30 - 11s - loss: 0.4196 - val_loss: 0.3787
Epoch 20/120
30/30 - 11s - loss: 0.4170 - val_loss: 0.3774
Epoch 21/120
30/30 - 11s - loss: 0.4166 - val_loss: 0.3817
Epoch 22/120
30/30 - 11s - loss: 0.4126 - val_loss: 0.3746
Epoch 23/120
30/30 - 11s - loss: 0.4093 - val_loss: 0.3775
Epoch 24/120
30/30 - 11s - loss: 0.4085 - val_loss: 0.3747
Epoch 25/120
30/30 - 11s - loss: 0.4070 - val_loss: 0.3730
Epoch 26/120
30/30 - 11s - loss: 0.4034 - val_loss: 0.3773
Epoch 27/120
30/30 - 11s - loss: 0.4028 - val_loss: 0.3719
Epoch 28/120
30/30 - 11s - loss: 0.3996 - val_loss: 0.3687
Epoch 29/120
30/30 - 11s - loss: 0.3988 - val_loss: 0.3703
Epoch 30/120
30/30 - 11s - loss: 0.3987 - val_loss: 0.3662
Epoch 31/120
30/30 - 11s - loss: 0.3960 - val_loss: 0.3698
Epoch 32/120
30/30 - 11s - loss: 0.3941 - val_loss: 0.3660
Epoch 33/120
30/30 - 11s - loss: 0.3914 - val_loss: 0.3708
Epoch 34/120
30/30 - 11s - loss: 0.3896 - val_loss: 0.3645
Epoch 35/120
30/30 - 11s - loss: 0.3880 - val_loss: 0.3675
Epoch 36/120
30/30 - 11s - loss: 0.3868 - val_loss: 0.3696
Epoch 37/120
30/30 - 11s - loss: 0.3842 - val_loss: 0.3686
Epoch 38/120
30/30 - 11s - loss: 0.3821 - val_loss: 0.3581
Epoch 39/120
30/30 - 11s - loss: 0.3799 - val_loss: 0.3614
Epoch 40/120
30/30 - 11s - loss: 0.3778 - val_loss: 0.3607
Epoch 41/120
30/30 - 11s - loss: 0.3768 - val_loss: 0.3617
Epoch 42/120
30/30 - 11s - loss: 0.3754 - val_loss: 0.3610
Epoch 43/120
Epoch 00043: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.
30/30 - 11s - loss: 0.3733 - val_loss: 0.3610
Epoch 44/120
30/30 - 11s - loss: 0.3676 - val_loss: 0.3560
Epoch 45/120
30/30 - 11s - loss: 0.3650 - val_loss: 0.3553
Epoch 46/120
30/30 - 11s - loss: 0.3630 - val_loss: 0.3549
Epoch 47/120
30/30 - 11s - loss: 0.3625 - val_loss: 0.3548
Epoch 48/120
30/30 - 11s - loss: 0.3619 - val_loss: 0.3548
Epoch 49/120
30/30 - 11s - loss: 0.3615 - val_loss: 0.3537
Epoch 50/120
30/30 - 11s - loss: 0.3612 - val_loss: 0.3544
Epoch 51/120
30/30 - 11s - loss: 0.3608 - val_loss: 0.3541
Epoch 52/120
30/30 - 11s - loss: 0.3605 - val_loss: 0.3539
Epoch 53/120
30/30 - 11s - loss: 0.3596 - val_loss: 0.3539
Epoch 54/120
Epoch 00054: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.
30/30 - 11s - loss: 0.3594 - val_loss: 0.3538
Epoch 55/120
30/30 - 11s - loss: 0.3592 - val_loss: 0.3538
Epoch 56/120
30/30 - 11s - loss: 0.3589 - val_loss: 0.3538
Epoch 57/120
30/30 - 11s - loss: 0.3587 - val_loss: 0.3539
Epoch 58/120
30/30 - 11s - loss: 0.3585 - val_loss: 0.3539
Epoch 59/120
Restoring model weights from the end of the best epoch.
Epoch 00059: ReduceLROnPlateau reducing learning rate to 1.0000000656873453e-06.
30/30 - 11s - loss: 0.3584 - val_loss: 0.3540
Epoch 00059: early stopping
FOLD: 3
Epoch 1/120
30/30 - 14s - loss: 0.6061 - val_loss: 0.5892
Epoch 2/120
30/30 - 11s - loss: 0.5237 - val_loss: 0.5722
Epoch 3/120
30/30 - 11s - loss: 0.5038 - val_loss: 0.5541
Epoch 4/120
30/30 - 11s - loss: 0.4936 - val_loss: 0.5131
Epoch 5/120
30/30 - 11s - loss: 0.4850 - val_loss: 0.4769
Epoch 6/120
30/30 - 11s - loss: 0.4769 - val_loss: 0.4532
Epoch 7/120
30/30 - 11s - loss: 0.4688 - val_loss: 0.4445
Epoch 8/120
30/30 - 11s - loss: 0.4614 - val_loss: 0.4318
Epoch 9/120
30/30 - 11s - loss: 0.4539 - val_loss: 0.4267
Epoch 10/120
30/30 - 11s - loss: 0.4487 - val_loss: 0.4161
Epoch 11/120
30/30 - 11s - loss: 0.4450 - val_loss: 0.4154
Epoch 12/120
30/30 - 11s - loss: 0.4398 - val_loss: 0.4097
Epoch 13/120
30/30 - 11s - loss: 0.4351 - val_loss: 0.4063
Epoch 14/120
30/30 - 11s - loss: 0.4322 - val_loss: 0.4012
Epoch 15/120
30/30 - 11s - loss: 0.4285 - val_loss: 0.3981
Epoch 16/120
30/30 - 11s - loss: 0.4243 - val_loss: 0.3970
Epoch 17/120
30/30 - 11s - loss: 0.4217 - val_loss: 0.4002
Epoch 18/120
30/30 - 11s - loss: 0.4196 - val_loss: 0.3919
Epoch 19/120
30/30 - 11s - loss: 0.4159 - val_loss: 0.3934
Epoch 20/120
30/30 - 11s - loss: 0.4142 - val_loss: 0.3949
Epoch 21/120
30/30 - 11s - loss: 0.4126 - val_loss: 0.3912
Epoch 22/120
30/30 - 11s - loss: 0.4103 - val_loss: 0.3895
Epoch 23/120
30/30 - 11s - loss: 0.4074 - val_loss: 0.3841
Epoch 24/120
30/30 - 11s - loss: 0.4054 - val_loss: 0.3862
Epoch 25/120
30/30 - 11s - loss: 0.4023 - val_loss: 0.3822
Epoch 26/120
30/30 - 11s - loss: 0.4007 - val_loss: 0.3801
Epoch 27/120
30/30 - 11s - loss: 0.3976 - val_loss: 0.3797
Epoch 28/120
30/30 - 11s - loss: 0.3959 - val_loss: 0.3799
Epoch 29/120
30/30 - 11s - loss: 0.3943 - val_loss: 0.3772
Epoch 30/120
30/30 - 11s - loss: 0.3917 - val_loss: 0.3800
Epoch 31/120
30/30 - 11s - loss: 0.3899 - val_loss: 0.3772
Epoch 32/120
30/30 - 11s - loss: 0.3894 - val_loss: 0.3775
Epoch 33/120
30/30 - 11s - loss: 0.3857 - val_loss: 0.3741
Epoch 34/120
30/30 - 11s - loss: 0.3836 - val_loss: 0.3746
Epoch 35/120
30/30 - 11s - loss: 0.3823 - val_loss: 0.3784
Epoch 36/120
30/30 - 11s - loss: 0.3813 - val_loss: 0.3739
Epoch 37/120
30/30 - 11s - loss: 0.3790 - val_loss: 0.3738
Epoch 38/120
30/30 - 11s - loss: 0.3769 - val_loss: 0.3739
Epoch 39/120
30/30 - 11s - loss: 0.3749 - val_loss: 0.3751
Epoch 40/120
30/30 - 11s - loss: 0.3751 - val_loss: 0.3801
Epoch 41/120
30/30 - 11s - loss: 0.3728 - val_loss: 0.3748
Epoch 42/120
30/30 - 11s - loss: 0.3700 - val_loss: 0.3712
Epoch 43/120
30/30 - 11s - loss: 0.3686 - val_loss: 0.3716
Epoch 44/120
30/30 - 11s - loss: 0.3677 - val_loss: 0.3764
Epoch 45/120
30/30 - 11s - loss: 0.3661 - val_loss: 0.3726
Epoch 46/120
30/30 - 11s - loss: 0.3638 - val_loss: 0.3713
Epoch 47/120
Epoch 00047: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.
30/30 - 11s - loss: 0.3627 - val_loss: 0.3743
Epoch 48/120
30/30 - 11s - loss: 0.3557 - val_loss: 0.3687
Epoch 49/120
30/30 - 11s - loss: 0.3526 - val_loss: 0.3681
Epoch 50/120
30/30 - 11s - loss: 0.3515 - val_loss: 0.3669
Epoch 51/120
30/30 - 11s - loss: 0.3506 - val_loss: 0.3668
Epoch 52/120
30/30 - 11s - loss: 0.3499 - val_loss: 0.3669
Epoch 53/120
30/30 - 11s - loss: 0.3498 - val_loss: 0.3670
Epoch 54/120
30/30 - 11s - loss: 0.3490 - val_loss: 0.3673
Epoch 55/120
30/30 - 11s - loss: 0.3484 - val_loss: 0.3675
Epoch 56/120
Epoch 00056: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.
30/30 - 11s - loss: 0.3479 - val_loss: 0.3681
Epoch 57/120
30/30 - 11s - loss: 0.3471 - val_loss: 0.3678
Epoch 58/120
30/30 - 11s - loss: 0.3468 - val_loss: 0.3677
Epoch 59/120
30/30 - 11s - loss: 0.3471 - val_loss: 0.3676
Epoch 60/120
30/30 - 11s - loss: 0.3469 - val_loss: 0.3675
Epoch 61/120
Restoring model weights from the end of the best epoch.
Epoch 00061: ReduceLROnPlateau reducing learning rate to 1.0000000656873453e-06.
30/30 - 11s - loss: 0.3470 - val_loss: 0.3676
Epoch 00061: early stopping
FOLD: 4
Epoch 1/120
30/30 - 14s - loss: 0.5990 - val_loss: 0.5668
Epoch 2/120
30/30 - 11s - loss: 0.5239 - val_loss: 0.5607
Epoch 3/120
30/30 - 11s - loss: 0.5044 - val_loss: 0.5461
Epoch 4/120
30/30 - 11s - loss: 0.4955 - val_loss: 0.5157
Epoch 5/120
30/30 - 11s - loss: 0.4855 - val_loss: 0.4806
Epoch 6/120
30/30 - 11s - loss: 0.4753 - val_loss: 0.4687
Epoch 7/120
30/30 - 11s - loss: 0.4681 - val_loss: 0.4574
Epoch 8/120
30/30 - 11s - loss: 0.4602 - val_loss: 0.4490
Epoch 9/120
30/30 - 11s - loss: 0.4513 - val_loss: 0.4399
Epoch 10/120
30/30 - 11s - loss: 0.4452 - val_loss: 0.4353
Epoch 11/120
30/30 - 11s - loss: 0.4406 - val_loss: 0.4320
Epoch 12/120
30/30 - 11s - loss: 0.4358 - val_loss: 0.4260
Epoch 13/120
30/30 - 11s - loss: 0.4317 - val_loss: 0.4204
Epoch 14/120
30/30 - 11s - loss: 0.4274 - val_loss: 0.4187
Epoch 15/120
30/30 - 11s - loss: 0.4252 - val_loss: 0.4209
Epoch 16/120
30/30 - 11s - loss: 0.4229 - val_loss: 0.4155
Epoch 17/120
30/30 - 11s - loss: 0.4187 - val_loss: 0.4138
Epoch 18/120
30/30 - 11s - loss: 0.4157 - val_loss: 0.4061
Epoch 19/120
30/30 - 11s - loss: 0.4128 - val_loss: 0.4088
Epoch 20/120
30/30 - 11s - loss: 0.4110 - val_loss: 0.4008
Epoch 21/120
30/30 - 11s - loss: 0.4076 - val_loss: 0.4043
Epoch 22/120
30/30 - 11s - loss: 0.4064 - val_loss: 0.4006
Epoch 23/120
30/30 - 11s - loss: 0.4033 - val_loss: 0.4008
Epoch 24/120
30/30 - 11s - loss: 0.4017 - val_loss: 0.3977
Epoch 25/120
30/30 - 11s - loss: 0.3989 - val_loss: 0.3991
Epoch 26/120
30/30 - 11s - loss: 0.3970 - val_loss: 0.3968
Epoch 27/120
30/30 - 11s - loss: 0.3964 - val_loss: 0.3966
Epoch 28/120
30/30 - 11s - loss: 0.3929 - val_loss: 0.3930
Epoch 29/120
30/30 - 11s - loss: 0.3909 - val_loss: 0.3927
Epoch 30/120
30/30 - 11s - loss: 0.3895 - val_loss: 0.3928
Epoch 31/120
30/30 - 11s - loss: 0.3893 - val_loss: 0.3897
Epoch 32/120
30/30 - 11s - loss: 0.3865 - val_loss: 0.3887
Epoch 33/120
30/30 - 11s - loss: 0.3839 - val_loss: 0.3875
Epoch 34/120
30/30 - 11s - loss: 0.3821 - val_loss: 0.3867
Epoch 35/120
30/30 - 11s - loss: 0.3813 - val_loss: 0.3922
Epoch 36/120
30/30 - 11s - loss: 0.3796 - val_loss: 0.3861
Epoch 37/120
30/30 - 11s - loss: 0.3763 - val_loss: 0.3851
Epoch 38/120
30/30 - 11s - loss: 0.3754 - val_loss: 0.3854
Epoch 39/120
30/30 - 11s - loss: 0.3747 - val_loss: 0.3862
Epoch 40/120
30/30 - 11s - loss: 0.3732 - val_loss: 0.3839
Epoch 41/120
30/30 - 11s - loss: 0.3709 - val_loss: 0.3852
Epoch 42/120
30/30 - 11s - loss: 0.3691 - val_loss: 0.3857
Epoch 43/120
30/30 - 11s - loss: 0.3671 - val_loss: 0.3849
Epoch 44/120
30/30 - 11s - loss: 0.3679 - val_loss: 0.3844
Epoch 45/120
Epoch 00045: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.
30/30 - 11s - loss: 0.3655 - val_loss: 0.3848
Epoch 46/120
30/30 - 11s - loss: 0.3593 - val_loss: 0.3805
Epoch 47/120
30/30 - 11s - loss: 0.3559 - val_loss: 0.3790
Epoch 48/120
30/30 - 11s - loss: 0.3552 - val_loss: 0.3788
Epoch 49/120
30/30 - 11s - loss: 0.3543 - val_loss: 0.3793
Epoch 50/120
30/30 - 11s - loss: 0.3535 - val_loss: 0.3789
Epoch 51/120
30/30 - 11s - loss: 0.3530 - val_loss: 0.3788
Epoch 52/120
30/30 - 11s - loss: 0.3526 - val_loss: 0.3793
Epoch 53/120
Epoch 00053: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.
30/30 - 11s - loss: 0.3518 - val_loss: 0.3793
Epoch 54/120
30/30 - 11s - loss: 0.3514 - val_loss: 0.3789
Epoch 55/120
30/30 - 11s - loss: 0.3509 - val_loss: 0.3787
Epoch 56/120
30/30 - 11s - loss: 0.3511 - val_loss: 0.3788
Epoch 57/120
30/30 - 11s - loss: 0.3507 - val_loss: 0.3789
Epoch 58/120
Epoch 00058: ReduceLROnPlateau reducing learning rate to 1.0000000656873453e-06.
30/30 - 11s - loss: 0.3506 - val_loss: 0.3788
Epoch 59/120
30/30 - 11s - loss: 0.3507 - val_loss: 0.3788
Epoch 60/120
30/30 - 11s - loss: 0.3506 - val_loss: 0.3788
Epoch 61/120
30/30 - 11s - loss: 0.3506 - val_loss: 0.3788
Epoch 62/120
30/30 - 11s - loss: 0.3506 - val_loss: 0.3788
Epoch 63/120
Epoch 00063: ReduceLROnPlateau reducing learning rate to 1.0000001111620805e-07.
30/30 - 11s - loss: 0.3505 - val_loss: 0.3788
Epoch 64/120
30/30 - 11s - loss: 0.3506 - val_loss: 0.3788
Epoch 65/120
Restoring model weights from the end of the best epoch.
30/30 - 11s - loss: 0.3505 - val_loss: 0.3788
Epoch 00065: early stopping
FOLD: 5
Epoch 1/120
30/30 - 14s - loss: 0.6006 - val_loss: 0.6228
Epoch 2/120
30/30 - 11s - loss: 0.5252 - val_loss: 0.6106
Epoch 3/120
30/30 - 11s - loss: 0.5048 - val_loss: 0.6036
Epoch 4/120
30/30 - 11s - loss: 0.4947 - val_loss: 0.6035
Epoch 5/120
30/30 - 11s - loss: 0.4854 - val_loss: 0.5009
Epoch 6/120
30/30 - 11s - loss: 0.4773 - val_loss: 0.4795
Epoch 7/120
30/30 - 11s - loss: 0.4681 - val_loss: 0.4661
Epoch 8/120
30/30 - 11s - loss: 0.4610 - val_loss: 0.4400
Epoch 9/120
30/30 - 11s - loss: 0.4532 - val_loss: 0.4341
Epoch 10/120
30/30 - 11s - loss: 0.4484 - val_loss: 0.4311
Epoch 11/120
30/30 - 11s - loss: 0.4420 - val_loss: 0.4250
Epoch 12/120
30/30 - 11s - loss: 0.4394 - val_loss: 0.4256
Epoch 13/120
30/30 - 11s - loss: 0.4353 - val_loss: 0.4182
Epoch 14/120
30/30 - 11s - loss: 0.4301 - val_loss: 0.4095
Epoch 15/120
30/30 - 11s - loss: 0.4269 - val_loss: 0.4097
Epoch 16/120
30/30 - 11s - loss: 0.4233 - val_loss: 0.4086
Epoch 17/120
30/30 - 11s - loss: 0.4223 - val_loss: 0.4052
Epoch 18/120
30/30 - 11s - loss: 0.4161 - val_loss: 0.4037
Epoch 19/120
30/30 - 11s - loss: 0.4144 - val_loss: 0.4025
Epoch 20/120
30/30 - 11s - loss: 0.4111 - val_loss: 0.4008
Epoch 21/120
30/30 - 11s - loss: 0.4099 - val_loss: 0.3977
Epoch 22/120
30/30 - 11s - loss: 0.4060 - val_loss: 0.3976
Epoch 23/120
30/30 - 11s - loss: 0.4049 - val_loss: 0.3959
Epoch 24/120
30/30 - 11s - loss: 0.4014 - val_loss: 0.3948
Epoch 25/120
30/30 - 11s - loss: 0.3992 - val_loss: 0.3968
Epoch 26/120
30/30 - 11s - loss: 0.3972 - val_loss: 0.3922
Epoch 27/120
30/30 - 11s - loss: 0.3954 - val_loss: 0.3900
Epoch 28/120
30/30 - 11s - loss: 0.3919 - val_loss: 0.3931
Epoch 29/120
30/30 - 11s - loss: 0.3898 - val_loss: 0.3914
Epoch 30/120
30/30 - 11s - loss: 0.3892 - val_loss: 0.3868
Epoch 31/120
30/30 - 11s - loss: 0.3871 - val_loss: 0.3915
Epoch 32/120
30/30 - 11s - loss: 0.3853 - val_loss: 0.3884
Epoch 33/120
30/30 - 11s - loss: 0.3838 - val_loss: 0.3864
Epoch 34/120
30/30 - 11s - loss: 0.3801 - val_loss: 0.3835
Epoch 35/120
30/30 - 11s - loss: 0.3787 - val_loss: 0.3864
Epoch 36/120
30/30 - 11s - loss: 0.3773 - val_loss: 0.3847
Epoch 37/120
30/30 - 11s - loss: 0.3754 - val_loss: 0.3860
Epoch 38/120
30/30 - 11s - loss: 0.3724 - val_loss: 0.3834
Epoch 39/120
Epoch 00039: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.
30/30 - 11s - loss: 0.3701 - val_loss: 0.3839
Epoch 40/120
30/30 - 11s - loss: 0.3635 - val_loss: 0.3803
Epoch 41/120
30/30 - 11s - loss: 0.3598 - val_loss: 0.3791
Epoch 42/120
30/30 - 11s - loss: 0.3585 - val_loss: 0.3787
Epoch 43/120
30/30 - 11s - loss: 0.3578 - val_loss: 0.3791
Epoch 44/120
30/30 - 11s - loss: 0.3574 - val_loss: 0.3791
Epoch 45/120
30/30 - 11s - loss: 0.3564 - val_loss: 0.3791
Epoch 46/120
30/30 - 11s - loss: 0.3558 - val_loss: 0.3787
Epoch 47/120
Epoch 00047: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.
30/30 - 11s - loss: 0.3555 - val_loss: 0.3793
Epoch 48/120
30/30 - 11s - loss: 0.3548 - val_loss: 0.3790
Epoch 49/120
30/30 - 11s - loss: 0.3540 - val_loss: 0.3790
Epoch 50/120
30/30 - 11s - loss: 0.3543 - val_loss: 0.3791
Epoch 51/120
30/30 - 11s - loss: 0.3541 - val_loss: 0.3787
Epoch 52/120
Restoring model weights from the end of the best epoch.
Epoch 00052: ReduceLROnPlateau reducing learning rate to 1.0000000656873453e-06.
30/30 - 11s - loss: 0.3541 - val_loss: 0.3789
Epoch 00052: early stopping
###Markdown
Model loss graph
###Code
for fold, history in enumerate(history_list):
print(f'\nFOLD: {fold+1}')
print(f"Train {np.array(history['loss']).min():.5f} Validation {np.array(history['val_loss']).min():.5f}")
plot_metrics(history)
###Output
FOLD: 1
Train 0.32179 Validation 0.45283
###Markdown
Post-processing
###Code
# Assign values to OOF set
# Assign labels
for idx, col in enumerate(pred_cols):
val = train_labels[:, :, idx]
oof = oof.assign(**{col: list(val)})
# Assign preds
for idx, col in enumerate(pred_cols):
val = oof_preds[:, :, idx]
oof = oof.assign(**{f'{col}_pred': list(val)})
# Assign values to test set
preds_ls = []
for df, preds in [(public_test, test_public_preds), (private_test, test_private_preds)]:
for i, uid in enumerate(df.id):
single_pred = preds[i]
single_df = pd.DataFrame(single_pred, columns=pred_cols)
single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])]
preds_ls.append(single_df)
preds_df = pd.concat(preds_ls)
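# Quick sanity check: one prediction row per (sequence, position) across the
# public and private test sets combined.
print(f'Total prediction rows: {len(preds_df)}')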
###Output
_____no_output_____
###Markdown
Model evaluation
###Code
display(evaluate_model(train, train_labels, oof_preds, pred_cols))
###Output
_____no_output_____
###Markdown
Visualize test predictions
###Code
submission = pd.read_csv(database_base_path + 'sample_submission.csv')
submission = submission[['id_seqpos']].merge(preds_df, on=['id_seqpos'])
###Output
_____no_output_____
###Markdown
Test set predictions
###Code
display(submission.head(10))
display(submission.describe())
submission.to_csv('submission.csv', index=False)
###Output
_____no_output_____ |
notebooks/B2_FeatureExtraction.ipynb | ###Markdown
Copyright © 2020-2021 by Fraunhofer-Gesellschaft. All rights reserved.Fraunhofer Institute for Integrated Circuits IIS, Division Engineering of Adaptive Systems EASZeunerstraße 38, 01069 Dresden, Germany--- ESB - Energy Saving by BlockchainEurostars – EXP 00119832 / EUS-2019113348--- Prediction of Energy Consumption for Variable Customer Portfolios Including Aleatoric Uncertainty Estimation*Oliver Mey, André Schneider, Olaf Enge-Rosenblatt, Yesnier Bravo, Pit Stenzel*The notebook is part of a paper submission contributed to the **10th International Conference on Power Science and Engineering (ICPSE 2021)**, which will be held on Oct. 21-23, 2021 at Yildiz Technical University, Istanbul, Turkey.--- B2: Data Preprocessing and Feature ExtractionThis notebook loads the available datasets, splits the datasets into three subsets for training, test and validation, fits the scalers and encoders for feature extraction, and extracts the features for all subsets. At the end, the feature vectors are explained in detail.---Version 0.4.3 (August 5, 2021)Authors: Oliver Mey, André Schneider (Fraunhofer IIS)
###Code
import warnings
warnings.filterwarnings('ignore')
import os
import joblib
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import holidays as hd
import seaborn as sns
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras.models import load_model
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
%matplotlib inline
sns.set(rc={'figure.figsize':(16, 6)})
###Output
_____no_output_____
###Markdown
Configuration
###Code
path = '..'
timezone = 'Europe/Madrid'
date = '2019-02-02'
customer = 20
seed = 12345
properties = {
't_consumption_daily': [-14, -1],
't_weather_daily': [-14, -1],
't_consumption_hourly': [-7, -1],
't_weather_hourly': [-2, 0],
}
###Output
_____no_output_____
###Markdown
Function Definitions
###Code
def fix_DST(data):
data = data[~data.index.duplicated(keep='first')]
data = data.resample('H').ffill()
return data
def crop(data):
hour_index = data.index.hour
t0 = data[hour_index==0].head(1).index
tn = data[hour_index==23].tail(1).index
data.drop(data.loc[data.index < t0[0]].index, inplace=True)
data.drop(data.loc[data.index > tn[0]].index, inplace=True)
return
def time_from_to(date, t, tz=timezone):
t0_ = pd.Timestamp(date, tz=tz)+pd.Timedelta(days=t[0])
tn_ = pd.Timestamp(date, tz=tz)+pd.Timedelta(days=t[1])+pd.Timedelta(hours=23)
return slice(t0_, tn_)
def day_from_to(date, t, tz=timezone):
t0_ = pd.Timestamp(date)+pd.Timedelta(days=t[0])
tn_ = pd.Timestamp(date)+pd.Timedelta(days=t[1])
return slice(t0_, tn_)
def softrange(x, x_min=0, x_max=1):
r = x_max - x_min
y = x_min + tf.constant(r)/tf.math.softplus(r) * tf.math.softplus(-tf.math.softplus(x_max-x) + r)
return y
###Output
_____no_output_____
###Markdown
Class Definitions Data Loader
###Code
class DataLoader:
def __init__(self, data_path, model_path):
self.data_path = data_path
self.model_path = model_path
self.categories = ['consumption', 'weather', 'profiles']
self.scaler_names = ['scaler_consumptions', 'scaler_consumptions_daily_mean',
'scaler_weather_daily_mean', 'scaler_day_of_month', 'scaler_month',
'scaler_weather_forecast']
self.files = [self.data_path + '/' + '20201015_' + name + '.xlsx' for name in self.categories]
return
def scale_data(self, data):
x = data.groupby(data.index.date).mean()
x.index = pd.to_datetime(x.index)
x = x.append(pd.DataFrame(x.tail(1), index=x.tail(1).index+pd.Timedelta(days=1)))
x = x.resample('h').ffill()[:-1]
x.index = data.index
y = data / x
y.fillna(value=0, inplace=True)
return y
def load_metadata(self):
customers = pd.read_excel(self.files[self.categories.index('profiles')])
customers.columns = ['customer', 'profile']
profiles = pd.DataFrame(customers['profile'].unique(), columns=['profile'])
holidays = hd.ES(years=list(range(2010, 2021)), prov="MD")
return customers, profiles, holidays
def load_data(self):
consumptions = pd.read_excel(self.files[self.categories.index('consumption')],
parse_dates=[0], index_col=0)
consumptions.columns = pd.DataFrame(consumptions.columns, columns=['customer']).index
consumptions.index.name = 'time'
consumptions = fix_DST(consumptions)
consumptions_scaled = self.scale_data(consumptions)
weather = pd.read_excel(self.files[self.categories.index('weather')], parse_dates=[0], index_col=0)
weather.columns = consumptions.columns
weather.index.name = 'time'
weather = fix_DST(weather)
weather_forecast = weather.copy()
weather_forecast.index = weather.index-pd.Timedelta(days=1)
weather_forecast = fix_DST(weather_forecast)
return consumptions, consumptions_scaled, weather, weather_forecast
def load_scalers(self):
scalers = [joblib.load(self.model_path + '/' + name) for name in self.scaler_names]
scalers = dict(zip(self.scaler_names, scalers))
scale = scalers['scaler_consumptions'].scale_
offset = scalers['scaler_consumptions'].mean_
return scalers
def load_models(self, names):
        models = [load_model(self.model_path + '/' + name + '.h5') for name in names]
models = dict(zip(names, models))
return models
###Output
_____no_output_____
###Markdown
Feature Extractor
###Code
class FeatureExtractor:
def __init__(self, properties, refit=False):
self.t_consumption_daily = properties.get('t_consumption_daily', [-13, -1])
self.t_consumption_hourly = properties.get('t_consumption_hourly', [-2, -1])
self.t_weather_daily = properties.get('t_weather_daily', [-2, -0])
self.t_weather_hourly = properties.get('t_weather_hourly', [-2, -0])
self.encoder = properties.get('encoder')
if not refit:
scalers = properties.get('scalers')
self.scaler_consumption = scalers['scaler_consumptions']
self.scaler_weather = scalers['scaler_weather']
self.scaler_weather_forecast = scalers['scaler_weather_forecast']
self.scaler_day_of_month = scalers['scaler_day_of_month']
self.scaler_month = scalers['scaler_month']
return
def get_days(self, dates, holidays):
days = pd.DataFrame(pd.to_datetime(dates.date), index=dates, columns=['date'])
days['day_of_week'] = list(days.index.dayofweek)
days['day_of_month'] = list(days.index.day)
days['month'] = list(days.index.month)
days['day_category'] = days['day_of_week'].replace({0:0,1:1,2:1,3:1,4:2,5:3,6:4})
days.loc[days['date'].apply(lambda d: d in holidays), 'day_category'] = 4
days = days.groupby(['date']).first()
return days
def split(self, indices, seed=12345):
n = len(indices)
n_validate = n//10
n_test = n//10
n_train = n-n_validate - n_test
np.random.seed(seed)
I = np.random.permutation(indices)
I_train = I[0:n_train]
I_test = I[n_train:n_train + n_test]
I_validate = I[n_train + n_test:]
return I_train, I_test, I_validate
def fit(self, consumptions, weather, weather_forecast, holidays):
days = self.get_days(consumptions.index, holidays)
consumptions_daily_mean = pd.DataFrame(consumptions.groupby(consumptions.index.date).mean(),
index=days.index)
weather_daily_mean = pd.DataFrame(weather.groupby(weather.index.date).mean(), index=days.index)
households = customers[customers['profile'].astype(str).str.contains('hogares')].index.values
I_train, I_test, I_validate = self.split(households, seed)
self.scaler_consumptions = RobustScaler(quantile_range=(0,75))
self.scaler_consumptions.fit(consumptions_daily_mean.loc[:, I_train].values.reshape(-1, 1))
self.scaler_weather = RobustScaler(quantile_range=(0,75))
self.scaler_weather.fit(weather_daily_mean.loc[:, I_train].values.reshape(-1, 1))
self.scaler_day_of_month = RobustScaler(quantile_range=(0,75))
self.scaler_day_of_month.fit(days['day_of_month'].values.reshape(-1, 1))
self.scaler_month = RobustScaler(quantile_range=(0,75))
self.scaler_month.fit(days['month'].values.reshape(-1, 1))
X = weather_forecast.loc[:, I_train]
X.index = pd.MultiIndex.from_arrays([X.index.date, X.index.time], names=['date','time'])
X = X.stack().unstack(level=1)
self.scaler_weather_forecast = RobustScaler(quantile_range=(0,75))
self.scaler_weather_forecast.fit(X)
scalers = self.get_scalers()
dates = consumptions_daily_mean.index.date
self.days = days
self.consumptions_daily_mean = consumptions_daily_mean
self.weather_daily_mean = weather_daily_mean
return [I_train, I_test, I_validate], dates, scalers
def get_scalers(self):
scalers = {'scaler_consumptions': self.scaler_consumptions,
'scaler_weather': self.scaler_weather,
'scaler_weather_forecast': self.scaler_weather_forecast,
'scaler_day_of_month': self.scaler_day_of_month,
'scaler_month': self.scaler_month
}
return scalers
def extract(self, date, customer, consumptions, weather, holidays, offset=1e-5):
days = self.days
consumptions_daily_mean = self.consumptions_daily_mean
weather_daily_mean = self.weather_daily_mean
X1 = consumptions.loc[time_from_to(date, self.t_consumption_hourly),customer].values
X1 = np.array(X1).reshape(-1) + offset
X2 = weather.loc[time_from_to(date, self.t_weather_hourly),customer].values
X2 = self.scaler_weather_forecast.transform(np.array(X2).reshape(3,24)).reshape(-1)
X2 = (X2+1)/2
X3 = days.loc[pd.Timestamp(date),'month']
X3 = self.scaler_month.transform(np.array([X3]).reshape(-1,1))[0][0]
X3 = (X3+1)/2
X4 = days.loc[pd.Timestamp(date),'day_of_month']
X4 = self.scaler_day_of_month.transform(np.array([X4]).reshape(-1,1))[0][0]
X4 = (X4+1)/2
X5 = days.loc[pd.Timestamp(date),'day_category']
X5 = self.encoder.transform(np.array(X5).reshape(1, -1)).reshape(-1)
X6 = consumptions_daily_mean.loc[day_from_to(date, self.t_consumption_daily), customer].values
X6 = X6/(2*self.scaler_consumptions.scale_) + offset
X7 = weather_daily_mean.loc[day_from_to(date, self.t_weather_daily), customer].values
X7 = self.scaler_weather.transform(np.array([X7]).reshape(-1,1)).reshape(-1)
X7 = (X7+1)/2
Xa = np.concatenate([X1, X2, [X3], [X4], X5]).reshape(1,-1)
ya = consumptions.loc[time_from_to(date, [0, 0]),customer].values
Xb = np.concatenate([X6, X7, X5, [X4], [X3]]).reshape(1,-1)
yb = consumptions_daily_mean.loc[day_from_to(date, [0, 0]), customer].values
return [Xa, ya, Xb, yb]
###Output
_____no_output_____
###Markdown
Prediction Model
###Code
class PredictionModel:
def __init__(self, Xy, properties):
scalers = properties.get('scalers')
self.inputs = properties.get('inputs', [247, 35])
self.scale = scalers['scaler_consumptions'].scale_
self.offset = scalers['scaler_consumptions'].center_
self.Xa_train, self.ya_train, self.Xb_train, self.yb_train = self.get_samples(Xy[0])
self.Xa_test, self.ya_test, self.Xb_test, self.yb_test = self.get_samples(Xy[1])
self.Xa_validate, self.ya_validate, self.Xb_validate, self.yb_validate = self.get_samples(Xy[2])
return
def get_samples(self, Xy):
Xa = np.concatenate([Xy[i][0] for i in range(len(Xy))])
ya = np.concatenate([Xy[i][1] for i in range(len(Xy))]).reshape(-1,24)
Xb = np.concatenate([Xy[i][2] for i in range(len(Xy))])
yb = np.concatenate([Xy[i][3] for i in range(len(Xy))]).reshape(-1,1)
return Xa, ya, Xb, yb
###Output
_____no_output_____
###Markdown
Loading Data
###Code
loader = DataLoader(path + '/data', path + '/models')
consumptions, consumptions_scaled, weather, weather_forecast = loader.load_data()
customers, profiles, holidays = loader.load_metadata()
scalers = loader.load_scalers()
encoder = OneHotEncoder(sparse=False)
encoder.fit(np.arange(5).reshape(-1,1))
properties['encoder'] = encoder
###Output
_____no_output_____
###Markdown
Extracting Features
###Code
extractor = FeatureExtractor(properties, refit=True)
I, dates, scalers = extractor.fit(consumptions, weather, weather_forecast, holidays)
features = [[extractor.extract(date, customer, consumptions_scaled, weather, holidays)
for date in dates[15:] for customer in Ii] for Ii in I]
###Output
_____no_output_____
###Markdown
Preparing the Model
###Code
properties['scalers'] = scalers
model = PredictionModel(features, properties)
###Output
_____no_output_____
###Markdown
Recorded and Preprocessed Data Consumptions The dataframe *consumptions* contains the recorded hourly energy consumptions in kWh (*rows*) for all 499 customers (*columns*).
###Code
consumptions
_ = consumptions.loc[:, customer].plot(title='Hourly Energy Consumption of Customer #' + \
str(customer) + ' for the Entire Recording Period (Year)',
ylabel='energy consumption [kWh]', color='b', alpha=0.7)
_ = consumptions.loc[date, customer].plot(title='Hourly Energy Consumption of Customer #' + \
str(customer) + ' for a Selected Date (Day)',
ylabel='energy consumption [kWh]', color='b', alpha=0.7)
###Output
_____no_output_____
###Markdown
The dataframe *consumptions_daily_mean* contains the daily mean energy consumption for all customers.
###Code
extractor.consumptions_daily_mean
###Output
_____no_output_____
###Markdown
The dataframe *consumptions_scaled* contains the scaled energy consumptions for all customers. The raw values (see *consumptions*) are divided by the daily mean consumption (see *consumptions_daily_mean*) of the customer.
###Code
_ = consumptions_scaled.loc[date, customer].plot(title='Scaled Hourly Energy Consumption of Customer #' + \
str(customer) + ' for a Selected Date (Day)',
ylabel='scaled abstract consumption [without unit]', color='b', alpha=0.7)
###Output
_____no_output_____
###Markdown
The daily sum (total) of the scaled consumption values (see *consumptions_scaled*) is *24*. Weather The dataframe *weather* contains the weather data (outside temperature in °C) for the customers region with an hourly time resolution.
###Code
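# Sanity check of the claim above: the scaled values of any customer sum to 24
# over each day (24 hourly values, each divided by the daily mean).
print(consumptions_scaled.loc[date, customer].sum())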
weather
_ = weather.loc[:, customer].plot(title='Hourly Outside Temperature for the Region of Customer #' + \
str(customer) + ' for the Entire Recording Period (Year)',
ylabel='outside temperature [°C]', color='g', alpha=0.7)
_ = weather.loc[date, customer].plot(title='Hourly Outside Temperature for the Region of Customer #' + \
str(customer) + ' for a Selected Date (Day)',
ylabel='outside temperature [°C]', color='g', alpha=0.7)
###Output
_____no_output_____
###Markdown
Day Category The dataframe *days* contains all day-related properties like the *day of week* (0...6), the *day of month* (1...31), the *month* (1...12) within the year, and the *day category* (0: Monday, 1: Tuesday-Thursday, 2: Friday, 3: Saturday, 4: Sunday/Holiday).
###Code
extractor.days
_ = extractor.days.loc['2019-12','day_category'].plot(kind='bar', color='orange', alpha=0.4,
title='Day Categories for December 2019')
###Output
_____no_output_____
###Markdown
Feature and Target Vectors The *features* contain three subsets: for training, test and validation.
###Code
len(features)
###Output
_____no_output_____
###Markdown
The training data contain 80% of the total number of samples. The test and validation sets contain 10% each.
###Code
_ = [print(s + ': ' + str(len(f)) + ' samples') for f,s in zip(features, ['train', 'test', 'validate'])]
###Output
train: 88200 samples
test: 10850 samples
validate: 10850 samples
###Markdown
Each sample consists of 4 parts: the feature vector *Xa* and the target value *ya* for the submodel *A* (intraday prediction) and the feature vector *Xb* and the target value *yb* for the submodel *B* (day-ahead prediction).
###Code
len(features[0][0])
###Output
_____no_output_____
###Markdown
The feature vector *Xa* contains a total of **247** values: scaled hourly consumption values (7dx24h), scaled hourly temperature values (3dx24h), the month, the day of month and the onehot-encoded day category. The feature vector *Xb* contains a total of **35** values: scaled daily mean consumption values (14d), scaled daily mean temperature values (14d), the onehot-encoded day category, the day of month and the month.The model *A* predicts an abstract, unitless intraday curve with hourly resolution (24 values, target vector *ya*) and the model *B* predicts the day-ahead energy consumption (1 value, target vector *yb*).
###Code
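# Arithmetic check of the sizes described above:
# Xa: 7*24 hourly consumptions + 3*24 hourly temperatures + month + day of month + 5 one-hot = 247
# Xb: 14 daily consumptions + 14 daily temperatures + 5 one-hot + day of month + month = 35
assert 7*24 + 3*24 + 1 + 1 + 5 == 247
assert 14 + 14 + 5 + 1 + 1 == 35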
_ = [print(s + ': ' + str(f.shape)) for f,s in zip(features[0][0], ['Xa', 'ya', 'Xb', 'yb'])]
features[0][0]
###Output
_____no_output_____ |
CFMM_for_DFT/tests/.ipynb_checkpoints/test_case_4-checkpoint.ipynb | ###Markdown
Test Case 4 Calculating interaction of randomly generated sphecial gaussian charge distribution with similar r_ext in 3D space
###Code
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import numpy as np
from fast_multipole_method import operation as op
from scipy.special import erf
from scipy.special import erfc
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
plt.style.use('ggplot')
def plot_3d(x):
    """plot particles in 3 dimensions"""
    y = np.transpose(x)
    fig = plt.figure(figsize=(8,8))
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(y[0], y[1], y[2])
    ax.view_init(30)  # view_init modifies the axes in place and returns None
    plt.show()
    return
# btm_level == 3, l = 1/8, for WS_index <=2
a_min = 2 * ((erfc(1-1e-16) * 8) ** 2)
a_min
#case 4.1 construction: random spherical distributions, similar extent to make WS<=2
num_distribution = 100
x_i = np.ndarray(shape=(3, num_distribution))
x_i[0] = 20 * np.random.rand(num_distribution) - 10
x_i[1] = 20 * np.random.rand(num_distribution) - 10
x_i[2] = 20 * np.random.rand(num_distribution) - 10
x_i = np.transpose(x_i)
K_i = np.ones(num_distribution)
a_i = 10 * np.random.rand(num_distribution) + a_min
#case 4.2 construction: uniformly distributed spherical distributions, same WS index
num_distribution_in_a_box = 1
num_distribution_1D = 3
num_distribution = num_distribution_in_a_box * num_distribution_1D **3
x_i = np.zeros(shape=(num_distribution,3))
for i in range(0,num_distribution_1D):
for j in range(0,num_distribution_1D):
for k in range(0,num_distribution_1D):
x_i[i*num_distribution_1D*num_distribution_1D+j*num_distribution_1D+k] = [i,j,k]
K_i = np.ones(num_distribution)
a_i = 10 * np.random.rand(num_distribution) + a_min
[x0_i, scale_factor] = op.cartesian_scaling_to_unit_range(x_i)
plot_3d(x0_i)
a_i
# analytical answer
pair_potential = np.zeros(shape=(num_distribution,num_distribution))
pre_factor = np.power(np.pi, 3)
for i in range(0, num_distribution):
for j in range(i+1, num_distribution):
pre_factor2 = K_i[i] * K_i[j] / ( np.power(a_i[i]*a_i[j], 1.5) * op.distance_cal(x0_i[i], x0_i[j]))
t_sqrt = np.sqrt(a_i[i]*a_i[j]/(a_i[i]+a_i[j])) * op.distance_cal(x0_i[i], x0_i[j]) * scale_factor[1]
pair_potential[i][j] = pre_factor * pre_factor2 * erf(t_sqrt)
pair_potential /= scale_factor[1]
pair_potential
J_analytic = np.zeros(num_distribution)
for i in range(0, num_distribution):
for j in range(0, num_distribution):
if j<i:
J_analytic[i] += pair_potential[j][i]
if j>i:
J_analytic[i] += pair_potential[i][j]
J_analytic
total_energy = 0.5 * sum(J_analytic)
total_energy
from fast_multipole_method import fmm
from fast_multipole_method import fmm_q_gaussain_distribution as fq
# build list of q_source
q_source = np.ndarray(shape=(len(x0_i)), dtype=fq)
for i in range(0, len(x0_i)):
q_source[i] = fq(x0_i[i], a_i[i], K_i[i])
btm_level = 3
p = 10
ws_index = 3
[J_far_field, J_near_field] = fmm(q_source, btm_level, p, scale_factor[1], ws_index)
J_far_field
J_near_field
J_total = J_far_field + J_near_field
J_total
total_energy = 0.5 * sum(J_total)
total_energy
J_error = np.abs(J_total-J_analytic) / J_analytic
J_error
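# Summarise the accuracy in one number: the largest per-source relative
# deviation of the FMM result from the analytic answer.
print('max relative error: %.3e' % J_error.max())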
###Output
_____no_output_____ |
Pytorch notebooks/Intro_DL_Pytorch_MNIST.ipynb | ###Markdown
transforms.Compose()* ToTensor() : Converts PIL Image to a tensor. Value range [0, 255] --> [0, 1]* Normalize(mean , std) : Normalizes data of each channel according to mean and standard deviation provided
###Code
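# A sketch of the transform pipeline described above (assumption: the earlier
# data-loading cells used mean/std of 0.5 — adjust to match the actual values).
from torchvision import transforms
example_transform = transforms.Compose([
    transforms.ToTensor(),                 # PIL [0, 255] -> float tensor [0, 1]
    transforms.Normalize((0.5,), (0.5,)),  # per-channel (x - mean) / std
])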
class Classifier(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 128)
self.fc4 = nn.Linear(128, 64)
self.fc5 = nn.Linear(64, 10)
self.dropout = nn.Dropout(p=0.3)
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.dropout(F.relu(self.fc1(x)))
x = self.dropout(F.relu(self.fc2(x)))
x = self.dropout(F.relu(self.fc3(x)))
x = self.dropout(F.relu(self.fc4(x)))
x = F.log_softmax(self.fc5(x), dim=1)
return x
model = Classifier()
# The model already outputs log-probabilities (log_softmax), so NLLLoss is the
# matching criterion; CrossEntropyLoss would apply log_softmax a second time.
criterion = nn.NLLLoss()
optimizer = optim.Adagrad(model.parameters(), lr = 0.01, lr_decay = 1e-3)
images, labels = next(iter(trainloader))
epochs = 15
train_log, test_log = [], []
for epoch in range(epochs):
running_loss = 0
for images, labels in trainloader:
# Flatten the images
images = images.view(images.shape[0], -1)
optimizer.zero_grad()
scores = model(images)
loss = criterion(scores, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
else:
test_loss = 0
accuracy = 0
with torch.no_grad():
# Set the model to eval mode. Dropout = 0
model.eval()
for images, labels in testloader:
scores = model(images)
test_loss += criterion(scores, labels)
ps = torch.exp(scores)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor))
train_log.append(running_loss/len(trainloader))
test_log.append(test_loss/len(testloader))
print("Epoch: {}/{} |".format(epoch+1, epochs),
"Training Loss: {:.3f} |".format(running_loss/len(trainloader)),
"Test Loss: {:.3f} |".format(test_loss/len(testloader)),
"Test Accuracy: {:.3f}".format(accuracy/len(testloader)))
# Set the model back to train mode. Dropout value restored
model.train()
plt.plot(train_log, label='Training loss')
plt.plot(test_log, label='Validation loss')
plt.legend(frameon=False)
%config InlineBackend.figure_format = 'retina'
def view_classify(img, ps, version="MNIST"):
''' Function for viewing an image and it's predicted classes.
'''
ps = ps.data.numpy().squeeze()
fig, (ax1, ax2) = plt.subplots(figsize=(6,9), ncols=2)
ax1.imshow(img.resize_(1, 28, 28).numpy().squeeze())
ax1.axis('off')
ax2.barh(np.arange(10), ps)
ax2.set_aspect(0.1)
ax2.set_yticks(np.arange(10))
if version == "MNIST":
ax2.set_yticklabels(np.arange(10))
elif version == "Fashion":
ax2.set_yticklabels(['T-shirt/top',
'Trouser',
'Pullover',
'Dress',
'Coat',
'Sandal',
'Shirt',
'Sneaker',
'Bag',
'Ankle Boot'], size='small');
ax2.set_title('Class Probability')
ax2.set_xlim(0, 1.1)
plt.tight_layout()
model.eval()
images, labels = next(iter(trainloader))
img = images[1].view(1, 784)
with torch.no_grad():
output = model.forward(img)
ps = torch.exp(output)
view_classify(img.view(1, 28, 28), ps, version="Fashion")
###Output
_____no_output_____
###Markdown
Saving & loading a model* Save the architecture + weights + biases in a dictionary
###Code
# Saving a model
# print(model.state_dict().keys())
checkpoint = {'input_size': 784,
'output_size': 10,
              # Classifier has no .hidden_layers attribute, so list the sizes explicitly
              'hidden_layers': [model.fc1.out_features, model.fc2.out_features,
                                model.fc3.out_features, model.fc4.out_features],
'state_dict': model.state_dict()}
torch.save(checkpoint, 'checkpoint.pth')
# Load the model back (a sketch matching the Classifier defined above;
# fc_model.Network from the course utilities is not defined in this notebook)
# checkpoint = torch.load('checkpoint.pth')
# model = Classifier()
# model.load_state_dict(checkpoint['state_dict'])
###Output
_____no_output_____ |
Section-04-Missing-Data-Imputation/04.14-Automatic-Imputation-Method-Detection-Sklearn.ipynb | ###Markdown
Automatic selection of best imputation technique with SklearnIn this notebook we will do a grid search over the imputation methods available in Scikit-learn to determine which imputation technique works best for this dataset and the machine learning model of choice.We will also train a very simple machine learning model as part of a small pipeline.We will use the House Price dataset.- To download the dataset please visit the lecture **Datasets** in **Section 1** of the course.
###Code
import pandas as pd
import numpy as np
# import classes for imputation
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
# import extra classes for modelling
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split, GridSearchCV
np.random.seed(0)
# load dataset with all the variables
data = pd.read_csv('../houseprice.csv',)
data.head()
# find categorical variables
# those of type 'Object' in the dataset
features_categorical = [c for c in data.columns if data[c].dtypes=='O']
# find numerical variables
# those different from object and also excluding the target SalePrice
features_numerical = [c for c in data.columns if data[c].dtypes!='O' and c !='SalePrice']
# inspect the categorical variables
data[features_categorical].head()
# inspect the numerical variables
data[features_numerical].head()
# separate intro train and test set
X_train, X_test, y_train, y_test = train_test_split(
data.drop('SalePrice', axis=1), # just the features
data['SalePrice'], # the target
test_size=0.3, # the percentage of obs in the test set
random_state=0) # for reproducibility
X_train.shape, X_test.shape
# We create the preprocessing pipelines for both
# numerical and categorical data
# adapted from Scikit-learn code available here under BSD3 license:
# https://scikit-learn.org/stable/auto_examples/compose/plot_column_transformer_mixed_types.html
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('numerical', numeric_transformer, features_numerical),
('categorical', categorical_transformer, features_categorical)])
# Note that to initialise the pipeline we pass default arguments to the transformers.
# Those will be overridden during the gridsearch below.
# Append classifier to preprocessing pipeline.
# Now we have a full prediction pipeline.
clf = Pipeline(steps=[('preprocessor', preprocessor),
('regressor', Lasso(max_iter=2000))])
# now we create the grid with all the parameters that we would like to test
param_grid = {
'preprocessor__numerical__imputer__strategy': ['mean', 'median'],
'preprocessor__categorical__imputer__strategy': ['most_frequent', 'constant'],
'regressor__alpha': [10, 100, 200],
}
# (the iid parameter was removed in newer scikit-learn versions)
#grid_search = GridSearchCV(clf, param_grid, cv=5, iid=False, n_jobs=-1, scoring='r2')
grid_search = GridSearchCV(clf, param_grid, cv=5, n_jobs=-1, scoring='r2')

# cv=5 sets the number of cross-validation folds
# n_jobs=-1 indicates to use all available cpus
# scoring='r2' indicates to evaluate using the r squared
# for more details in the grid parameters visit:
#https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
###Output
_____no_output_____
###Markdown
When setting the grid parameters, this is how we indicate the parameters:'preprocessor__numerical__imputer__strategy': ['mean', 'median'],the above line of code indicates that I would like to test the mean and the median in the imputer step of the numerical processor.'preprocessor__categorical__imputer__strategy': ['most_frequent', 'constant']the above line of code indicates that I would like to test the most frequent or a constant value in the imputer step of the categorical processor.'regressor__alpha': [10, 100, 200]the above line of code indicates that I want to test those 3 values for the alpha parameter of Lasso. Note that Lasso is the 'regressor' step of our pipeline above. A quick way to list these names is shown in the next cell.
###Code
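# The double-underscore names used in param_grid follow scikit-learn's nested
# parameter convention (step__substep__param). You can list every tunable name
# for this pipeline with clf.get_params(); here we print just the ones we tune:
for name in sorted(clf.get_params().keys()):
    if 'imputer__strategy' in name or name == 'regressor__alpha':
        print(name)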
# and now we train over all the possible combinations of the parameters above
grid_search.fit(X_train, y_train)
# and we print the best score over the train set
print(("best linear regression from grid search: %.3f"
% grid_search.score(X_train, y_train)))
# we can print the best estimator parameters like this
grid_search.best_estimator_
# and find the best fit parameters like this
grid_search.best_params_
# here we can see all the combinations evaluated during the gridsearch
grid_search.cv_results_['params']
# and here the scores for each of one of the above combinations
grid_search.cv_results_['mean_test_score']
# and finally let's check the performance over the test set
print(("best linear regression from grid search: %.3f"
% grid_search.score(X_test, y_test)))
###Output
best linear regression from grid search: 0.738
###Markdown
This model overfits to the train set, look at the r2 of 0.93 obtained for the train set vs 0.738 for the test set.We will try to reduce this over-fitting as we progress in the course.
###Code
import pprint
pprint.pprint(grid_search.cv_results_.keys())
# here we can see all the combinations evaluated during the gridsearch
grid_search.cv_results_
###Output
_____no_output_____
|
_notebooks/2020-12-23-simple-backprop.ipynb | ###Markdown
Bare-Bones Backpropagation> Demonstrating the simplest possible backpropagation implementation, with all the clutter removed.- toc:true- badges: true- comments: true- author: Charlie Blake- categories: [neural-networks, backpropagation]- image: images/blog/simple-backprop/viz.png Making Backprop Simple The first few times I came across backpropagation I struggled to get a feel for what was going on. It's not just enoughto follow the equations - I couldn't visualise the operations and updates, especially the mysterious backwards pass. If someone had shown me then how simple it was to implement the key part of the training algorithm that figures out how to update the weights, then it would have helped me a lot. I understood how the chain rule worked and its relevance here, but I didn't have a picture of it in my head. I got bogged down in the matrix notation and PyTorch tensors and lost sight of what was really going on.So this is a simple-as-possible backprop implementation, to clear up that confusion. I don't go into the maths; I assume the reader already knows what's going on in theory, but doesn't have a great feel for what happens in practice.This can also serve as a reference for how to implement this from scratch a clean way. Enjoy! Setup First things first, there's some setup to do. This isn't a tutorial on data loading, so I'm just going to paste somecode for loading up our dataset and we can ignore the details. The only thing worth noting is that we'll be using theclassic *MNIST* dataset:
###Code
#collapse-hide
import math
import torch
import torchvision
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader
from torch.nn.functional import one_hot
from functools import reduce
import altair as alt
import pandas as pd
batch_sz = 64
train = DataLoader(MNIST('data/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
lambda x: torch.flatten(x),
])
), batch_size=batch_sz, shuffle=True)
###Output
_____no_output_____
###Markdown
This `train` dataloader can be iterated over and returns minibatches of shape `(batch_sz, input_dim)`.In our case, these values are `(64, 28*28=784)` Layer-by-layer We'll construct our neural network by making classes for each layer. We'll use a standard setup of: linear layer, ReLU, linear layer, softmax; plus a cross-entropy loss.For each layer class we require two methods:- **`__call__(self, x)`**: implements the forward pass. `__call__` allows us to feed an input through the layer by treating the initialised layer object as a function. For example: `relu_layer = ReLU(); output = relu_layer(input)`.- **`bp_input(self, grad)`**: implements the backward pass, allowing us to backpropagate the gradient vector through the layer. The `grad` parameter is a matrix of partial derivatives of the loss, with respect to the data sent from the given layer to the next. As such, it is a `(batch_sz, out)` matrix. The job of `bp_input` is to return a `(batch_sz, in)` matrix to be sent to the next layer by multiplying `grad` by the derivative of the forward pass with respect to the _input_ (or an equivalent operation).There are two other methods we sometimes wish to implement for different layers:- **`__init__(self, ...)`**: initialises the layer, e.g. weights.- **`bp_param(self, grad)`**: the "final stop" of the backwards pass. Only applicable for layers with trainable weights. Similar to `bp_input`, but calculates the derivative with respect to the _weights_ of the layer. Should return a matrix with the same shape as the weights (`self.W`) to be updated. > Important: The key point to recall when visualising this is that when we have a batch dimension it is always the first dimension. For both the forward and backward pass. This makes everything much simpler! Linear LayerLet's start with the linear layer. We do the following:1. We start by initialising the weights (in this case using the Xavier initialisation).2. We then implement the call method. Rather than adding an explicit bias, we append a vector of ones to the layer's input (this is equivalent, and makes backprop simpler).3. Backpropagation with respect to the input is just right multiplication by the transpose of the weight matrix (adjusted to remove the added 1s column)4. Backpropagation with respect to the weights is left multiplication by the transpose of the input matrix.
###Code
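# Quick sanity check of the minibatch shapes described above:
X_batch, y_batch = next(iter(train))
print(X_batch.shape, y_batch.shape)  # expect torch.Size([64, 784]) torch.Size([64])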
class LinearLayer:
def __init__(self, in_sz, out_sz): self.W = self._xavier_init(in_sz + 1, out_sz) # (in+1, out)
def _xavier_init(self, i, o): return torch.Tensor(i, o).uniform_(-1, 1) * math.sqrt(6./(i + o))
def __call__(self, X): # (batch_sz, in)
self.X = torch.cat([X, torch.ones(X.shape[0], 1)], dim=1) # (batch_sz, in+1)
return self.X @ self.W # (batch_sz, in+1) @ (in+1, out) = (batch_sz, out)
def bp_input(self, grad): return (grad @ self.W.T)[:,:-1] # (batch_sz, out) @ (out, in) = (batch_sz, in)
def bp_param(self, grad): return self.X.T @ grad # (in+1, batch_sz) @ (batch_sz, out) = (in+1, out)
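# A minimal shape check (a sketch) of the forward/backward API described above:
_lin = LinearLayer(784, 32)
_out = _lin(torch.rand(4, 784))            # forward: (4, 784) -> (4, 32)
_grad = _lin.bp_input(torch.rand(4, 32))   # backward: (4, 32) -> (4, 784)
assert _out.shape == (4, 32) and _grad.shape == (4, 784)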
###Output
_____no_output_____
###Markdown
ReLU LayerSome non-linearity is a must! Bring on the RelU function. The implementation is pretty obvious here. `clamp()` is doing all the work.
###Code
class ReLU:
def __call__(self, X):
self.X = X
return X.clamp(min=0) # (batch_sz, in)
def bp_input(self, grad): return grad * (self.X > 0).float() # (batch_sz, in)
###Output
_____no_output_____
###Markdown
Softmax & Cross Entropy LossWhat? Both at once, why would you do this??This is quite common, and I can justify it in two ways:1. This layer-loss combination often go together, so why not put them all in one layer? This saves us from having to do two separate forward and backward propagation steps.2. I won't prove it here, but it turns out that the derivative of the loss with respect to the input to the softmax, is much simpler than the two intermediate derivative operations, and bypasses the numerical stability issues that arise when we do the exponential and the logarithm. Phew!The downside here is that is we're just doing _inference_ then we only want the softmax output. But for the purposes of this tutorial we only really care about training. So this will do just fine!There's a trick in the second line of the softmax implementation: it turns out subtracting the argmax from the softmax input keeps the output the same, but the intermediate values are more numerically stable. How neat!Finally, we examine the backprop step. It's so simple! Our starting grad for backprop (the initial `grad` value passed in is just the ones vector) is the difference in our predicted output vector and the actual one-hot encoded label. This is so intuitive and wonderful.> Tip: This is exactly the same derivative as when we don't use a softmax layer and apply an MSE loss (i.e. the regression case). We can thus think of softmax + cross entropy as a way of getting to the same underlying backprop, but in the classification case.
###Code
class SoftmaxCrossEntropyLoss: # (batch_sz, in=out) for all dims in this layer
def __call__(self, X, Y):
self.Y = Y
self.Y_prob = self._softmax(X)
self.loss = self._cross_entropy_loss(Y, self.Y_prob)
return self.Y_prob, self.loss
def _softmax(self, X):
self.X = X
X_adj = X - X.amax(dim=1, keepdim=True)
exps = torch.exp(X_adj)
return exps / exps.sum(axis=1, keepdim=True)
def _cross_entropy_loss(self, Y, Y_prob): return (-Y * torch.log(Y_prob)).sum(axis=1).mean()
def bp_input(self, grad): return (self.Y_prob - self.Y) * grad
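# Optional numerical sanity check (a sketch): compare bp_input of the combined
# softmax + cross-entropy layer against a central finite-difference estimate of
# d(loss)/dX. With grad = ones/batch_sz, bp_input should match closely.
_layer = SoftmaxCrossEntropyLoss()
_X = torch.randn(4, 10)
_Y = one_hot(torch.randint(0, 10, (4,)), 10).float()
_layer(_X, _Y)
_analytic = _layer.bp_input(torch.ones(4, 10) / 4)  # loss is a mean over the batch of 4
_eps, _numeric = 1e-4, torch.zeros_like(_X)
for _i in range(_X.shape[0]):
    for _j in range(_X.shape[1]):
        _Xp, _Xm = _X.clone(), _X.clone()
        _Xp[_i, _j] += _eps
        _Xm[_i, _j] -= _eps
        _numeric[_i, _j] = (_layer(_Xp, _Y)[1] - _layer(_Xm, _Y)[1]) / (2 * _eps)
print('max abs diff vs finite differences:', (_analytic - _numeric).abs().max().item())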
###Output
_____no_output_____
###Markdown
Putting it all togetherLet's bring these layers together in a class: our `NeuralNet` implementation.The `evaluate()` function does two things. Firstly, it runs the forward pass by chaining the `__call__()` functions, to generate label probabilities. Secondly, it uses the labels passed to it to calculate the loss and percentage correctly predicted.> Note: for this simplified example we don't have a pure inference function, but we could add one with a small change to `SoftmaxCrossEntropyLoss`.The `gradient_descent()` function then gets the matrix of updates for each weight matrix and applies the update. The key bit here is how `backprop()` works. Going backwards through the computation graph we chain the backprop with respect to input methods. Then for each weighted layer we want to update, we apply the backprop with respect to parameters method to the relevant gradient vector.
###Code
class NeuralNet:
def __init__(self, input_size=28*28, hidden_size=32, output_size=10, alpha=0.001):
self.alpha = alpha
self.z1 = LinearLayer(input_size, hidden_size)
self.a1 = ReLU()
self.z2 = LinearLayer(hidden_size, output_size)
self.loss = SoftmaxCrossEntropyLoss()
def evaluate(self, X, Y):
out = self.z2(self.a1(self.z1(X)))
correct = torch.eq(out.argmax(axis=1), Y).double().mean()
Y_prob, loss = self.loss(out, one_hot(Y, 10))
return Y_prob, correct, loss
def gradient_descent(self):
delta_W1, delta_W2 = self.backprop()
self.z1.W -= self.alpha * delta_W1
self.z2.W -= self.alpha * delta_W2
def backprop(self):
d_out = torch.ones(*self.loss.Y.shape)
d_z2 = self.loss.bp_input(d_out)
d_a1 = self.z2.bp_input(d_z2)
d_z1 = self.a1.bp_input(d_a1)
d_w2 = self.z2.bp_param(d_z2)
d_w1 = self.z1.bp_param(d_z1)
return d_w1, d_w2
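# Minimal smoke test (a sketch): one random batch through the full pipeline,
# checking shapes and that a gradient-descent step runs without errors.
_net = NeuralNet()
_probs, _correct, _loss = _net.evaluate(torch.rand(8, 28 * 28), torch.randint(0, 10, (8,)))
assert _probs.shape == (8, 10)
_net.gradient_descent()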
###Output
_____no_output_____
###Markdown
Training the modelWe're almost there! I won't go into this bit too much because this tutorial isn't about training loops, but it's all very standard here.We break the training data into minibatches and train on them over a couple of epochs. The evaluation metrics plotted are those recorded during regular training.> Warning: these results are only on the training set! In practice we should *always* plot performance on a test set, but we don't want to clutter the tutorial with this extra detail.
###Code
#collapse-hide
model = NeuralNet()
stats = {'correct': [], 'loss': [], 'epoch': []}
for epoch in range(2):
correct, loss = 0, 0
for i, (X, y) in enumerate(train):
y_prob, batch_correct, batch_loss = model.evaluate(X, y)
model.gradient_descent()
correct += batch_correct / len(train)
loss += batch_loss / len(train)
stats['correct'].append(correct.item())
stats['loss'].append(loss.item())
stats['epoch'].append(epoch)
print(f'epoch: {epoch} | correct: {correct:.2f}, loss: {loss:.2f}')
base = alt.Chart(pd.DataFrame.from_dict(stats)).mark_line() \
.encode(alt.X('epoch', axis=alt.Axis(title='epoch')))
line1 = base.mark_line(stroke='#5276A7', interpolate='monotone') \
.encode(alt.Y('loss' , axis=alt.Axis(title='Loss' , titleColor='#5276A7'), scale=alt.Scale(domain=[0.0, max(stats['loss' ])])), tooltip='loss' )
line2 = base.mark_line(stroke='#57A44C', interpolate='monotone') \
.encode(alt.Y('correct', axis=alt.Axis(title='Correct', titleColor='#57A44C'), scale=alt.Scale(domain=[min(stats['correct']), 1.0])), tooltip='correct')
alt.layer(line1, line2).resolve_scale(y = 'independent')
###Output
_____no_output_____ |
master/tutorial_ptrans.ipynb | ###Markdown
Translação PeriódicaA translação periódica é uma translação de uma imagem que se repete periodicamente como se fosse umaparede ladrilhada e cada ladrilho fosse a imagem em questão. A translação periódica da imagem **f** pelo deslocamento (dh,dw) é ilustrada no exemplo a seguir.A imagem **f4** é montada a partir de 4 imagens. Sua montagem é feita com o auxílio das funções**vstack** e **hstack**. Estas funções do NumPy concatenam as imagens da tupla na vertical (vstack) ouhorizontal (hstack). No caso iremos fazer uma translação por (-30,-80). Note que no trecho do códigoabaixo, estamos usando mapeamento inverso e há necessidade de multiplicarmos (dh,dw) por -1. Comoa translação é periódica e estamos montando as 4 imagens, além de multiplicarmos por -1 somamos (H,W)e fazemos o módulo por (H,W) para que o valor fique entre (0,0) e (H-1,W-1):
###Code
%matplotlib inline
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
f = mpimg.imread('../data/cameraman.tif')
plt.imshow(f,cmap='gray'),plt.title('original')
H,W = f.shape
dh,dw = (-30,-80)
dhi = (-dh + H) % H # inverse mapping, as done in iaffine (inverse of T)
dwi = (-dw + W) % W # inverse mapping
f2 = np.vstack((f,f))
f4 = np.hstack((f2,f2))
plt.figure(1)
plt.imshow(f4,cmap='gray'), plt.title('periodic tiling of f')
f4[dhi:dhi+H,dwi    ] = 255
f4[dhi:dhi+H,dwi+W-1] = 255
f4[dhi    ,dwi:dwi+W] = 255
f4[dhi+H-1,dwi:dwi+W] = 255
plt.figure(2)
plt.imshow(f4,cmap='gray'), plt.title('translated f outlined')
g = f4[dhi:dhi+H,dwi:dwi+W]
plt.figure(3)
plt.imshow(g,cmap='gray'), plt.title('f periodically translated by (%d,%d)' % (dh,dw))
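# Equivalent one-liner (a quick check): np.roll performs the same periodic
# translation directly. g above also carries the white marker lines drawn into
# f4, so we compare only the interior pixels.
g_roll = np.roll(f, shift=(dh, dw), axis=(0, 1))
print('interior matches np.roll:', np.array_equal(g[1:-1, 1:-1], g_roll[1:-1, 1:-1]))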
###Output
_____no_output_____ |
SourceCodes/P2_evaluation/ARIMA/ARIMA__SP_P500.ipynb | ###Markdown
Packages
###Code
!pip install pmdarima
!pip install arch
!pip install yfinance
import numpy as np
import pandas as pd
import scipy
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
import statsmodels.graphics.tsaplots as sgt
import statsmodels.tsa.stattools as sts
from statsmodels.tsa.arima_model import ARIMA
from arch import arch_model
import yfinance
import warnings
warnings.filterwarnings("ignore")
sns.set()
###Output
_____no_output_____
###Markdown
Loading the data
###Code
raw_data = yfinance.download (tickers = "WMT", start = "2015-01-02",
end = "2020-05-08", interval = "1d", group_by = 'ticker', auto_adjust = True, threads = True)
df_comp = raw_data.copy()
df_comp=df_comp.asfreq('d')
df_comp=df_comp.fillna(method='ffill')
Apple_df = df_comp
###Output
_____no_output_____
###Markdown
Creating Returns
###Code
Apple_df['return'] = df_comp.Close.pct_change(1).mul(100)
Apple_df = Apple_df.drop(columns='Volume')
###Output
_____no_output_____
###Markdown
Walmart dataset
###Code
Apple_df.tail()
Apple_df.describe()
###Output
_____no_output_____
###Markdown
S&P500 dataset
###Code
df_comp.tail()
df_comp['norm_return'] = df_comp['return'].div(df_comp['return'].iloc[1])*100  # 'return' is the column created above
###Output
_____no_output_____
###Markdown
Splitting the Data
###Code
size = int(len(df_comp)*0.8)
df_train, df_test = df_comp.iloc[:size], df_comp.iloc[size:]
df_train.shape
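# Before fitting an AR model it is common to check stationarity; a quick sketch
# using the already-imported statsmodels.tsa.stattools (sts):
adf_stat, p_value = sts.adfuller(df_train.Close)[:2]
print('ADF statistic: %.3f, p-value: %.3f' % (adf_stat, p_value))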
###Output
_____no_output_____
###Markdown
Fitting a Model
###Code
model_ar = ARIMA(df_train.Close, order = (1,0,0))
results_ar = model_ar.fit()
model_ar_510 = ARIMA(df_train.Close, order = (5,0,0))
results_ar_510 = model_ar_510.fit()
###Output
_____no_output_____
###Markdown
Simple Forecasting
###Code
df_train.tail()
df_test.head()
df_test.tail()
# create variables that will help us change the periods easily instead of typing them up every time
# make sure the start and end dates are business days, otherwise the code will result in an error
start_date = "2015-04-16"
end_date = "2020-05-11"
df_pred = results_ar.predict(start = start_date, end = end_date)
df_pred_510 = results_ar_510.predict(start = start_date, end = end_date)
df_pred_510[start_date:end_date].plot(figsize = (20,5), color = "red")
df_test.Close[start_date:end_date].plot(color = "blue")
plt.title("S&P500 ARIMA prediction", size = 12)
plt.show()
df_test.shape
from statsmodels.tools.eval_measures import rmse
from statsmodels.tools.eval_measures import mse
print("RMSE %.4f" % rmse(df_test.Close,df_pred))
print("MSE %.4f" %)
print("MSE %4f" % mse(df_test.Close,df_pred_510))
###Output
_____no_output_____ |
Big-Data-Clusters/CU8/Public/content/common/sop011-set-kubernetes-context.ipynb | ###Markdown
SOP011 - Set kubernetes configuration context=============================================Description-----------Set the kubernetes configuration to use.NOTE: To view available contexts use the following TSG:- [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb)Steps----- Parameters
###Code
context_name = None
###Output
_____no_output_____
###Markdown
Common functions
Define helper functions used in this notebook.
###Code
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
first_run = True
rules = None
debug_logging = False
def run(cmd, return_output=False, no_output=False, retry_count=0, base64_decode=False, return_as_json=False):
"""Run shell command, stream stdout, print stderr and optionally return output
NOTES:
1. Commands that need this kind of ' quoting on Windows e.g.:
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
Need to actually pass in as '"':
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
`iter(p.stdout.readline, b'')`
The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
"""
MAX_RETRIES = 5
output = ""
retry = False
global first_run
global rules
if first_run:
first_run = False
rules = load_rules()
# When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
#
# ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
#
if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
cmd = cmd.replace("\n", " ")
# shlex.split is required on bash and for Windows paths with spaces
#
cmd_actual = shlex.split(cmd)
# Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
#
user_provided_exe_name = cmd_actual[0].lower()
# When running python, use the python in the ADS sandbox ({sys.executable})
#
if cmd.startswith("python "):
cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
# On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
# with:
#
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
#
# Setting it to a default value of "en_US.UTF-8" enables pip install to complete
#
if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
os.environ["LC_ALL"] = "en_US.UTF-8"
# When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
#
if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")
# To aid supportability, determine which binary file will actually be executed on the machine
#
which_binary = None
# Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
# get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
# of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
# always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
# look for the 2nd installation of CURL in the path)
if platform.system() == "Windows" and cmd.startswith("curl "):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, "curl.exe")
if os.path.exists(p) and os.access(p, os.X_OK):
if p.lower().find("system32") == -1:
cmd_actual[0] = p
which_binary = p
break
# Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
# seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
#
# NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
#
if which_binary == None:
which_binary = shutil.which(cmd_actual[0])
# Display an install HINT, so the user can click on a SOP to install the missing binary
#
if which_binary == None:
print(f"The path used to search for '{cmd_actual[0]}' was:")
print(sys.path)
if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
else:
cmd_actual[0] = which_binary
start_time = datetime.datetime.now().replace(microsecond=0)
print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
print(f" cwd: {os.getcwd()}")
# Command-line tools such as CURL and AZDATA HDFS commands output
# scrolling progress bars, which causes Jupyter to hang forever, to
# workaround this, use no_output=True
#
# Work around an infinite hang when a notebook generates a non-zero return code, break out, and do not wait
#
wait = True
try:
if no_output:
p = Popen(cmd_actual)
else:
p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
with p.stdout:
for line in iter(p.stdout.readline, b''):
line = line.decode()
if return_output:
output = output + line
else:
if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
regex = re.compile(' "(.*)"\: "(.*)"')
match = regex.match(line)
if match:
if match.group(1).find("HTML") != -1:
display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
else:
display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
wait = False
break # otherwise infinite hang, have not worked out why yet.
else:
print(line, end='')
if rules is not None:
apply_expert_rules(line)
if wait:
p.wait()
except FileNotFoundError as e:
if install_hint is not None:
display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
if not no_output:
for line in iter(p.stderr.readline, b''):
try:
line_decoded = line.decode()
except UnicodeDecodeError:
# NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
#
# \xa0
#
# For example see this in the response from `az group create`:
#
# ERROR: Get Token request returned http error: 400 and server
# response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
# The refresh token has expired due to inactivity.\xa0The token was
# issued on 2018-10-25T23:35:11.9832872Z
#
# which generates the exception:
#
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
#
print("WARNING: Unable to decode stderr line, printing raw bytes:")
print(line)
line_decoded = ""
pass
else:
# azdata emits a single empty line to stderr when doing an hdfs cp, don't
# print this empty "ERR:" as it confuses.
#
if line_decoded == "":
continue
print(f"STDERR: {line_decoded}", end='')
if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
exit_code_workaround = 1
# inject HINTs to next TSG/SOP based on output in stderr
#
if user_provided_exe_name in error_hints:
for error_hint in error_hints[user_provided_exe_name]:
if line_decoded.find(error_hint[0]) != -1:
display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
# apply expert rules (to run follow-on notebooks), based on output
#
if rules is not None:
apply_expert_rules(line_decoded)
# Verify if a transient error, if so automatically retry (recursive)
#
if user_provided_exe_name in retry_hints:
for retry_hint in retry_hints[user_provided_exe_name]:
if line_decoded.find(retry_hint) != -1:
if retry_count < MAX_RETRIES:
print(f"RETRY: {retry_count} (due to: {retry_hint})")
retry_count = retry_count + 1
output = run(cmd, return_output=return_output, retry_count=retry_count)
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
# WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
# don't wait here, if success known above
#
if wait:
if p.returncode != 0:
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
else:
if exit_code_workaround !=0 :
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
print(f'\nSUCCESS: {elapsed}s elapsed.\n')
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
def load_json(filename):
"""Load a json file from disk and return the contents"""
with open(filename, encoding="utf8") as json_file:
return json.load(json_file)
def load_rules():
"""Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable"""
# Load this notebook as json to get access to the expert rules in the notebook metadata.
#
try:
j = load_json("sop011-set-kubernetes-context.ipynb")
except:
pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?
else:
if "metadata" in j and \
"azdata" in j["metadata"] and \
"expert" in j["metadata"]["azdata"] and \
"expanded_rules" in j["metadata"]["azdata"]["expert"]:
rules = j["metadata"]["azdata"]["expert"]["expanded_rules"]
rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.
# print (f"EXPERT: There are {len(rules)} rules to evaluate.")
return rules
def apply_expert_rules(line):
"""Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so
inject a 'HINT' to the follow-on SOP/TSG to run"""
global rules
for rule in rules:
notebook = rule[1]
cell_type = rule[2]
output_type = rule[3] # i.e. stream or error
output_type_name = rule[4] # i.e. ename or name
output_type_value = rule[5] # i.e. SystemExit or stdout
details_name = rule[6] # i.e. evalue or text
expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!
if debug_logging:
print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")
if re.match(expression, line, re.DOTALL):
if debug_logging:
print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook))
match_found = True
display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')
# Hints for binary (transient fault) retry, (known) error and install guide
#
retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond'], 'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use', 'Login timeout expired (0) (SQLDriverConnect)', 'SSPI Provider: No Kerberos credentials available', 'ERROR: No credentials were supplied, or the credentials were unavailable or inaccessible.']}
error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']], 'azdata': [['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', '../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Error processing command: "ApiError', 'TSG110 - Azdata returns ApiError', '../repair/tsg110-azdata-returns-apierror.ipynb'], ['Error processing command: "ControllerError', 'TSG036 - Controller logs', '../log-analyzers/tsg036-get-controller-logs.ipynb'], ['ERROR: 500', 'TSG046 - Knox gateway logs', '../log-analyzers/tsg046-get-knox-logs.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ["Can't open lib 'ODBC Driver 17 for SQL Server", 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb'], ["[Errno 2] No such file or directory: '..\\\\", 'TSG053 - ADS Provided Books must be saved before use', '../repair/tsg053-save-book-first.ipynb'], ["NameError: name 'azdata_login_secret_name' is not defined", 'SOP013 - Create secret for azdata login (inside cluster)', '../common/sop013-create-secret-for-azdata-login.ipynb'], ['ERROR: No credentials were supplied, or the credentials were unavailable or inaccessible.', "TSG124 - 'No credentials were supplied' error from azdata login", '../repair/tsg124-no-credentials-were-supplied.ipynb'], ['Please accept the license terms to use this product through', "TSG126 - azdata fails with 'accept the license terms to use this product'", '../repair/tsg126-accept-license-terms.ipynb']]}
install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb'], 'azdata': ['SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb']}
###Output
_____no_output_____
###Markdown
List available contexts
###Code
if context_name is None:
contexts = run('kubectl config get-contexts --output name', return_output=True)
contexts = contexts.split("\n")[:-1]
counter = 0
for context in contexts:
print(f'{counter}. {context}')
counter += 1
else:
print(f'context_name: {context_name}')
###Output
_____no_output_____
###Markdown
Select a context (if not set as a parameter)
###Code
if context_name is None:
context_name = contexts[5] # <-- select context here (set ordinal)
print(f'context_name: {context_name}')
###Output
_____no_output_____
###Markdown
Log out using azdata
To avoid a situation where the `Kubernetes` context is for a cluster which is not hosting the Big Data Cluster that `azdata` is currently logged into.
###Code
run('azdata logout')
###Output
_____no_output_____
###Markdown
Set the kubernetes configuration to use
###Code
run(f'kubectl config use-context {context_name}')
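# Optional hedged check that the switch took effect; `current-context`
# is a standard `kubectl config` subcommand.
run('kubectl config current-context')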
print('Notebook execution complete.')
###Output
_____no_output_____ |
07/Animal_Panda_Homework_7_Skinner.ipynb | ###Markdown
*1. Import pandas with the right name
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
*2. Set all graphics from matplotlib to display inline
###Code
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
*3. Read in the csv file
###Code
#for encoding the command would look smth like this:
#df = pd.read_csv("XXXXXXXXXXXXXXXXX.csv", encoding='mac_roman')
df = pd.read_csv("Animal_Data/07-hw-animals.csv")
###Output
_____no_output_____
###Markdown
*4. Display the names of the columns in the csv
###Code
df.columns
###Output
_____no_output_____
###Markdown
*5. Display the first 3 animals.
###Code
df.head(3)
###Output
_____no_output_____
###Markdown
*6. Sort the animals to see the 3 longest animals.
###Code
df.sort_values(by='length', ascending=False).head(3)
###Output
_____no_output_____
###Markdown
*7. What are the counts of the different values of the "animal" column? a.k.a. how many cats and how many dogs.
###Code
df['animal'].value_counts()
###Output
_____no_output_____
###Markdown
*8. Only select the dogs.
###Code
#df['animal'] == 'dog' this just tests, whether row is a dog or not, True or False
#is_dog = df['animal'] == 'dog'
#df[is_dog]
df[df['animal'] == 'dog']
###Output
_____no_output_____
###Markdown
*9. Display all of the animals that are greater than 40 cm.
###Code
df[df['length'] > 40]
#del df['feet']
###Output
_____no_output_____
###Markdown
*10. 'length' is the animal's length in cm. Create a new column called inches that is the length in inches.
###Code
df['inches'] = df['length'] * 0.394
df.head()
###Output
_____no_output_____
###Markdown
*11. Save the cats to a separate variable called "cats." Save the dogs to a separate variable called "dogs."
###Code
dogs = df[df['animal'] == 'dog']
cats = df[df['animal'] == 'cat']
###Output
_____no_output_____
###Markdown
*12. Display all of the animals that are cats and above 12 inches long. First do it using the "cats" variable, then do it using your normal dataframe.
###Code
cats[cats['inches'] > 12]
# On the full dataframe the two conditions must be parenthesized, because
# & binds more tightly than the comparison operators
# (e.g. 3 > 2 & 4 > 3 parses as 3 > (2 & 4) > 3, which is False).
df[(df['animal'] == 'cat') & (df['inches'] > 12)]
###Output
_____no_output_____
###Markdown
*13. What's the mean length of a cat?
###Code
df[df['animal'] == 'cat'].describe()
###Output
_____no_output_____
###Markdown
*14. What's the mean length of a dog?
###Code
df[df['animal'] == 'dog'].describe()
###Output
_____no_output_____
###Markdown
*15. Use groupby to accomplish both of the above tasks at once.
###Code
df.groupby(['animal'])['inches'].describe()
###Output
_____no_output_____
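###Markdown
A more direct hedged alternative that returns just the two means rather than the full describe() table:
###Code
df.groupby('animal')['length'].mean()
###Output
_____no_output_____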
###Markdown
*16. Make a histogram of the length of dogs. I apologize that it is so boring.
###Code
dogs['length'].hist()
###Output
_____no_output_____
###Markdown
*17. Change your graphing style to be something else (anything else!)
###Code
import matplotlib.pyplot as plt
plt.style.available
plt.style.use('ggplot')
dogs['inches'].hist()
###Output
_____no_output_____
###Markdown
*18. Make a horizontal bar graph of the length of the animals, with their name as the label (look at the billionaires notebook I put on Slack!)
###Code
# A plain vertical bar chart of the lengths (no name labels):
df['length'].plot(kind='bar')
# Horizontal bars labeled with each animal's name:
df.plot(kind='barh', x='name', y='length', legend=False)
###Output
_____no_output_____
###Markdown
*19. Make a sorted horizontal bar graph of the cats, with the larger cats on top.
###Code
# sort ascending (barh draws the first row at the bottom, so the longest cat ends up on top)
cats_sorted = cats.sort_values(by='length', ascending=True)
cats_sorted.plot(kind='barh', x='name', y='length', legend=False)
#or:
df[df['animal'] == 'cat'].sort_values(by='length', ascending=True).plot(kind='barh', x='name', y='length')
###Output
_____no_output_____ |
PA4/Bonus-Model_Policy_Network-HyperparameterTuning.ipynb | ###Markdown
Model-Based RL - Tuned
In this exercise you will implement a policy and model network which work in tandem to solve the CartPole reinforcement learning problem. This is a bonus task where this Model Policy Network is tuned.
Loading libraries and starting CartPole environment
###Code
from __future__ import print_function
import numpy as np
try:
import cPickle as pickle
except:
import pickle
import tensorflow as tf
%matplotlib inline
import matplotlib.pyplot as plt
import math
import sys
if sys.version_info.major > 2:
xrange = range
del sys
import gym
env = gym.make('CartPole-v0')
###Output
/home/u20842/.local/lib/python3.6/site-packages/gym/envs/registration.py:14: PkgResourcesDeprecationWarning: Parameters to load are deprecated. Call .resolve and .require separately.
result = entry_point.load(False)
###Markdown
Setting Hyper-parameters
###Code
# hyperparameters
H = 16 # number of hidden layer neurons
learning_rate = 1e-2
gamma = 0.99 # discount factor for reward
decay_rate = 0.99 # decay factor for RMSProp leaky sum of grad^2
resume = False # resume from previous checkpoint?
model_bs = 3 # Batch size when learning from model
real_bs = 3 # Batch size when learning from real environment
# model initialization
D = 4 # input dimensionality
###Output
_____no_output_____
###Markdown
Policy Network
###Code
tf.reset_default_graph()
observations = tf.placeholder(tf.float32, [None,4] , name="input_x")
W1 = tf.get_variable("W1", shape=[4, H],
initializer=tf.contrib.layers.xavier_initializer())
layer1 = tf.nn.relu(tf.matmul(observations,W1))
W2 = tf.get_variable("W2", shape=[H, 1],
initializer=tf.contrib.layers.xavier_initializer())
score = tf.matmul(layer1,W2)
probability = tf.nn.sigmoid(score)
tvars = tf.trainable_variables()
input_y = tf.placeholder(tf.float32,[None,1], name="input_y")
advantages = tf.placeholder(tf.float32,name="reward_signal")
adam = tf.train.AdamOptimizer(learning_rate=learning_rate)
W1Grad = tf.placeholder(tf.float32,name="batch_grad1")
W2Grad = tf.placeholder(tf.float32,name="batch_grad2")
batchGrad = [W1Grad,W2Grad]
################################################################################
# TODO: Implement the loss function. #
# This sends the weights in the direction of making actions that gave good #
# advantage (reward overtime) more likely, and actions that didn't less likely.#
################################################################################
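# Note: y is recorded as 1 when action == 0 (see the training loop), so this
# expression reduces to log(P(action taken)): y=1 -> log(1 - probability),
# y=0 -> log(probability).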
loglik = tf.log(input_y*(input_y - probability) + (1 - input_y)*(input_y + probability))
loss = -tf.reduce_mean(loglik * advantages)
################################################################################
# END OF YOUR CODE #
################################################################################
newGrads = tf.gradients(loss,tvars)
updateGrads = adam.apply_gradients(zip(batchGrad,tvars))
###Output
_____no_output_____
###Markdown
Model NetworkHere we implement a multi-layer neural network that predicts the next observation, reward, and done state from a current state and action.
###Code
mH = 256 # model layer size
input_data = tf.placeholder(tf.float32, [None, 5])
with tf.variable_scope('rnnlm'):
softmax_w = tf.get_variable("softmax_w", [mH, 50])
softmax_b = tf.get_variable("softmax_b", [50])
previous_state = tf.placeholder(tf.float32, [None,5] , name="previous_state")
W1M = tf.get_variable("W1M", shape=[5, mH],
initializer=tf.contrib.layers.xavier_initializer())
B1M = tf.Variable(tf.zeros([mH]),name="B1M")
layer1M = tf.nn.relu(tf.matmul(previous_state,W1M) + B1M)
W2M = tf.get_variable("W2M", shape=[mH, mH],
initializer=tf.contrib.layers.xavier_initializer())
B2M = tf.Variable(tf.zeros([mH]),name="B2M")
layer2M = tf.nn.relu(tf.matmul(layer1M,W2M) + B2M)
wO = tf.get_variable("wO", shape=[mH, 4],
initializer=tf.contrib.layers.xavier_initializer())
wR = tf.get_variable("wR", shape=[mH, 1],
initializer=tf.contrib.layers.xavier_initializer())
wD = tf.get_variable("wD", shape=[mH, 1],
initializer=tf.contrib.layers.xavier_initializer())
bO = tf.Variable(tf.zeros([4]),name="bO")
bR = tf.Variable(tf.zeros([1]),name="bR")
bD = tf.Variable(tf.ones([1]),name="bD")
predicted_observation = tf.matmul(layer2M,wO,name="predicted_observation") + bO
predicted_reward = tf.matmul(layer2M,wR,name="predicted_reward") + bR
predicted_done = tf.sigmoid(tf.matmul(layer2M,wD,name="predicted_done") + bD)
true_observation = tf.placeholder(tf.float32,[None,4],name="true_observation")
true_reward = tf.placeholder(tf.float32,[None,1],name="true_reward")
true_done = tf.placeholder(tf.float32,[None,1],name="true_done")
predicted_state = tf.concat([predicted_observation,predicted_reward,predicted_done],1)
observation_loss = tf.square(true_observation - predicted_observation)
reward_loss = tf.square(true_reward - predicted_reward)
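# The next two lines form a log-loss: the argument of the log is the
# probability the model assigned to the true done/not-done outcome.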
done_loss = tf.multiply(predicted_done, true_done) + tf.multiply(1-predicted_done, 1-true_done)
done_loss = -tf.log(done_loss)
model_loss = tf.reduce_mean(observation_loss + done_loss + reward_loss)
modelAdam = tf.train.AdamOptimizer(learning_rate=learning_rate)
updateModel = modelAdam.minimize(model_loss)
###Output
_____no_output_____
###Markdown
Helper-functions
###Code
def resetGradBuffer(gradBuffer):
for ix,grad in enumerate(gradBuffer):
gradBuffer[ix] = grad * 0
return gradBuffer
def discount_rewards(r):
################################################################################
# TODO: Implement the discounted rewards function #
# Return discounted rewards weighed by gamma. Each reward will be replaced #
# with a weight reward that involves itself and all the other rewards occuring #
# after it. The later the reward after it happens, the less effect it has on #
# the current rewards's discounted reward #
# Hint: [r0, r1, r2, ..., r_N] will look someting like: #
# [(r0 + r1*gamma^1 + ... r_N*gamma^N), (r1 + r2*gamma^1 + ...), ...] #
################################################################################
rnew = np.copy(r)
for i in range(1, len(rnew)):
rnew[:len(r)-i] += gamma**i * r[i:]
return rnew
################################################################################
# END OF YOUR CODE #
################################################################################
# This function uses our model to produce a new state when given a previous state and action
def stepModel(sess, xs, action):
toFeed = np.reshape(np.hstack([xs[-1][0],np.array(action)]),[1,5])
myPredict = sess.run([predicted_state],feed_dict={previous_state: toFeed})
reward = myPredict[0][:,4]
observation = myPredict[0][:,0:4]
observation[:,0] = np.clip(observation[:,0],-2.4,2.4)
observation[:,2] = np.clip(observation[:,2],-0.4,0.4)
doneP = np.clip(myPredict[0][:,5],0,1)
if doneP > 0.1 or len(xs)>= 300:
done = True
else:
done = False
return observation, reward, done
###Output
_____no_output_____
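###Markdown
A quick hedged sanity check of `discount_rewards`: for three unit rewards and gamma = 0.99 the discounted values should be [1 + gamma + gamma^2, 1 + gamma, 1].
###Code
# Hedged sketch: verify discount_rewards on a tiny example.
print(discount_rewards(np.array([1.0, 1.0, 1.0]))) # expect [2.9701, 1.99, 1.0]
###Output
_____no_output_____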
###Markdown
Training the Policy and Model
###Code
xs,drs,ys,ds = [],[],[],[]
running_reward = None
reward_sum = 0
episode_number = 1
real_episodes = 1
init = tf.global_variables_initializer()
batch_size = real_bs
drawFromModel = False # When set to True, will use model for observations
trainTheModel = True # Whether to train the model
trainThePolicy = False # Whether to train the policy
switch_point = 1
# Launch the graph
with tf.Session() as sess:
rendering = False
sess.run(init)
observation = env.reset()
x = observation
gradBuffer = sess.run(tvars)
gradBuffer = resetGradBuffer(gradBuffer)
while episode_number <= 5000:
# Start displaying environment once performance is acceptably high.
if (reward_sum/batch_size > 150 and drawFromModel == False) or rendering == True :
# env.render()
rendering = True
x = np.reshape(observation,[1,4])
tfprob = sess.run(probability,feed_dict={observations: x})
action = 1 if np.random.uniform() < tfprob else 0
# record various intermediates (needed later for backprop)
xs.append(x)
y = 1 if action == 0 else 0
ys.append(y)
# step the model or real environment and get new measurements
if drawFromModel == False:
observation, reward, done, info = env.step(action)
else:
observation, reward, done = stepModel(sess,xs,action)
reward_sum += reward
ds.append(done*1)
drs.append(reward) # record reward (has to be done after we call step() to get reward for previous action)
if done:
if drawFromModel == False:
real_episodes += 1
episode_number += 1
# stack together all inputs, hidden states, action gradients, and rewards for this episode
epx = np.vstack(xs)
epy = np.vstack(ys)
epr = np.vstack(drs)
epd = np.vstack(ds)
xs,drs,ys,ds = [],[],[],[] # reset array memory
if trainTheModel == True:
################################################################################
# TODO: Run the model network and compute predicted_state #
# Output: 'pState' #
################################################################################
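# The model's input is [state, action]; the taken action is recovered as
# 1 - y, since y was recorded as 1 when action == 0.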
feed_dict = {
previous_state: np.hstack([epx[:-1], np.array([1-y for y in epy][:-1])]),
true_observation: epx[1:],
true_reward: epr[1:],
true_done: epd[1:]
}
tState = np.hstack([epx[1:], epr[1:], epd[1:]])
_, pState = sess.run([updateModel, predicted_state], feed_dict=feed_dict)
################################################################################
# END OF YOUR CODE #
################################################################################
if trainThePolicy == True:
################################################################################
# TODO: Run the policy network and compute newGrads #
# Output: 'tGrad' #
################################################################################
discounted_epr = discount_rewards(epr)
# size the rewards to be unit normal (helps control the gradient estimator variance)
discounted_epr -= np.mean(discounted_epr)
discounted_epr /= np.std(discounted_epr)
tGrad = sess.run(newGrads, feed_dict={observations: epx, input_y: epy, advantages: discounted_epr})
################################################################################
# END OF YOUR CODE #
################################################################################
# If the gradients have become NaN (NaN != NaN), end the training process
if np.sum(tGrad[0] == tGrad[0]) == 0:
break
for ix,grad in enumerate(tGrad):
gradBuffer[ix] += grad
if switch_point + batch_size == episode_number:
switch_point = episode_number
if trainThePolicy == True:
################################################################################
# TODO: #
# (1) Run the policy network and update gradients #
# (2) Reset gradBuffer to 0 #
################################################################################
sess.run(updateGrads, feed_dict={W1Grad: gradBuffer[0], W2Grad: gradBuffer[1]})
# gradBuffer reset is already done at the beginning of episode
################################################################################
# END OF YOUR CODE #
################################################################################
running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
if drawFromModel == False:
print('World Perf: Episode %f. Reward %f. action: %f. mean reward %f.' % (real_episodes,reward_sum/real_bs,action, running_reward/real_bs))
if reward_sum/batch_size >= 200:
break
reward_sum = 0
# Once the model has been trained on 100 episodes
if episode_number > 100:
################################################################################
# TODO: Alternating between training the policy from the model and training #
# the model from the real environment. #
################################################################################
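# Alternate between gathering real experience (to train the model) and
# generating imagined rollouts from the model (to train the policy).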
drawFromModel = not drawFromModel
trainTheModel = not trainTheModel
trainThePolicy = not trainThePolicy
################################################################################
# END OF YOUR CODE #
################################################################################
if drawFromModel == True:
observation = np.random.uniform(-0.1,0.1,[4]) # Generate reasonable starting point
batch_size = model_bs
else:
observation = env.reset()
batch_size = real_bs
print(real_episodes)
###Output
World Perf: Episode 4.000000. Reward 19.000000. action: 0.000000. mean reward 19.000000.
World Perf: Episode 7.000000. Reward 25.333333. action: 1.000000. mean reward 19.063333.
World Perf: Episode 10.000000. Reward 22.666667. action: 1.000000. mean reward 19.099367.
World Perf: Episode 13.000000. Reward 27.000000. action: 0.000000. mean reward 19.178373.
World Perf: Episode 16.000000. Reward 19.666667. action: 1.000000. mean reward 19.183256.
World Perf: Episode 19.000000. Reward 19.333333. action: 0.000000. mean reward 19.184757.
World Perf: Episode 22.000000. Reward 25.666667. action: 1.000000. mean reward 19.249576.
World Perf: Episode 25.000000. Reward 20.000000. action: 0.000000. mean reward 19.257080.
World Perf: Episode 28.000000. Reward 17.333333. action: 0.000000. mean reward 19.237843.
World Perf: Episode 31.000000. Reward 19.666667. action: 0.000000. mean reward 19.242131.
World Perf: Episode 34.000000. Reward 19.333333. action: 1.000000. mean reward 19.243043.
World Perf: Episode 37.000000. Reward 24.000000. action: 1.000000. mean reward 19.290612.
World Perf: Episode 40.000000. Reward 40.333333. action: 0.000000. mean reward 19.501040.
World Perf: Episode 43.000000. Reward 21.666667. action: 0.000000. mean reward 19.522696.
World Perf: Episode 46.000000. Reward 23.666667. action: 0.000000. mean reward 19.564136.
World Perf: Episode 49.000000. Reward 17.333333. action: 0.000000. mean reward 19.541828.
World Perf: Episode 52.000000. Reward 27.666667. action: 0.000000. mean reward 19.623076.
World Perf: Episode 55.000000. Reward 21.333333. action: 1.000000. mean reward 19.640179.
World Perf: Episode 58.000000. Reward 20.666667. action: 1.000000. mean reward 19.650443.
World Perf: Episode 61.000000. Reward 16.666667. action: 0.000000. mean reward 19.620606.
World Perf: Episode 64.000000. Reward 19.666667. action: 0.000000. mean reward 19.621066.
World Perf: Episode 67.000000. Reward 21.666667. action: 1.000000. mean reward 19.641522.
World Perf: Episode 70.000000. Reward 32.333333. action: 0.000000. mean reward 19.768440.
World Perf: Episode 73.000000. Reward 27.333333. action: 0.000000. mean reward 19.844089.
World Perf: Episode 76.000000. Reward 34.333333. action: 1.000000. mean reward 19.988982.
World Perf: Episode 79.000000. Reward 20.333333. action: 1.000000. mean reward 19.992425.
World Perf: Episode 82.000000. Reward 20.000000. action: 1.000000. mean reward 19.992501.
World Perf: Episode 85.000000. Reward 23.333333. action: 1.000000. mean reward 20.025909.
World Perf: Episode 88.000000. Reward 35.000000. action: 1.000000. mean reward 20.175650.
World Perf: Episode 91.000000. Reward 20.333333. action: 0.000000. mean reward 20.177227.
World Perf: Episode 94.000000. Reward 35.000000. action: 1.000000. mean reward 20.325455.
World Perf: Episode 97.000000. Reward 19.666667. action: 0.000000. mean reward 20.318867.
World Perf: Episode 100.000000. Reward 12.666667. action: 0.000000. mean reward 20.242345.
World Perf: Episode 103.000000. Reward 13.333333. action: 0.000000. mean reward 20.173255.
World Perf: Episode 106.000000. Reward 26.666667. action: 1.000000. mean reward 20.160816.
World Perf: Episode 109.000000. Reward 23.000000. action: 0.000000. mean reward 20.107731.
World Perf: Episode 112.000000. Reward 19.000000. action: 1.000000. mean reward 20.023344.
World Perf: Episode 115.000000. Reward 20.666667. action: 0.000000. mean reward 20.020765.
World Perf: Episode 118.000000. Reward 25.333333. action: 0.000000. mean reward 19.961702.
World Perf: Episode 121.000000. Reward 23.000000. action: 1.000000. mean reward 19.928999.
World Perf: Episode 124.000000. Reward 18.666667. action: 0.000000. mean reward 19.872879.
World Perf: Episode 127.000000. Reward 17.000000. action: 1.000000. mean reward 19.760763.
World Perf: Episode 130.000000. Reward 19.666667. action: 0.000000. mean reward 21.086786.
World Perf: Episode 133.000000. Reward 16.000000. action: 0.000000. mean reward 20.955912.
World Perf: Episode 136.000000. Reward 27.000000. action: 0.000000. mean reward 20.866470.
World Perf: Episode 139.000000. Reward 17.333333. action: 0.000000. mean reward 20.729610.
World Perf: Episode 142.000000. Reward 30.333333. action: 0.000000. mean reward 20.679577.
World Perf: Episode 145.000000. Reward 25.000000. action: 0.000000. mean reward 20.827681.
World Perf: Episode 148.000000. Reward 23.666667. action: 1.000000. mean reward 23.354219.
World Perf: Episode 151.000000. Reward 23.333333. action: 0.000000. mean reward 26.192713.
World Perf: Episode 154.000000. Reward 30.000000. action: 0.000000. mean reward 26.605247.
World Perf: Episode 157.000000. Reward 54.000000. action: 1.000000. mean reward 26.655001.
World Perf: Episode 160.000000. Reward 33.000000. action: 0.000000. mean reward 26.577667.
World Perf: Episode 163.000000. Reward 29.666667. action: 0.000000. mean reward 26.596146.
World Perf: Episode 166.000000. Reward 27.333333. action: 1.000000. mean reward 26.538157.
World Perf: Episode 169.000000. Reward 64.333333. action: 1.000000. mean reward 26.696426.
World Perf: Episode 172.000000. Reward 43.000000. action: 1.000000. mean reward 27.239616.
World Perf: Episode 175.000000. Reward 48.666667. action: 0.000000. mean reward 29.538530.
World Perf: Episode 178.000000. Reward 26.000000. action: 1.000000. mean reward 29.289825.
World Perf: Episode 181.000000. Reward 38.333333. action: 1.000000. mean reward 29.181463.
World Perf: Episode 184.000000. Reward 62.666667. action: 1.000000. mean reward 30.186125.
World Perf: Episode 187.000000. Reward 22.666667. action: 1.000000. mean reward 32.867023.
World Perf: Episode 190.000000. Reward 63.000000. action: 1.000000. mean reward 32.903473.
World Perf: Episode 193.000000. Reward 98.333333. action: 0.000000. mean reward 33.292240.
World Perf: Episode 196.000000. Reward 65.333333. action: 1.000000. mean reward 35.173031.
World Perf: Episode 199.000000. Reward 41.666667. action: 1.000000. mean reward 38.038906.
World Perf: Episode 202.000000. Reward 44.000000. action: 1.000000. mean reward 37.771332.
World Perf: Episode 205.000000. Reward 66.666667. action: 1.000000. mean reward 37.779060.
World Perf: Episode 208.000000. Reward 60.333333. action: 0.000000. mean reward 37.720272.
World Perf: Episode 211.000000. Reward 34.000000. action: 1.000000. mean reward 37.419216.
World Perf: Episode 214.000000. Reward 30.000000. action: 0.000000. mean reward 37.620312.
World Perf: Episode 217.000000. Reward 46.666667. action: 0.000000. mean reward 37.427105.
World Perf: Episode 220.000000. Reward 40.333333. action: 1.000000. mean reward 37.433071.
World Perf: Episode 223.000000. Reward 41.333333. action: 1.000000. mean reward 37.371838.
World Perf: Episode 226.000000. Reward 43.333333. action: 1.000000. mean reward 37.280632.
World Perf: Episode 229.000000. Reward 61.666667. action: 1.000000. mean reward 37.347809.
World Perf: Episode 232.000000. Reward 72.000000. action: 1.000000. mean reward 37.598980.
World Perf: Episode 235.000000. Reward 77.333333. action: 1.000000. mean reward 40.539330.
World Perf: Episode 238.000000. Reward 60.333333. action: 1.000000. mean reward 41.300999.
World Perf: Episode 241.000000. Reward 31.333333. action: 1.000000. mean reward 40.893749.
World Perf: Episode 244.000000. Reward 42.333333. action: 1.000000. mean reward 41.976902.
World Perf: Episode 247.000000. Reward 85.666667. action: 1.000000. mean reward 42.332020.
World Perf: Episode 250.000000. Reward 67.333333. action: 1.000000. mean reward 45.062092.
World Perf: Episode 253.000000. Reward 78.333333. action: 0.000000. mean reward 47.869877.
World Perf: Episode 256.000000. Reward 56.000000. action: 0.000000. mean reward 47.626255.
World Perf: Episode 259.000000. Reward 71.333333. action: 0.000000. mean reward 50.488789.
World Perf: Episode 262.000000. Reward 92.666667. action: 1.000000. mean reward 50.599895.
World Perf: Episode 265.000000. Reward 53.666667. action: 0.000000. mean reward 53.073410.
World Perf: Episode 268.000000. Reward 74.666667. action: 0.000000. mean reward 53.037830.
World Perf: Episode 271.000000. Reward 85.333333. action: 1.000000. mean reward 55.152660.
World Perf: Episode 274.000000. Reward 57.666667. action: 1.000000. mean reward 55.088223.
|
nbs/65_medical.text.ipynb | ###Markdown
Medical Text
> Medical NLP data and models
`fastai.medical.text` is coming later!
###Code
#export
from fastai.basics import *
#hide
from nbdev.showdoc import *
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
Medical Text
> Medical NLP data and models
`fastai.medical.text` is coming in late 2019 or early 2020!
###Code
#export
from fastai2.basics import *
from nbdev.showdoc import *
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_test.ipynb.
Converted 01_core.foundation.ipynb.
Converted 01a_core.utils.ipynb.
Converted 01b_core.dispatch.ipynb.
Converted 01c_core.transform.ipynb.
Converted 02_core.script.ipynb.
Converted 03_torch_core.ipynb.
Converted 03a_layers.ipynb.
Converted 04_data.load.ipynb.
Converted 05_data.core.ipynb.
Converted 06_data.transforms.ipynb.
Converted 07_data.block.ipynb.
Converted 08_vision.core.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09a_vision.data.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.model.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 90_xse_resnext.ipynb.
Converted 96_data.external.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
###Markdown
Medical Text
> Medical NLP data and models
`fastai.medical.text` is coming later!
###Code
#export
from fastai2.basics import *
#hide
from nbdev.showdoc import *
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
Medical Text
> Medical NLP data and models
`fastai.medical.text` is coming later!
###Code
#|export
from __future__ import annotations
from fastai.basics import *
#|hide
from nbdev.showdoc import *
###Output
_____no_output_____
###Markdown
Export -
###Code
#|hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
Medical Text> Medical NLP data and models `fastai.medical.text` is coming later!
###Code
#export
from fastai.basics import *
#hide
from nbdev.showdoc import *
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
Medical Text> Medical NLP data and models `fastai.medical.text` is coming in late 2019 or early 2020!
###Code
#export
from fastai2.test import *
from fastai2.core import *
from fastai2.data.all import *
from fastai2.optimizer import *
from fastai2.learner import *
from fastai2.metrics import *
from nbdev.showdoc import *
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_test.ipynb.
Converted 01_core.foundation.ipynb.
Converted 01a_core.utils.ipynb.
Converted 01b_core.dispatch.ipynb.
Converted 01c_core.transform.ipynb.
Converted 02_core.script.ipynb.
Converted 03_torch_core.ipynb.
Converted 03a_layers.ipynb.
Converted 04_data.load.ipynb.
Converted 05_data.core.ipynb.
Converted 06_data.transforms.ipynb.
Converted 07_data.block.ipynb.
Converted 08_vision.core.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09a_vision.data.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.model.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 90_xse_resnext.ipynb.
Converted 96_data.external.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
|
ml/pandas_test.ipynb | ###Markdown
Operation | Syntax | Result
--|--|--
Select column | df[col] | Series
Select row by label | df.loc[label] | Series
Select row by integer location | df.iloc[loc] | Series
Slice rows | df[5:10] | DataFrame
Select rows by boolean vector | df[bool_vec] | DataFrame
###Code
df['id'].head()
print(df.iloc[0])
print(df[0:5])
###Output
id 1
category hotelname
ask 刚刚订单的酒店名称忘了帮我查下
Name: 0, dtype: object
id category ask
0 1 hotelname 刚刚订单的酒店名称忘了帮我查下
1 2 hotelname 用订单号查酒店
2 3 hotelname 我现在还不知道我住的酒店名字
3 4 hotelname 酒店名称忘了
4 5 hotelname 预定的酒店的名字没有
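###Markdown
A minimal, self-contained sketch of the selection operations from the table above, using a toy DataFrame (the `df` used in this notebook is assumed to be loaded elsewhere):
###Code
import pandas as pd

toy = pd.DataFrame({'id': [1, 2, 3, 4, 5], 'category': ['hotelname'] * 5})
print(toy['id'])           # select column -> Series
print(toy.loc[0])          # select row by label -> Series
print(toy.iloc[2])         # select row by integer location -> Series
print(toy[1:3])            # slice rows -> DataFrame
print(toy[toy['id'] > 3])  # select rows by boolean vector -> DataFrame
###Output
_____no_output_____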
|
06-tfx-interactive.ipynb | ###Markdown
06 - TFX Interactive Training Pipeline ExecutionThe purpose of this notebook is to interactively run the following TFX pipeline steps:1. Receive hyperparameters using the hyperparameters_gen custom python component2. Extract data from BigQuery using BigQueryExampleGen3. Validate the raw data using StatisticsGen and ExampleValidator4. Process the data using Transform5. Train a custom model using Trainer6. Evaluate and validate the custom model using Evaluator7. Save the blessed model to the model registry location using Pusher8. Upload the model to AI Platform using the aip_model_uploader custom python componentThe custom components are implemented in the [tfx_pipeline/components.py](tfx_pipeline/components) module. Setup
###Code
%load_ext autoreload
%autoreload 2
import os
import json
import numpy as np
import tfx
import tensorflow as tf
import tensorflow_transform as tft
import tensorflow_data_validation as tfdv
import tensorflow_model_analysis as tfma
from tensorflow_transform.tf_metadata import schema_utils
import logging
from src.common import features
from src.model_training import data
from src.pipelines import components
logging.getLogger().setLevel(logging.INFO)
print("Tensorflow Version:", tfx.__version__)
print("Tensorflow Version:", tf.__version__)
PROJECT = 'ksalama-cloudml'
REGION = 'us-central1'
BUCKET = 'ksalama-cloudml-us'
DATASET_DISPLAYNAME = 'chicago_taxi_tips'
MODEL_DISPLAYNAME = f'{DATASET_DISPLAYNAME}_classifier_custom'
WORKSPACE = f"gs://{BUCKET}/ucaip_demo/chicago_taxi/pipelines_interactive"
RAW_SCHEMA_DIR = 'src/raw_schema'
MLMD_SQLLITE = 'mlmd.sqllite'
ARTIFACT_STORE = os.path.join(WORKSPACE, 'tfx_artifacts')
MODEL_REGISTRY = os.path.join(WORKSPACE, 'model_registry')
PIPELINE_NAME = f'{DATASET_DISPLAYNAME}_training_pipeline'
PIPELINE_ROOT = os.path.join(ARTIFACT_STORE)
###Output
_____no_output_____
###Markdown
Create Interactive Context
###Code
CLEAN_WORKSPACE = True
if tf.io.gfile.exists(WORKSPACE) and CLEAN_WORKSPACE:
print("Removing previous artifacts...")
tf.io.gfile.rmtree(WORKSPACE)
if tf.io.gfile.exists(MLMD_SQLLITE) and CLEAN_WORKSPACE:
print("Deleting previous mlmd.sqllite...")
tf.io.gfile.rmtree(MLMD_SQLLITE)
print(f'Pipeline artifacts directory: {PIPELINE_ROOT}')
print(f'Local metadata SQLite path: {MLMD_SQLLITE}')
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext
connection_config = metadata_store_pb2.ConnectionConfig()
connection_config.sqlite.filename_uri = MLMD_SQLLITE
connection_config.sqlite.connection_mode = 3 # READWRITE_OPENCREATE
mlmd_store = mlmd.metadata_store.MetadataStore(connection_config)
context = InteractiveContext(
pipeline_name=PIPELINE_NAME,
pipeline_root=PIPELINE_ROOT,
metadata_connection_config=connection_config
)
###Output
_____no_output_____
###Markdown
1. Hyperparameter Generation
###Code
hyperparams_gen = components.hyperparameters_gen(
num_epochs=5,
learning_rate=0.001,
batch_size=512,
hidden_units='64,64',
)
context.run(hyperparams_gen, enable_cache=False)
json.load(
tf.io.gfile.GFile(
os.path.join(
hyperparams_gen.outputs.hyperparameters.get()[0].uri, 'hyperparameters.json')
)
)
###Output
_____no_output_____
###Markdown
2. Data Extraction
###Code
from src.utils import datasource_utils
from tfx.extensions.google_cloud_big_query.example_gen.component import BigQueryExampleGen
from tfx.proto import example_gen_pb2, transform_pb2
###Output
_____no_output_____
###Markdown
Extract train and eval splits
###Code
sql_query = datasource_utils.get_training_source_query(
PROJECT, REGION, DATASET_DISPLAYNAME, data_split='UNASSIGNED', limit=10000)
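# Split the UNASSIGNED examples into train/eval sets with a 4:1 (80%/20%) hash-bucket ratio.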
output_config = example_gen_pb2.Output(
split_config=example_gen_pb2.SplitConfig(
splits=[
example_gen_pb2.SplitConfig.Split(name="train", hash_buckets=4),
example_gen_pb2.SplitConfig.Split(name="eval", hash_buckets=1),
]
)
)
train_example_gen = BigQueryExampleGen(query=sql_query, output_config=output_config)
beam_pipeline_args=[
f"--project={PROJECT}",
f"--temp_location=gs://{BUCKET}/bq_tmp"
]
context.run(
train_example_gen,
beam_pipeline_args=beam_pipeline_args,
enable_cache=False
)
###Output
_____no_output_____
###Markdown
Extract test split
###Code
sql_query = datasource_utils.get_training_source_query(
PROJECT, REGION, DATASET_DISPLAYNAME, data_split='TEST', limit=1000)
output_config = example_gen_pb2.Output(
split_config=example_gen_pb2.SplitConfig(
splits=[
example_gen_pb2.SplitConfig.Split(name="test", hash_buckets=1),
]
)
)
test_example_gen = BigQueryExampleGen(query=sql_query, output_config=output_config)
beam_pipeline_args=[
f"--project={PROJECT}",
f"--temp_location=gs://{BUCKET}/bq_tmp"
]
context.run(
test_example_gen,
beam_pipeline_args=beam_pipeline_args,
enable_cache=False
)
train_uri = os.path.join(train_example_gen.outputs.examples.get()[0].uri, "Split-train/*")
print(train_uri)
source_raw_schema = tfdv.load_schema_text(os.path.join(RAW_SCHEMA_DIR, 'schema.pbtxt'))
raw_feature_spec = schema_utils.schema_as_feature_spec(source_raw_schema).feature_spec
def _parse_tf_example(tfrecord):
return tf.io.parse_single_example(tfrecord, raw_feature_spec)
tfrecord_filenames = tf.data.Dataset.list_files(train_uri)
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
dataset = dataset.map(_parse_tf_example)
for raw_features in dataset.shuffle(1000).batch(3).take(1):
for key in raw_features:
print(f"{key}: {np.squeeze(raw_features[key], -1)}")
print("")
###Output
_____no_output_____
###Markdown
3. Data Validation Import raw schema
###Code
schema_importer = tfx.components.common_nodes.importer_node.ImporterNode(
source_uri=RAW_SCHEMA_DIR,
artifact_type=tfx.types.standard_artifacts.Schema,
reimport=False
)
context.run(schema_importer)
###Output
_____no_output_____
###Markdown
Generate statistics
###Code
statistics_gen = tfx.components.StatisticsGen(
examples=train_example_gen.outputs.examples)
context.run(statistics_gen)
!rm -r {RAW_SCHEMA_DIR}/.ipynb_checkpoints/
###Output
_____no_output_____
###Markdown
Validate statistics against schema
###Code
example_validator = tfx.components.ExampleValidator(
statistics=statistics_gen.outputs.statistics,
schema=schema_importer.outputs.result,
)
context.run(example_validator)
context.show(example_validator.outputs.anomalies)
###Output
_____no_output_____
###Markdown
4. Data Transformation
###Code
_transform_module_file = 'src/preprocessing/transformations.py'
transform = tfx.components.Transform(
examples=train_example_gen.outputs.examples,
schema=schema_importer.outputs.result,
module_file=_transform_module_file,
splits_config=transform_pb2.SplitsConfig(
analyze=['train'], transform=['train', 'eval']),
)
context.run(transform, enable_cache=False)
train_uri = os.path.join(transform.outputs.transformed_examples.get()[0].uri, "Split-train/*")
transform_graph_uri = transform.outputs.transform_graph.get()[0].uri
tft_output = tft.TFTransformOutput(transform_graph_uri)
transform_feature_spec = tft_output.transformed_feature_spec()
for input_features, target in data.get_dataset(
train_uri, transform_feature_spec, batch_size=3).take(1):
for key in input_features:
print(f"{key} ({input_features[key].dtype}): {input_features[key].numpy().tolist()}")
print(f"target: {target.numpy().tolist()}")
###Output
_____no_output_____
###Markdown
5. Model Training
###Code
from tfx.components.base import executor_spec
from tfx.components.trainer import executor as trainer_executor
from tfx.dsl.components.common.resolver import Resolver
from tfx.dsl.experimental import latest_artifacts_resolver
from tfx.dsl.experimental import latest_blessed_model_resolver
###Output
_____no_output_____
###Markdown
Get the latest model to warm start
###Code
latest_model_resolver = Resolver(
strategy_class=latest_artifacts_resolver.LatestArtifactsResolver,
latest_model=tfx.types.Channel(type=tfx.types.standard_artifacts.Model)
)
context.run(latest_model_resolver, enable_cache=False)
###Output
_____no_output_____
###Markdown
Train the model
###Code
_train_module_file = 'src/model_training/runner.py'
trainer = tfx.components.Trainer(
custom_executor_spec=executor_spec.ExecutorClassSpec(trainer_executor.GenericExecutor),
module_file=_train_module_file,
transformed_examples=transform.outputs.transformed_examples,
schema=schema_importer.outputs.result,
base_model=latest_model_resolver.outputs.latest_model,
transform_graph=transform.outputs.transform_graph,
train_args=tfx.proto.trainer_pb2.TrainArgs(num_steps=0),
eval_args=tfx.proto.trainer_pb2.EvalArgs(num_steps=None),
hyperparameters=hyperparams_gen.outputs.hyperparameters,
)
context.run(trainer, enable_cache=False)
###Output
_____no_output_____
###Markdown
6. Model Evaluation Get the latest blessed model for model validation.
###Code
blessed_model_resolver = Resolver(
strategy_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
model=tfx.types.Channel(type=tfx.types.standard_artifacts.Model),
model_blessing=tfx.types.Channel(type=tfx.types.standard_artifacts.ModelBlessing)
)
context.run(blessed_model_resolver, enable_cache=False)
###Output
_____no_output_____
###Markdown
Evaluate and validate the model against the baseline model.
###Code
from tfx.components import Evaluator
eval_config = tfma.EvalConfig(
model_specs=[
tfma.ModelSpec(
signature_name='serving_tf_example',
label_key=features.TARGET_FEATURE_NAME,
prediction_key='probabilities')
],
slicing_specs=[
tfma.SlicingSpec(),
],
metrics_specs=[
tfma.MetricsSpec(
metrics=[
tfma.MetricConfig(class_name='ExampleCount'),
tfma.MetricConfig(
class_name='BinaryAccuracy',
threshold=tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.8}),
# Change threshold will be ignored if there is no
# baseline model resolved from MLMD (first run).
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10}))),
])
])
evaluator = Evaluator(
examples=test_example_gen.outputs.examples,
example_splits=['test'],
model=trainer.outputs.model,
baseline_model=blessed_model_resolver.outputs.model,
eval_config=eval_config,
schema=schema_importer.outputs.result
)
context.run(evaluator, enable_cache=False)
evaluation_results = evaluator.outputs.evaluation.get()[0].uri
print("validation_ok:", tfma.load_validation_result(evaluation_results).validation_ok)
for entry in list(tfma.load_metrics(evaluation_results))[0].metric_keys_and_values:
if entry.key.model_name == 'candidate':
value = entry.value.double_value.value
if value:
print(entry.key.name, ":", round(entry.value.double_value.value, 3))
###Output
_____no_output_____
###Markdown
7. Model Pushing
###Code
exported_model_location = os.path.join(MODEL_REGISTRY, f'{DATASET_DISPLAYNAME}_classifier')
push_destination=tfx.proto.pusher_pb2.PushDestination(
filesystem=tfx.proto.pusher_pb2.PushDestination.Filesystem(
base_directory=exported_model_location,
)
)
pusher = tfx.components.Pusher(
model=trainer.outputs.model,
model_blessing=evaluator.outputs.blessing,
push_destination=push_destination
)
context.run(pusher, enable_cache=False)
###Output
_____no_output_____
###Markdown
8. Model Upload to AI Platform
###Code
serving_runtime ='tf2-cpu.2-4'
serving_image_uri = f"gcr.io/cloud-aiplatform/prediction/{serving_runtime}:latest"
aip_model_uploader = components.aip_model_uploader(
project=PROJECT,
region=REGION,
model_display_name=MODEL_DISPLAYNAME,
pushed_model_location=exported_model_location,
serving_image_uri=serving_image_uri,
)
context.run(aip_model_uploader, enable_cache=False)
aip_model_uploader.outputs.uploaded_model.get()[0].get_string_custom_property('model_uri')
###Output
_____no_output_____ |
Implementations/FY22/BLD_DECAT_CompareDA_GOOGLE/Untitled.ipynb | ###Markdown
Compare builidng footprintsCompare the newly released Google footprints dataset to the DigitizeAfrica datahttps://sites.research.google/open-buildings/
###Code
import sys, os, importlib
import rasterio
import pandas as pd
import geopandas as gpd
import GOSTRocks.rasterMisc as rMisc
from GOSTRocks.misc import tPrint
from shapely.geometry import Point
from shapely.wkt import loads
google_file = '/home/wb411133/temp/0fd_buildings.csv.gz'
inG = pd.read_csv(google_file)
inG['geometry'] = inG['geometry'].apply(lambda x: loads(x))
inGeom = gpd.GeoDataFrame(inG, geometry='geometry', crs='epsg:4326')
da_file = '/home/public/Data/COUNTRY/GHA/Buildings/120519/AFRICA_GHANA_building_32630.shp'
inD = gpd.read_file(da_file)
# limit the datasets to the select AOI
aoi_file = 'Data/ACCRA_AOI.shp'
inA = gpd.read_file(aoi_file)
inG_aoi = inA.to_crs(inGeom.crs)
selG = inGeom.loc[inGeom.intersects(inG_aoi.unary_union)]
inD_aoi = inA.to_crs(inD.crs)
selD = inD.loc[inD.intersects(inD_aoi.unary_union)]
out_folder = '/home/wb411133/temp/B_comparison'
if not os.path.exists(out_folder):
os.makedirs(out_folder)
selG.to_file(os.path.join(out_folder, "google.shp"))
selD.to_file(os.path.join(out_folder, "da.shp"))
###Output
<ipython-input-29-cefd2e42a7bc>:5: UserWarning: Column names longer than 10 characters will be truncated when saved to ESRI Shapefile.
selG.to_file(os.path.join(out_folder, "google.shp"))
###Markdown
Rasterize buildings
###Code
dir(rMisc)
inD_pt = selD.copy()
inD_pt['geometry'] = inD_pt['geometry'].apply(lambda x: x.centroid)
inD_pt.head()
inG_pt = selG.copy()
inG_pt['geometry'] = inG_pt['geometry'].apply(lambda x: x.centroid)
inG_pt.head()
inG_pt = inG_pt.to_crs(inD_pt.crs)
res = 100
out_google = os.path.join(out_folder, f"google_buildings_{res}.tif")
gR = rMisc.rasterizeDataFrame(inG_pt, out_google, mergeAlg='ADD', res = res)
out_da = os.path.join(out_folder, f"da_buildings_{res}.tif")
daR = rMisc.rasterizeDataFrame(inD_pt, out_da, mergeAlg='ADD', templateRaster = out_google)
# Calculate differences between rasterized solutions
diff_image = gR['vals'] - daR['vals']
with rasterio.open(os.path.join(out_folder, f"bldg_difference_{res}.tif"), 'w', **gR['meta']) as outR:
outR.write_band(1, diff_image)
###Output
{'init': 'epsg:32630'}
|
examples/Conley_Index_Examples.ipynb | ###Markdown
Leslie map numerical
###Code
# Define Leslie map
def f(x):
th1 = 19.6
th2 = 23.68
return [(th1 * x[0] + th2 * x[1]) * math.exp (-0.1 * (x[0] + x[1])), 0.7 * x[0]]
# Define box map for f
def F(rect):
return CMGDB.BoxMap(f, rect, padding=True)
subdiv_min = 20
subdiv_max = 30
lower_bounds = [-0.001, -0.001]
upper_bounds = [90.0, 70.0]
model = CMGDB.Model(subdiv_min, subdiv_max, lower_bounds, upper_bounds, F)
%%time
morse_graph, map_graph = CMGDB.ComputeConleyMorseGraph(model)
CMGDB.PlotMorseGraph(morse_graph, cmap=matplotlib.cm.cool)
CMGDB.PlotMorseSets(morse_graph, cmap=matplotlib.cm.cool, fig_w=5, fig_h=5)
###Output
_____no_output_____
###Markdown
Leslie map interval arithmetic
###Code
# Define interval box map for f
def IntervalBoxMap2D(f, rect):
# Get endpoints defining rect
x1, y1, x2, y2 = rect
# Define interval box x
x = [interval[x1, x2], interval[y1, y2]]
# Evaluate f as an interval map
y = f(x)
# Get endpoints of y
# y[0] is the first variable interval
# y[1] is the second variable interval
x1, x2 = y[0][0].inf, y[0][0].sup
y1, y2 = y[1][0].inf, y[1][0].sup
return [x1, y1, x2, y2]
# Define interval Leslie map
def f(x):
th1 = interval[19.6]
th2 = interval[23.68]
return [(th1 * x[0] + th2 * x[1]) * imath.exp (-interval[0.1] * (x[0] + x[1])),
interval[0.7] * x[0]]
# Define interval box map for f
def F(rect):
return IntervalBoxMap2D(f, rect)
subdiv_min = 20
subdiv_max = 30
subdiv_init = 4
subdiv_limit = 10000
lower_bounds = [-0.001, -0.001]
upper_bounds = [90.0, 70.0]
model = CMGDB.Model(subdiv_min, subdiv_max, subdiv_init, subdiv_limit,
lower_bounds, upper_bounds, F)
%%time
morse_graph, map_graph = CMGDB.ComputeConleyMorseGraph(model)
CMGDB.PlotMorseGraph(morse_graph, cmap=matplotlib.cm.cool)
CMGDB.PlotMorseSets(morse_graph, cmap=matplotlib.cm.cool, fig_w=5, fig_h=5)
###Output
_____no_output_____
###Markdown
Additional examples
###Code
# Define map f
def f(x):
return [x[0] / (2.0 - x[0]), x[1] / (2.0 - x[1])]
# Define box map for f
def F(rect):
return CMGDB.BoxMap(f, rect, padding=False)
subdiv_min = 6
subdiv_max = 10
subdiv_init = 4
subdiv_limit = 10000
lower_bounds = [0, 0]
upper_bounds = [1, 1]
model = CMGDB.Model(subdiv_min, subdiv_max, subdiv_init, subdiv_limit,
lower_bounds, upper_bounds, F)
%%time
morse_graph, map_graph = CMGDB.ComputeConleyMorseGraph(model)
CMGDB.PlotMorseGraph(morse_graph, cmap=matplotlib.cm.cool)
CMGDB.PlotMorseSets(morse_graph, cmap=matplotlib.cm.cool, fig_w=5, fig_h=5)
subdiv_min = 6
subdiv_max = 10
subdiv_init = 4
subdiv_limit = 10000
lower_bounds = [0, 0]
upper_bounds = [1.2, 1.2]
model = CMGDB.Model(subdiv_min, subdiv_max, subdiv_init, subdiv_limit,
lower_bounds, upper_bounds, F)
%%time
morse_graph, map_graph = CMGDB.ComputeConleyMorseGraph(model)
CMGDB.PlotMorseGraph(morse_graph, cmap=matplotlib.cm.cool)
CMGDB.PlotMorseSets(morse_graph, cmap=matplotlib.cm.cool, fig_w=5, fig_h=5)
###Output
_____no_output_____
###Markdown
With interval arithmetic
###Code
# Define interval box map for f
def F(rect):
return IntervalBoxMap2D(f, rect)
subdiv_min = 6
subdiv_max = 8
subdiv_init = 4
subdiv_limit = 10000
lower_bounds = [0, 0]
upper_bounds = [1, 1]
model = CMGDB.Model(subdiv_min, subdiv_max, subdiv_init, subdiv_limit,
lower_bounds, upper_bounds, F)
%%time
morse_graph, map_graph = CMGDB.ComputeConleyMorseGraph(model)
CMGDB.PlotMorseGraph(morse_graph, cmap=matplotlib.cm.cool)
CMGDB.PlotMorseSets(morse_graph, cmap=matplotlib.cm.cool, fig_w=5, fig_h=5)
###Output
_____no_output_____ |
Data Visualization/Matplotlib/sample_plots.ipynb | ###Markdown
Sample plots in MatplotlibHere you'll find a host of example plots with the code thatgenerated them.Line Plot=========Here's how to create a line plot with text labels using:func:`~matplotlib.pyplot.plot`... figure:: ../../gallery/lines_bars_and_markers/images/sphx_glr_simple_plot_001.png :target: ../../gallery/lines_bars_and_markers/simple_plot.html :align: center :scale: 50 Simple PlotMultiple subplots in one figure===============================Multiple axes (i.e. subplots) are created with the:func:`~matplotlib.pyplot.subplot` function:.. figure:: ../../gallery/subplots_axes_and_figures/images/sphx_glr_subplot_001.png :target: ../../gallery/subplots_axes_and_figures/subplot.html :align: center :scale: 50 SubplotImages======Matplotlib can display images (assuming equally spacedhorizontal dimensions) using the :func:`~matplotlib.pyplot.imshow` function... figure:: ../../gallery/images_contours_and_fields/images/sphx_glr_image_demo_003.png :target: ../../gallery/images_contours_and_fields/image_demo.html :align: center :scale: 50 Example of using :func:`~matplotlib.pyplot.imshow` to display a CT scanContouring and pseudocolor==========================The :func:`~matplotlib.pyplot.pcolormesh` function can make a coloredrepresentation of a two-dimensional array, even if the horizontal dimensionsare unevenly spaced. The:func:`~matplotlib.pyplot.contour` function is another way to representthe same data:.. figure:: ../../gallery/images_contours_and_fields/images/sphx_glr_pcolormesh_levels_001.png :target: ../../gallery/images_contours_and_fields/pcolormesh_levels.html :align: center :scale: 50 Example comparing :func:`~matplotlib.pyplot.pcolormesh` and :func:`~matplotlib.pyplot.contour` for plotting two-dimensional dataHistograms==========The :func:`~matplotlib.pyplot.hist` function automatically generateshistograms and returns the bin counts or probabilities:.. figure:: ../../gallery/statistics/images/sphx_glr_histogram_features_001.png :target: ../../gallery/statistics/histogram_features.html :align: center :scale: 50 Histogram FeaturesPaths=====You can add arbitrary paths in Matplotlib using the:mod:`matplotlib.path` module:.. figure:: ../../gallery/shapes_and_collections/images/sphx_glr_path_patch_001.png :target: ../../gallery/shapes_and_collections/path_patch.html :align: center :scale: 50 Path PatchThree-dimensional plotting==========================The mplot3d toolkit (see `toolkit_mplot3d-tutorial` and`mplot3d-examples-index`) has support for simple 3d graphsincluding surface, wireframe, scatter, and bar charts... figure:: ../../gallery/mplot3d/images/sphx_glr_surface3d_001.png :target: ../../gallery/mplot3d/surface3d.html :align: center :scale: 50 Surface3dThanks to John Porter, Jonathon Taylor, Reinier Heeres, and Ben Root forthe `mplot3d` toolkit. This toolkit is included with all standard Matplotlibinstalls.Streamplot==========The :meth:`~matplotlib.pyplot.streamplot` function plots the streamlines ofa vector field. In addition to simply plotting the streamlines, it allows youto map the colors and/or line widths of streamlines to a separate parameter,such as the speed or local intensity of the vector field... figure:: ../../gallery/images_contours_and_fields/images/sphx_glr_plot_streamplot_001.png :target: ../../gallery/images_contours_and_fields/plot_streamplot.html :align: center :scale: 50 Streamplot with various plotting options.This feature complements the :meth:`~matplotlib.pyplot.quiver` function forplotting vector fields. 
Thanks to Tom Flannaghan and Tony Yu for adding thestreamplot function.Ellipses========In support of the `Phoenix `_mission to Mars (which used Matplotlib to display ground tracking ofspacecraft), Michael Droettboom built on work by Charlie Moad to providean extremely accurate 8-spline approximation to elliptical arcs (see:class:`~matplotlib.patches.Arc`), which are insensitive to zoom level... figure:: ../../gallery/shapes_and_collections/images/sphx_glr_ellipse_demo_001.png :target: ../../gallery/shapes_and_collections/ellipse_demo.html :align: center :scale: 50 Ellipse DemoBar charts==========Use the :func:`~matplotlib.pyplot.bar` function to make bar charts, whichincludes customizations such as error bars:.. figure:: ../../gallery/statistics/images/sphx_glr_barchart_demo_001.png :target: ../../gallery/statistics/barchart_demo.html :align: center :scale: 50 Barchart DemoYou can also create stacked bars(`bar_stacked.py `_),or horizontal bar charts(`barh.py `_).Pie charts==========The :func:`~matplotlib.pyplot.pie` function allows you to create piecharts. Optional features include auto-labeling the percentage of area,exploding one or more wedges from the center of the pie, and a shadow effect.Take a close look at the attached code, which generates this figure in justa few lines of code... figure:: ../../gallery/pie_and_polar_charts/images/sphx_glr_pie_features_001.png :target: ../../gallery/pie_and_polar_charts/pie_features.html :align: center :scale: 50 Pie FeaturesTables======The :func:`~matplotlib.pyplot.table` function adds a text tableto an axes... figure:: ../../gallery/misc/images/sphx_glr_table_demo_001.png :target: ../../gallery/misc/table_demo.html :align: center :scale: 50 Table DemoScatter plots=============The :func:`~matplotlib.pyplot.scatter` function makes a scatter plotwith (optional) size and color arguments. This example plots changesin Google's stock price, with marker sizes reflecting thetrading volume and colors varying with time. Here, thealpha attribute is used to make semitransparent circle markers... figure:: ../../gallery/lines_bars_and_markers/images/sphx_glr_scatter_demo2_001.png :target: ../../gallery/lines_bars_and_markers/scatter_demo2.html :align: center :scale: 50 Scatter Demo2GUI widgets===========Matplotlib has basic GUI widgets that are independent of the graphicaluser interface you are using, allowing you to write cross GUI figuresand widgets. See :mod:`matplotlib.widgets` and the`widget examples `_... figure:: ../../gallery/widgets/images/sphx_glr_slider_demo_001.png :target: ../../gallery/widgets/slider_demo.html :align: center :scale: 50 Slider and radio-button GUI.Filled curves=============The :func:`~matplotlib.pyplot.fill` function lets youplot filled curves and polygons:.. figure:: ../../gallery/lines_bars_and_markers/images/sphx_glr_fill_001.png :target: ../../gallery/lines_bars_and_markers/fill.html :align: center :scale: 50 FillThanks to Andrew Straw for adding this function.Date handling=============You can plot timeseries data with major and minor ticks and customtick formatters for both... figure:: ../../gallery/text_labels_and_annotations/images/sphx_glr_date_001.png :target: ../../gallery/text_labels_and_annotations/date.html :align: center :scale: 50 DateSee :mod:`matplotlib.ticker` and :mod:`matplotlib.dates` for details and usage.Log plots=========The :func:`~matplotlib.pyplot.semilogx`,:func:`~matplotlib.pyplot.semilogy` and:func:`~matplotlib.pyplot.loglog` functions simplify the creation oflogarithmic plots... 
figure:: ../../gallery/scales/images/sphx_glr_log_demo_001.png :target: ../../gallery/scales/log_demo.html :align: center :scale: 50 Log DemoThanks to Andrew Straw, Darren Dale and Gregory Lielens for contributionslog-scaling infrastructure.Polar plots===========The :func:`~matplotlib.pyplot.polar` function generates polar plots... figure:: ../../gallery/pie_and_polar_charts/images/sphx_glr_polar_demo_001.png :target: ../../gallery/pie_and_polar_charts/polar_demo.html :align: center :scale: 50 Polar DemoLegends=======The :func:`~matplotlib.pyplot.legend` function automaticallygenerates figure legends, with MATLAB-compatible legend-placementfunctions... figure:: ../../gallery/text_labels_and_annotations/images/sphx_glr_legend_001.png :target: ../../gallery/text_labels_and_annotations/legend.html :align: center :scale: 50 LegendThanks to Charles Twardy for input on the legend function.TeX-notation for text objects=============================Below is a sampling of the many TeX expressions now supported by Matplotlib'sinternal mathtext engine. The mathtext module provides TeX style mathematicalexpressions using `FreeType `_and the DejaVu, BaKoMa computer modern, or `STIX `_fonts. See the :mod:`matplotlib.mathtext` module for additional details... figure:: ../../gallery/text_labels_and_annotations/images/sphx_glr_mathtext_examples_001.png :target: ../../gallery/text_labels_and_annotations/mathtext_examples.html :align: center :scale: 50 Mathtext ExamplesMatplotlib's mathtext infrastructure is an independent implementation anddoes not require TeX or any external packages installed on your computer. Seethe tutorial at :doc:`/tutorials/text/mathtext`.Native TeX rendering====================Although Matplotlib's internal math rendering engine is quitepowerful, sometimes you need TeX. Matplotlib supports external TeXrendering of strings with the *usetex* option... figure:: ../../gallery/text_labels_and_annotations/images/sphx_glr_tex_demo_001.png :target: ../../gallery/text_labels_and_annotations/tex_demo.html :align: center :scale: 50 Tex DemoEEG GUI=======You can embed Matplotlib into pygtk, wx, Tk, or Qt applications.Here is a screenshot of an EEG viewer called `pbrain`__.The lower axes uses :func:`~matplotlib.pyplot.specgram`to plot the spectrogram of one of the EEG channels.For examples of how to embed Matplotlib in different toolkits, see: * :doc:`/gallery/user_interfaces/embedding_in_gtk3_sgskip` * :doc:`/gallery/user_interfaces/embedding_in_wx2_sgskip` * :doc:`/gallery/user_interfaces/mpl_with_glade3_sgskip` * :doc:`/gallery/user_interfaces/embedding_in_qt_sgskip` * :doc:`/gallery/user_interfaces/embedding_in_tk_sgskip`XKCD-style sketch plots=======================Just for fun, Matplotlib supports plotting in the style of `xkcd`... figure:: ../../gallery/showcase/images/sphx_glr_xkcd_001.png :target: ../../gallery/showcase/xkcd.html :align: center :scale: 50 xkcd Subplot example===============Many plot types can be combined in one figure to createpowerful and flexible representations of data.
###Code
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(19680801)
data = np.random.randn(2, 100)
fig, axs = plt.subplots(2, 2, figsize=(5, 5))
axs[0, 0].hist(data[0])
axs[1, 0].scatter(data[0], data[1])
axs[0, 1].plot(data[0], data[1])
axs[1, 1].hist2d(data[0], data[1])
plt.show()
###Output
_____no_output_____ |
4_2_bistability.ipynb | ###Markdown
cells defined in earlier notebooks
###Code
# assumed imports (the defining cells are in earlier notebooks of this series, per the note above):
import time
import pickle
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint

def R_nonbinding_3eq(y,t):
"""
system of ODEs from Zaytsev 2016, simplified using two mass balances
with the following components:
- a: inactive Aurora B kinase
- A: active Aurora B kinase
- AA: enzyme-substrate complex of inactive + active Aurora B kinase
- Ph: phosphatase
- PhA: enzyme-substrate complex of phosphatase + active Aurora B kinase
- a0: total Aurora B kinase
- p0: total phosphatase
"""
# set variable space
A, AA, Ph = y
# mass balances
PhA = p0 - Ph
a = a0 - A - 2*AA - PhA
# reaction equations
dAdt = (kcis - kfa*A)*a + (kra+2*kca)*AA - kfp*A*Ph + krp*PhA
dAAdt = kfa*A*a - (kra+kca)*AA
dPhdt = -kfp*A*Ph + (krp + kcp)*PhA
return dAdt, dAAdt, dPhdt
"""
parameters from Zaytsev 2016
"""
kcis = 7.29*10**-6 # 1/s # rate constant for 'in cis' Aurora B activation
kfa = 0.1 # 1/(uM*s) # rate constant for AA complex formation
kca = 2.7*10**-2 # 1/s # rate constant for AA catalysis
Kma = 51 # uM # Michaelis constant for AA 'in trans' activation
kra = kfa*Kma-kca # 1/ # rate constant for AA complex dissociation
kfp = 0.6 # 1/(uM*s) # rate constant for PhA complex formation
kcp = 2.4*10**-2 # 1/s # rate constant for PhA catalysis
Kmp = 1.95 # uM # Michaelis constant for PhA 'in trans' activation
krp = kfp*Kmp-kcp # 1/s # rate constant for PhA complex dissociation
def R_nonbinding_3eq_cisonly(y,t):
"""
system of ODEs from Zaytsev 2016, with the 'in trans' reaction component removed
"""
# set variable space
A, Ph = y
# mass balances
PhA = p0 - Ph
a = a0 - A - PhA
# reaction equations
dAdt = kcis_only*a - kfp*A*Ph + krp*PhA
dPhdt = -kfp*A*Ph + (krp + kcp)*PhA
return dAdt, dPhdt
"""
parameter specific to the 'cis only' set of ODEs, fit to demonstrate principle
"""
kcis_only = 1.8*10**-3 # 1/s # rate constant for 'in cis' Aurora B activation
###Output
_____no_output_____
###Markdown
demonstration of bistability
###Code
"""
Figure 5A
time evolution of the 'in cis + in trans' system showing bistability at 0.55 uM phosphatase where:
- an initially low state retains low activity
- an initially high state retains high activity
- this stability at two states does not exist for other phosphatase concentrations plotted
"""
a0 = 10
p0s = [0,.15,.35,.55,.75]
colors = ['k','C0','C1','C2','C3']
colors_dashed = ['k--','C0--','C1--','C2--','C3--']
t = np.linspace(0,140*60,500)
for p0,color,color_dashed in zip(p0s,colors,colors_dashed):
# initially low activity
y = odeint(R_nonbinding_3eq,[0,0,p0],t)
plt.plot(t/60,y[:,0],color,label=f'{p0}')
# initial high activity
y = odeint(R_nonbinding_3eq,[a0,0,p0],t)
plt.plot(t/60,y[:,0],color_dashed)
plt.legend(title='[PPase] (\u03BCM)',loc='lower right')
plt.xlabel("Time (min)")
plt.ylabel("[ABKp] (\u03BCM)");
"""
Figure 5B
time evolution of the 'in cis' system showing no bistability
- thus, the 'in trans' reactions (positive feedback) are necessary for bistability
"""
a0 = 10
p0s = [0,.15,.35,.55,.75]
colors = ['k','C0','C1','C2','C3']
colors_dashed = ['k--','C0--','C1--','C2--','C3--']
t = np.linspace(0,140*60,500)
for p0,color,color_dashed in zip(p0s,colors,colors_dashed):
y = odeint(R_nonbinding_3eq_cisonly,[0,p0],t)
plt.plot(t/60,y[:,0],color,label=f'{p0}')
y = odeint(R_nonbinding_3eq_cisonly,[a0,p0],t)
plt.plot(t/60,y[:,0],color_dashed)
plt.legend(title='[PPase] (\u03BCM)',loc='lower right')
plt.xlabel("Time (min)")
plt.ylabel("[ABKp] (\u03BCM)");
def saddle_node_locator(ss_list):
"""
find point where steady state (ss) jumps (hysteresis)
where the unstable manifold appears/disappears
"""
for n,(i,j) in enumerate(zip(ss_list[:-1], ss_list[1:])):
if abs(j-i) > 0.3:
return n+1
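# example with hypothetical data: saddle_node_locator([0.1, 0.12, 0.9, 0.95]) returns 2,
# the index at which the steady-state branch jumps (the 0.3 threshold is a heuristic)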
"""
algorithm to find steady states + the unstable manifold in the bistable regions
of the 'in cis + in trans' system at different phosphatase concentrations
"""
start = time.time()
## algorithm takes ~3 min
tspan = np.linspace(0,5000*60,10000)
Atot_range = np.arange(0,17.5,.05)
curves_15_75 = []
for p0 in [.15,.35,.55,.75]:
lo_list = []
hi_list = []
Atot_bistable_list = []
bistable_list = []
for a0 in Atot_range:
# time evolutions starting with low + high active kinase levels
lo_init = [0,0,p0]
y = odeint(R_nonbinding_3eq,lo_init,tspan)
lo_ss = y[-1,0]
lo_list.append(lo_ss)
hi_init = [a0,0,p0]
y = odeint(R_nonbinding_3eq,hi_init,tspan)
hi_ss = y[-1,0]
hi_list.append(hi_ss)
# if steady state values differ with low + high initial active kinase levels,
# system is bistable, record location along with both steady states
if not np.isclose(lo_ss, hi_ss, atol=1):
Atot_bistable_list.append(a0)
bistable_list.append((a0, lo_ss, hi_ss))
if len(bistable_list) == 0:
# if no bistabiliy exists at this phosphatase concentration, record curve + move on
curves_15_75.append((p0,lo_list,0,hi_list,0,0,0))
else:
# if bistability exists, time evolve at increasing kinase concentration
# until steady state diverges, record point as unstable manifold
unstablemanifold_list = []
for a0, lo_ss, hi_ss in bistable_list:
A0 = lo_ss
y_sim = np.zeros((2,2))
y_sim[-1,0] = -1
while y_sim[-1,0] < np.average((A0,lo_ss)):
A0 += .01
A_init = [A0,0,p0]
y_sim = odeint(R_nonbinding_3eq,A_init,tspan)
unstablemanifold_list.append(A0)
# finds hysteresis points in low and high steady state curves
n_lo = saddle_node_locator(lo_list)
n_hi = saddle_node_locator(hi_list)
# plot low ss until hysteresis + unstable manifold + high ss from hysteresis
saddle_x = [Atot_range[n_hi]] + Atot_bistable_list + [Atot_range[n_lo-1]]
saddle_y = [hi_list[n_hi]] + unstablemanifold_list + [lo_list[n_lo-1]]
curves_15_75.append((p0,lo_list,n_lo,hi_list,n_hi,saddle_x,saddle_y))
pickle.dump(curves_15_75,open('curves_15_75','wb'))
end = time.time()
print(f'~ {round( (end - start)/60, 1 )} min')
"""
Figure 5C
plots bistability curve results of above algorithm
dotted lines show the region of systems that are bistable
gray dashed line bisects the curves at values that correspond to the plot above,
explaining the bistable behavior at 0.55 uM + monostable behavior otherwise
"""
curves_15_75 = pickle.load(open('curves_15_75','rb'))
Atot_range = np.arange(0,17.5,.05)
plt.plot(Atot_range,Atot_range,'k', label = '0')
p0,lo_list,n_lo,hi_list,n_hi,saddle_x,saddle_y = curves_15_75[0]
plt.plot(Atot_range,lo_list,'C0', label = '0.15')
p0,lo_list,n_lo,hi_list,n_hi,saddle_x,saddle_y = curves_15_75[1]
plt.plot(Atot_range[:n_lo], lo_list[:n_lo],'C1', label = '0.35')
plt.plot(Atot_range[n_hi:], hi_list[n_hi:],'C1')
plt.plot(saddle_x,saddle_y,'C1:')
p0,lo_list,n_lo,hi_list,n_hi,saddle_x,saddle_y = curves_15_75[2]
plt.plot(Atot_range[:n_lo], lo_list[:n_lo],'C2', label = '0.55')
plt.plot(Atot_range[n_hi:], hi_list[n_hi:],'C2')
plt.plot(saddle_x,saddle_y,'C2:')
p0,lo_list,n_lo,hi_list,n_hi,saddle_x,saddle_y = curves_15_75[3]
plt.plot(Atot_range[:n_lo], lo_list[:n_lo],'C3', label = '0.75')
plt.plot(Atot_range[n_hi:], hi_list[n_hi:],'C3')
# first three points not plotted due to imprecision in algorithm
plt.plot(saddle_x[3:],saddle_y[3:],'C3:')
# plot vertical line showing steady state values for 10 uM kinase, relating to previous two plots
plt.axvline(10,color='gray',linestyle='dashed')
plt.legend(title='[PPase] (\u03BCM)')
plt.xlabel('[Total ABK] (\u03BCM)')
plt.ylabel('[ABKp] (\u03BCM)')
plt.xlim(0,17.5)
plt.ylim(0,14)
plt.locator_params(axis='x', nbins=7);
"""
algorithm to find steady states
of the 'in cis' system at different phosphatase concentrations
"""
start = time.time()
## algorithm takes <1 min
tspan = np.linspace(0,5000*60,10000)
Atot_range = np.arange(0,17.5,.05)
curves_15_75_cis = []
for p0 in [.15,.35,.55,.75]:
ss_list = []
for a0 in Atot_range:
init = [0,p0]
y = odeint(R_nonbinding_3eq_cisonly,init,tspan)
ss_list.append(y[-1,0])
curves_15_75_cis.append((p0,ss_list,0,0,0,0,0))
end = time.time()
print(f'~ {round( (end - start)/60, 1 )} min')
"""
Figure 5D
plots curves of the above algorithm showing no bistability
"""
colors = ['C0','C1','C2','C3']
plt.plot(Atot_range,Atot_range,'k', label = '0')
for (p0,lo_list,n_lo,hi_list,n_hi,saddle_x,saddle_y),color in zip(curves_15_75_cis,colors):
plt.plot(Atot_range,lo_list, color, label = f'{p0}')
plt.axvline(10,color='gray',linestyle='dashed')
plt.legend(title='[PPase] (\u03BCM)')
plt.xlabel('[Total ABK] (\u03BCM)')
plt.ylabel('[ABKp] (\u03BCM)')
plt.xlim(-.5,18)
plt.ylim(-.05, 14*1.05);
###Output
_____no_output_____ |
src/user_guide/output_statement.ipynb | ###Markdown
The `output` statement * **Difficulty level**: easy* **Time needed to learn**: 10 minutes or less* **Key points**: * Step outputs are defined for each substep and can be derived from substep input (variable `_input`) * Variable `step_output` is defined at the completion of the step and can be passed to other steps The output statement defines the output files or targets of a SoS step. It is optional, but it is fundamental for the creation of all but very simple workflows. You can check out the [How to create dependencies between SoS steps](step_dependencies.html) tutorial for a quick overview of the use of output statements. This tutorial lists what you can put in the output statement of a step with simple examples; refer to other tutorials for more in-depth discussions of the topics. Steps with no output statement The `output` statement is optional. When no output file is defined, a step will have undefined output. For example, the following workflow has a step `A` that executes a simple shell script. No output statement is needed and the workflow will work just fine.
###Code
%run A -v0
[A_1]
sh:
echo do something
[A_2]
print(f'The input of step {step_name} is "{step_input}"')
###Output
[##] 2 steps processed (2 jobs completed)
###Markdown
In simple workflows with numerically indexed steps, an empty output will be passed to the next step. Unnamed output files The easiest way to explicitly specify the output of a step is to list output files directly in the `output` statement.
###Code
output: 'a.txt'
_output.touch()
print(f'_output is {_output}')
###Output
_output is a.txt
###Markdown
Here we use the `touch` function of `_output`, which is of type `sos_targets`. This function creates one or more files listed in variable `_output`, and it will be used quite often in these tutorials because SoS checks whether the output files exist after the execution of the step. As in the case of the input statement, multiple files can be listed as multiple parameters, sequences (list, tuple, etc.), or variables of string or sequence types; a short sketch of these forms appears after the next cell. Output in substeps It is very important to remember that **the output statement defines output for substeps**. Let us create a few input files,
###Code
!touch a.txt b.txt c.txt d.txt
###Output
_____no_output_____
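###Markdown
A minimal sketch of the forms mentioned above for listing multiple output files (the file names here are illustrative):
###Code
extra_outputs = ['f1.txt', 'f2.txt']
output: 'e.txt', extra_outputs
_output.touch()
print(f'_output is {_output}')
###Output
_____no_output_____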
###Markdown
In the following example, option `group_by` creates two substeps with `_input` being `a.txt` and `b.txt` respectively. The `_input` (actually `_input[0]`) is of type `file_target`, which is derived from `pathlib.Path`, so you can use any member function of `pathlib.Path`. Here we use `with_suffix` to obtain `a.bak` from `a.txt`.
###Code
input: 'a.txt', 'b.txt', group_by=1
output: _input.with_suffix('.bak')
print(f'Input of substep is {_input}, output of substep is {_output}')
_output.touch()
###Output
Input of substep is a.txt, output of substep is a.bak
Input of substep is b.txt, output of substep is b.bak
###Markdown
As you can see, `_output` is defined for each substep from `_input`. But what is `step_output`?**`step_output` is defined as an accumulated version of `_output`, with `_output` as its groups**. It is useful only when the output is passed to other steps, either implicitly as shown below, or through the functions `output_from` and `named_output`; a sketch of `output_from` follows the next cell.
###Code
%run -v0
[10]
input: 'a.txt', 'b.txt', group_by=1
output: _input.with_suffix('.bak')
print(f'Input of substep is {_input}, output of substep is {_output}')
_output.touch()
[20]
print(f'step_input is {step_input}, substep input is {_input}')
###Output
[##] 2 steps processed (4 jobs completed)
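###Markdown
A minimal sketch of the `output_from` alternative mentioned above (the workflow and step names are illustrative):
###Code
%run B -v0
[B_1]
output: 'a.bak', 'b.bak'
_output.touch()
[B_2]
input: output_from('B_1')
print(f'Input of step is {_input}')
###Output
_____no_output_____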
###Markdown
**SoS substeps must produce different sets of `_output`**. The following workflow will fail to execute because both substeps will attempt to produce `a.bak`.
###Code
%env --expect-error
input: 'a.txt', 'b.txt', group_by=1
output: 'a.bak'
_output.touch()
###Output
RuntimeError: Failed to process step output ('a.bak'): Output a.bak from substep 1 of 2 substeps overlaps with output from a previous substep.
###Markdown
Output with predefined groups (option `group_by`) In situations when you have predefined input and output pairs, you can define output groups with option `group_by`. The key here is that the number of groups should match the number of substeps. For example,
###Code
%run -s force -v0
txt_files = ['a.txt', 'b.txt']
bak_files = ['a.bak', 'b.bak']
input: txt_files, group_by=1
output: bak_files, group_by=1
print(f'Input of substep is {_input}, output of substep is {_output}')
_output.touch()
###Output
[#] 1 step processed (2 jobs completed)
###Markdown
Named output Similar to named input, you can assign labels to output files and refer to them with `_output["label"]`.
###Code
output: A='a.txt', B='b.txt'
print(f"Output with label A is {_output['A']}, with label B is {_output['B']}")
print(f"Output of step is {_output}")
_output.touch()
###Output
Output with label A is a.txt, with label B is b.txt
Output of step is a.txt b.txt
###Markdown
More importantly, these labels define named outputs that can be referred to with the function `named_output`.
###Code
%run -v0
[A]
output: A='a.txt', B='b.txt'
_output.touch()
[default]
input: named_output('A')
print(f'Input of step is {_input}')
###Output
[##] 2 steps processed (1 job completed, 1 job ignored)
###Markdown
Attach variables to individual output files The `paired_with` option can be used to attach variables to individual output files.
###Code
output: 'a.txt', 'b.txt', paired_with=dict(sample_name=['A', 'B'])
print(f'Output of substep is {_output}, with sample names {_output[0].sample_name} and {_output[1].sample_name}')
_output.touch()
###Output
Output of substep is a.txt b.txt, with sample names A and B
###Markdown
Attach variables to output Option `group_with` can be used to attach variables to output groups, which can be useful as annotations for output files when the output is passed to other steps. A potentially confusing part of the `group_with` option is that it assigns elements to either `_output` or `step_output`, depending on how the `output` statement is defined. If the `output` statement does not have a `group_by` or `for_each` option (no groups), `group_with` should assign a single element to the `_output` of the specific substep:
###Code
sample_names = ['A', 'B']
input: for_each=dict(sample_name=sample_names)
output: f'out_{sample_name}.txt', group_with=dict(sample=sample_names[_index])
print(f'Output of substep is {_output}, with sample name {_output.sample}')
_output.touch()
###Output
Output of substep is out_A.txt, with sample name A
Output of substep is out_B.txt, with sample name B
###Markdown
If you would like to attach some result to an individual substep, it can be easier to just set the variable on `_output` directly.
###Code
!rm -f out_0.txt out_1.txt
%run -v1 -s force
[10]
input: for_each=dict(i=range(2))
output: f'out_{i}.txt'
import random
seed = random.randint(1, 1000)
_output.touch()
_output.set(seed=seed)
[20]
print(f'seed of output {_input} is {_input.seed}')
###Output
seed of output out_0.txt is 21
seed of output out_1.txt is 490
###Markdown
The `output` statement * **Difficulty level**: easy* **Time needed to learn**: 10 minutes or less* **Key points**: * Step outputs are defined for each substep and can be derived from substep input (variable `_input`) * Variable `step_output` is defined at the completion of the step and can be passed to other steps The output statement defines the output files or targets of a SoS step. It is optional, but it is fundamental for the creation of all but very simple workflows. You can check out the [How to create dependencies between SoS steps](step_dependencies.html) tutorial for a quick overview of the use of output statements. This tutorial lists what you can put in the output statement of a step with simple examples; refer to other tutorials for more in-depth discussions of the topics. Steps with no output statement The `output` statement is optional. When no output file is defined, a step will have undefined output. For example, the following workflow has a step `A` that executes a simple shell script. No output statement is needed and the workflow will work just fine.
###Code
%run A -v1
[A_1]
sh:
echo do something
[A_2]
print(f'The input of step {step_name} is "{step_input}"')
###Output
do something
The input of step A_2 is ""
###Markdown
In simple workflows with numerically indexed steps, an empty output will be passed to the next step. Unnamed output files The easiest way to explicitly specify the output of a step is to list output files directly in the `output` statement.
###Code
output: 'a.txt'
_output.touch()
print(f'_output is {_output}')
###Output
_output is a.txt
###Markdown
Here we use the `touch` function of `_output`, which is of type `sos_targets`. This function creates one or more files listed in variable `_output`, and it will be used quite often in these tutorials because SoS checks whether the output files exist after the execution of the step. As in the case of the input statement, multiple files can be listed as multiple parameters, sequences (list, tuple, etc.), or variables of string or sequence types. Output in substeps The output statement can define output for a single substep or for all substeps. That is to say: if the output targets are ungrouped, it defines `_output`, and `step_output` is an accumulated version of `_output`; if the output targets are grouped with options `group_by` or `for_each`, it defines `step_output`, which should have the same number of groups as `step_input`. A short sketch related to the `for_each` case appears after the next cell. Let us create a few input files,
###Code
!touch a.txt b.txt c.txt d.txt
###Output
_____no_output_____
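###Markdown
A minimal sketch related to the `for_each` case mentioned above: `for_each` on the input creates substeps, and the `output` statement then defines a distinct `_output` for each of them (file names illustrative):
###Code
%run -v1
[10]
input: for_each=dict(i=range(2))
output: f'copy_{i}.txt'
_output.touch()
[20]
print(f'step_input is {step_input}, substep input is {_input}')
###Output
_____no_output_____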
###Markdown
**The `output` statement usually defines the output of a single substep**. In the following example, option `group_by` creates two substeps with `_input` being `a.txt` and `b.txt` respectively. The `_input` (actually `_input[0]`) is of type `file_target`, which is derived from `pathlib.Path`, so you can use any member function of `pathlib.Path`. Here we use `with_suffix` to obtain `a.bak` from `a.txt`.
###Code
input: 'a.txt', 'b.txt', group_by=1
output: _input.with_suffix('.bak')
print(f'Input of substep is {_input}, output of substep is {_output}')
_output.touch()
###Output
Input of substep is a.txt, output of substep is a.bak
Input of substep is b.txt, output of substep is b.bak
###Markdown
As you can see, `_output` is defined for each substep from `_input`. But what is `step_output`?**`step_output` is defined as an accumulated version of `_output`, with `_output` as its groups**. It is useful only when the output is passed to other steps, either implicitly as shown below, or through the functions `output_from` and `named_output`.
###Code
%run -v1
[10]
input: 'a.txt', 'b.txt', group_by=1
output: _input.with_suffix('.bak')
print(f'Input of substep is {_input}, output of substep is {_output}')
_output.touch()
[20]
print(f'step_input is {step_input}, substep input is {_input}')
###Output
Input of substep is a.txt, output of substep is a.bak
Input of substep is b.txt, output of substep is b.bak
step_input is a.bak b.bak, substep input is a.bak
step_input is a.bak b.bak, substep input is b.bak
###Markdown
**SoS substeps must produce different sets of `_output`**. The following workflow will fail to execute because both substeps will attempt to produce `a.bak`.
###Code
%env --expect-error
input: 'a.txt', 'b.txt', group_by=1
output: 'a.bak'
_output.touch()
###Output
RuntimeError: Failed to process step output ('a.bak'): Output a.bak from substep 1 of 2 substeps overlaps with output from a previous substep.
###Markdown
Output with predefined groups (option `group_by`) In situations when you have predefined input and output pairs, you can define output targets with groups using option `group_by`. The key here is that the number of groups should match the number of substeps. Technically speaking, the `output` statement defines `step_output`, and each substep takes one group as its `_output`. For example,
###Code
%run -s force -v1
txt_files = ['a.txt', 'b.txt']
bak_files = ['a.bak', 'b.bak']
input: txt_files, group_by=1
output: bak_files, group_by=1
print(f'Input of substep is {_input}, output of substep is {_output}')
_output.touch()
###Output
Input of substep is a.txt, output of substep is a.bak
Input of substep is b.txt, output of substep is b.bak
###Markdown
Named output Similar to named input, you can assign labels to output files and refer to them with `_output["label"]`.
###Code
output: A='a.txt', B='b.txt'
print(f"Output with label A is {_output['A']}, with label B is {_output['B']}")
print(f"Output of step is {_output}")
_output.touch()
###Output
Output with label A is a.txt, with label B is b.txt
Output of step is a.txt b.txt
###Markdown
More importantly, these labels define named outputs that can be referred to with the function `named_output`.
###Code
%run -v1
[A]
output: A='a.txt', B='b.txt'
_output.touch()
[default]
input: named_output('A')
print(f'Input of step is {_input}')
###Output
Input of step is a.txt
###Markdown
Attach variables to individual output files The `paired_with` option can be used to attach variables to individual output files.
###Code
output: 'a.txt', 'b.txt', paired_with=dict(sample_name=['A', 'B'])
print(f'Output of substep is {_output}, with sample names {_output[0].sample_name} and {_output[1].sample_name}')
_output.touch()
###Output
Output of substep is a.txt b.txt, with sample names A and B
###Markdown
Attach variables to output Option `group_with` can be used to attach variables to output groups, which can be useful as annotations for output files when the output is passed to other steps. A potentially confusing part of the `group_with` option is that it assigns elements to either `_output` or `step_output`, depending on how the `output` statement is defined. If the `output` statement does not have a `group_by` or `for_each` option, it defines a single `_output`, and `group_with` should assign a single element to the `_output` of this specific substep:
###Code
sample_names = ['A', 'B']
input: for_each=dict(sample_name=sample_names)
output: f'out_{sample_name}.txt', group_with=dict(sample=sample_names[_index])
print(f'Output of substep is {_output}, with sample name {_output.sample}')
_output.touch()
###Output
Output of substep is out_A.txt, with sample name A
Output of substep is out_B.txt, with sample name B
###Markdown
If you would like to attach some result to individual substep, it can be easier to just set the variable to `_output` though.
###Code
!rm -f out_0.txt out_1.txt
%run -v1 -s force
[10]
input: for_each=dict(i=range(2))
output: f'out_{i}.txt'
import random
seed = random.randint(1, 1000)
_output.touch()
_output.set(seed=seed)
[20]
print(f'seed of output {_input} is {_input.seed}')
###Output
seed of output out_0.txt is 577
seed of output out_1.txt is 209
###Markdown
How to define step output * **Difficulty level**: easy* **Time needed to learn**: 10 minutes or less* **Key points**: * Step outputs are defined for each substep and can be derived from substep input (variable `_input`) * Variable `step_output` is defined at the completion of the step and can be passed to other steps The output statement defines the output files or targets of a SoS step. It is optional, but it is fundamental for the creation of all but very simple workflows. You can check out the [How to create dependencies between SoS steps](doc/user_guide/step_dependencies.html) tutorial for a quick overview of the use of output statements. This tutorial lists what you can put in the output statement of a step with simple examples; refer to other tutorials for more in-depth discussions of the topics. Steps with no output statement The `output` statement is optional. When no output file is defined, a step will have undefined output. For example, the following workflow has a step `A` that executes a simple shell script. No output statement is needed and the workflow will work just fine.
###Code
%run A -v0
[A_1]
sh:
echo do something
[A_2]
print(f'The input of step {step_name} is "{step_input}"')
###Output
The input of step A_2 is ""
###Markdown
In simple workflows with numerically indexed steps, an empty output will be passed to the next step.

Unnamed output files

The easiest way to explicitly specify the output of a step is to list the output files directly in the `output` statement.
###Code
output: 'a.txt'
_output.touch()
print(f'_output is {_output}')
###Output
_____no_output_____
###Markdown
Here we used the `touch` function of `_output`, which is of type `sos_targets`. This function creates one or more files listed in variable `_output`, and it will be used quite often in these tutorials because SoS checks whether the output files exist after the execution of the step. As in the case of the input statement, multiple files can be listed as multiple parameters, sequences (list, tuple, etc.), or variables of string or sequence types.

Output in substeps

It is very important to remember that the **output statement defines output for substeps**. Let us create a few input files,
###Code
!touch a.txt b.txt c.txt d.txt
###Output
_____no_output_____
###Markdown
In the following example, option `group_by` creates two substeps with `_input` being `a.txt` and `b.txt` respectively. The `_input` (actually `_input[0]`) is of type `file_target`, which is derived from `pathlib.Path`, so you can use any member function of `pathlib.Path`. Here we use `with_suffix` to obtain `a.bak` from `a.txt`.
###Code
input: 'a.txt', 'b.txt', group_by=1
output: _input.with_suffix('.bak')
print(f'Input of substep is {_input}, output of substep is {_output}')
_output.touch()
###Output
_____no_output_____
###Markdown
As you can see, `_output` is defined for each substep from `_input`. But what is `step_output`? `step_output` is defined as an accumulated version of `_output`, with the `_output` of each substep as its groups. It is useful only when the output is passed to other steps, either implicitly as shown below, or explicitly through the functions `output_from` and `named_output`.
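For reference, the explicit form with `output_from` looks roughly like the following sketch (not executed here; it assumes `a.txt` and `b.txt` exist as above):

```
%run -v0
[A]
input: 'a.txt', 'b.txt', group_by=1
output: _input.with_suffix('.bak')
_output.touch()

[default]
# output_from('A') imports step A's step_output, keeping its groups
input: output_from('A')
print(f'substep input is {_input}')
```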
###Code
%run -v0
[10]
input: 'a.txt', 'b.txt', group_by=1
output: _input.with_suffix('.bak')
print(f'Input of substep is {_input}, output of substep is {_output}')
_output.touch()
[20]
print(f'step_input is {step_input}, substep input is {_input}')
###Output
Input of substep is a.txt, output of substep is a.bak
Input of substep is b.txt, output of substep is b.bak
step_input is a.bak b.bak, substep input is a.bak
step_input is a.bak b.bak, substep input is b.bak
###Markdown
Output with predefined groups

In situations when you have predefined input and output pairs, you can define output groups with option `group_by`. The key here is that the number of groups should match the number of substeps. For example,
###Code
%run -s force -v0
txt_files = ['a.txt', 'b.txt']
bak_files = ['a.bak', 'b.bak']
input: txt_files, group_by=1
output: bak_files, group_by=1
print(f'Input of substep is {_input}, output of substep is {_output}')
_output.touch()
###Output
Input of substep is a.txt, output of substep is a.bak
Input of substep is b.txt, output of substep is b.bak
###Markdown
Named output

Similar to named input, you can assign labels to output files and refer to them with `_output["label"]`.
###Code
output: A='a.txt', B='b.txt'
print(f"Output with label A is {_output['A']}, with label B is {_output['B']}")
print(f"Output of step is {_output}")
_output.touch()
###Output
_____no_output_____
###Markdown
More importantly, these labels define named outputs that can be referred to with the function `named_output`.
###Code
%run -v0
[A]
output: A='a.txt', B='b.txt'
_output.touch()
[default]
input: named_output('A')
print(f'Input of step is {_input}')
###Output
Input of step is a.txt
###Markdown
Attach variables to individual output files

The `paired_with` option can be used to attach variables to individual output files.
###Code
output: 'a.txt', 'b.txt', paired_with=dict(sample_name=['A', 'B'])
print(f'Output of substep is {_output}, with sample names {_output[0].sample_name} and {_output[1].sample_name}')
_output.touch()
###Output
Output of substep is a.txt b.txt, with sample names A and B
###Markdown
Attach variables to output

Option `group_with` can be used to attach variables to output groups, which can be useful as annotations for output files when the output is passed to other steps. A potentially confusing part of the `group_with` option is that it assigns elements to either `_output` or `step_output`, depending on how the `output` statement is defined. If the `output` statement does not have a `group_by` or `for_each` option, it defines a single `_output`, and `group_with` should assign a single element to the `_output` of that specific substep:
###Code
sample_names = ['A', 'B']
input: for_each=dict(sample_name=sample_names)
output: f'out_{sample_name}.txt', group_with=dict(sample=sample_names[_index])
print(f'Output of substep is {_output}, with sample name {_output.sample}')
_output.touch()
###Output
Output of substep is out_A.txt, with sample name A
Output of substep is out_B.txt, with sample name B
###Markdown
If you would like to attach some result to an individual substep, it can be easier to simply set the variable on `_output`.
###Code
%run -v0
[10]
input: for_each=dict(i=range(2))
output: f'out_{i}.txt'
import random
seed = random.randint(1, 1000)
_output.touch()
_output.set(seed=seed)
[20]
print(f'seed of output {_input} is {_input.seed}')
###Output
seed of output out_0.txt is 369
seed of output out_1.txt is 701
|
flower_classifier_project/Image Classifier Project.ipynb | ###Markdown
Developing an AI application

Going forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications.

In this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories; you can see a few examples below.

The project is broken down into multiple steps:
* Load and preprocess the image dataset
* Train the image classifier on your dataset
* Use the trained classifier to predict image content

We'll lead you through each part which you'll implement in Python. When you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new.

First up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. As you work through this notebook and find you need to import a package, make sure to add the import up here.
###Code
# Imports here
import json
import torch
import PIL
import matplotlib
import numpy as np
import torchvision as tv
import matplotlib.pyplot as plt
from torch import nn
from collections import OrderedDict
###Output
_____no_output_____
###Markdown
Load the data

Here you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts: training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize, leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.

The validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size.

The pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1.
###Code
data_dir = 'flowers'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# TODO: Define your transforms for the training, validation, and testing sets
# Order important! Crops/Flips applied on Image; convert to Tensor; Normalize applied to Tensor
train_transforms = tv.transforms.Compose([tv.transforms.RandomRotation(30),
tv.transforms.RandomResizedCrop(224),
tv.transforms.RandomHorizontalFlip(),
tv.transforms.RandomVerticalFlip(),
tv.transforms.ToTensor(),
tv.transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
])
valid_transforms = tv.transforms.Compose([tv.transforms.Resize(255),
tv.transforms.CenterCrop(224),
tv.transforms.ToTensor(),
tv.transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
])
test_transforms = tv.transforms.Compose([tv.transforms.Resize(255),
tv.transforms.CenterCrop(224),
tv.transforms.ToTensor(),
tv.transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
])
# TODO: Load the datasets with ImageFolder
train_dataset = tv.datasets.ImageFolder(train_dir, transform=train_transforms)
valid_dataset = tv.datasets.ImageFolder(valid_dir, transform=valid_transforms)
test_dataset = tv.datasets.ImageFolder(test_dir, transform=test_transforms)
# TODO: Using the image datasets and the trainforms, define the dataloaders
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
Label mappingYou'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers.
###Code
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
###Output
_____no_output_____
###Markdown
Building and training the classifier

Now that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features.

Refer to [the rubric](https://review.udacity.com/#!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do:
* Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use)
* Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout
* Train the classifier layers using backpropagation using the pre-trained network to get the features
* Track the loss and accuracy on the validation set to determine the best hyperparameters

When training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project.
###Code
# TODO: Build and train your network
def setup_nn(input_size, hidden_sizes, output_size, drop_p, learning_rate):
model = tv.models.vgg16(pretrained=True)
for param in model.parameters():
param.requires_grad = False
classifier = nn.Sequential(OrderedDict([
('dropout',nn.Dropout(drop_p)),
('fc1', nn.Linear(input_size, hidden_sizes[0])),
('relu1', nn.ReLU()),
('fc2', nn.Linear(hidden_sizes[0], hidden_sizes[1])),
('relu2',nn.ReLU()),
('fc3',nn.Linear(hidden_sizes[1], hidden_sizes[2])),
('relu3',nn.ReLU()),
('fc4',nn.Linear(hidden_sizes[2], output_size)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(model.classifier.parameters(), lr=learning_rate)
return model, criterion, optimizer
input_size=25088
hidden_sizes=[6200, 1600, 400]
output_size=102
drop_p=0.5
learning_rate=0.001
epochs = 12
print_every = 65
steps = 0
model, criterion, optimizer = setup_nn(input_size, hidden_sizes, output_size, drop_p, learning_rate)
# change to cuda
model.to('cuda')
for e in range(epochs):
running_loss = 0
for ii, (inputs, labels) in enumerate(train_loader):
steps += 1
inputs, labels = inputs.to('cuda'), labels.to('cuda')
optimizer.zero_grad()
# Forward and backward passes
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
print("Epoch: {}/{}... ".format(e+1, epochs),
"Loss: {:.4f}".format(running_loss/print_every))
running_loss = 0
###Output
Epoch: 1/12... Loss: 3.5941
Epoch: 2/12... Loss: 0.8176
Epoch: 2/12... Loss: 1.7052
Epoch: 3/12... Loss: 1.1713
Epoch: 4/12... Loss: 0.3069
Epoch: 4/12... Loss: 1.1863
Epoch: 5/12... Loss: 0.7203
Epoch: 6/12... Loss: 0.0861
Epoch: 6/12... Loss: 0.9855
Epoch: 7/12... Loss: 0.4439
Epoch: 7/12... Loss: 0.8979
Epoch: 8/12... Loss: 0.7905
Epoch: 9/12... Loss: 0.2835
Epoch: 9/12... Loss: 0.8226
Epoch: 10/12... Loss: 0.6167
Epoch: 11/12... Loss: 0.1266
Epoch: 11/12... Loss: 0.7714
Epoch: 12/12... Loss: 0.4457
Epoch: 12/12... Loss: 0.7175
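###Markdown
The rubric asks you to track validation performance, which the loop above omits. A minimal sketch of a per-epoch validation pass (reusing the `valid_loader`, `model`, and `criterion` defined earlier; added for illustration, not part of the original run) could be:

```python
# Hedged sketch: evaluate on the validation set, e.g. at the end of each epoch
model.eval()                      # disable dropout for evaluation
valid_loss, correct, total = 0.0, 0, 0
with torch.no_grad():
    for inputs, labels in valid_loader:
        inputs, labels = inputs.to('cuda'), labels.to('cuda')
        outputs = model(inputs)
        valid_loss += criterion(outputs, labels).item()
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Validation loss: {:.4f}, accuracy: {:.2%}'.format(
    valid_loss / len(valid_loader), correct / total))
model.train()                     # back to training mode
```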
###Markdown
Testing your network

It's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. You should be able to reach around 70% accuracy on the test set if the model has been trained well.
###Code
# TODO: Do validation on the test set
correct = 0
total = 0
model.to('cuda')
with torch.no_grad():
for data in test_loader:
images, labels = data
images, labels = images.to('cuda'), labels.to('cuda')
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the test images: %d %%' % (100 * correct / total))
###Output
Accuracy of the network on the test images: 83 %
###Markdown
Save the checkpoint

Now that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on.

```python
model.class_to_idx = image_datasets['train'].class_to_idx
```

Remember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now.
###Code
# TODO: Save the checkpoint
checkpoint = {'input_size': input_size,
'hidden_sizes': hidden_sizes,
'output_size': output_size,
'drop_p': drop_p,
'learning_rate': learning_rate,
'state_dict': model.state_dict()}
torch.save(checkpoint, 'checkpoint.pth')
###Output
_____no_output_____
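###Markdown
Note that the checkpoint above omits the `class_to_idx` mapping, the epoch count, and the optimizer state mentioned in the text. A sketch of a fuller checkpoint (assuming the `train_dataset`, `epochs`, and `optimizer` defined earlier; `checkpoint_full.pth` is a hypothetical file name) could be:

```python
# Hedged sketch: a more complete checkpoint for inference and resumed training
model.class_to_idx = train_dataset.class_to_idx   # attach the class mapping to the model
checkpoint = {'input_size': input_size,
              'hidden_sizes': hidden_sizes,
              'output_size': output_size,
              'drop_p': drop_p,
              'learning_rate': learning_rate,
              'epochs': epochs,
              'class_to_idx': model.class_to_idx,
              'optimizer_state': optimizer.state_dict(),
              'state_dict': model.state_dict()}
torch.save(checkpoint, 'checkpoint_full.pth')
```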
###Markdown
Loading the checkpoint

At this point it's good to write a function that can load a checkpoint and rebuild the model. That way you can come back to this project and keep working on it without having to retrain the network.
###Code
# TODO: Write a function that loads a checkpoint and rebuilds the model
checkpoint = torch.load('checkpoint.pth')
model,_,_ = setup_nn(checkpoint['input_size'],
checkpoint['hidden_sizes'],
checkpoint['output_size'],
checkpoint['drop_p'],
checkpoint['learning_rate'])
model.load_state_dict(checkpoint['state_dict'])
###Output
_____no_output_____
###Markdown
Inference for classification

Now you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like

```python
probs, classes = predict(image_path, model)
print(probs)
print(classes)
> [ 0.01558163  0.01541934  0.01452626  0.01443549  0.01407339]
> ['70', '3', '45', '62', '55']
```

First you'll need to handle processing the input image such that it can be used in your network.

Image Preprocessing

You'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training.

First, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) methods. Then you'll need to crop out the center 224x224 portion of the image.

Color channels of images are typically encoded as integers 0-255, but the model expects floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so: `np_image = np.array(pil_image)`.

As before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation.

And finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions.
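The implementation below takes a shortcut through `torchvision` transforms; for reference, the manual PIL/NumPy route described above could look like this sketch (`process_image_manual` is a hypothetical helper, not used elsewhere in this notebook):

```python
import numpy as np
import PIL

def process_image_manual(image_path):
    im = PIL.Image.open(image_path)
    # Resize so the shortest side is 256 pixels, keeping the aspect ratio
    w, h = im.size
    if w < h:
        im = im.resize((256, int(256 * h / w)))
    else:
        im = im.resize((int(256 * w / h), 256))
    # Crop out the center 224x224 portion
    w, h = im.size
    left, top = (w - 224) // 2, (h - 224) // 2
    im = im.crop((left, top, left + 224, top + 224))
    # Scale 0-255 integers to 0-1 floats, then normalize each channel
    np_image = np.array(im) / 255.0
    np_image = (np_image - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
    # Move the color channel to the first dimension, as PyTorch expects
    return np_image.transpose((2, 0, 1))
```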
###Code
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
# TODO: Process a PIL image for use in a PyTorch model
pil_image = PIL.Image.open(image)
image_transforms = tv.transforms.Compose([tv.transforms.Resize(255),
tv.transforms.CenterCrop(224),
tv.transforms.ToTensor(),
tv.transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
])
tensor_image = image_transforms(pil_image)
np_image = tensor_image.numpy()
#print(np.amin(np_image))
#print(np.amax(np_image))
#print(np_image)
#print(np_image.shape)
return np_image
###Output
_____no_output_____
###Markdown
To check your work, the function below converts a PyTorch tensor back into an image and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).
###Code
def imshow(image, ax=None, title=None):
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
    # but matplotlib assumes it is the third dimension
image = image.transpose((1, 2, 0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = np.clip(image, 0, 1)
ax.imshow(image)
return ax
###Output
_____no_output_____
###Markdown
Class Prediction

Once you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values.

To get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. You need to convert from these indices to the actual class labels using `class_to_idx`, which hopefully you added to the model or got from the `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well.

Again, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.

```python
probs, classes = predict(image_path, model)
print(probs)
print(classes)
> [ 0.01558163  0.01541934  0.01452626  0.01443549  0.01407339]
> ['70', '3', '45', '62', '55']
```
###Code
def predict(image_path, model, topk=5):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
# TODO: Implement the code to predict the class from an image file
model.to('cuda')
image = process_image(image_path)
image = torch.from_numpy(image)
image = image.unsqueeze_(0)
image = image.to('cuda')
# Calculate the class probabilities (softmax) for img
with torch.no_grad():
output = model.forward(image)
output_softmaxed = torch.nn.functional.softmax(output.data, dim=1)
return output_softmaxed.topk(topk)
###Output
_____no_output_____
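###Markdown
Note that `predict` above returns raw tensor indices rather than the class labels described in the text. A sketch of the inversion step (assuming `class_to_idx` has been attached to the model, e.g. via `model.class_to_idx = train_dataset.class_to_idx`) could be:

```python
# Hedged sketch: map top-k indices back to class labels, then to flower names
probs, indices = predict(image_path, model)
idx_to_class = {v: k for k, v in model.class_to_idx.items()}  # invert the dictionary
classes = [idx_to_class[i] for i in indices[0].cpu().numpy()]
names = [cat_to_name[c] for c in classes]
print(probs[0].cpu().numpy())
print(classes)
print(names)
```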
###Markdown
Sanity Checking

Now that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this (the example figure is omitted here).

You can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above.
###Code
# TODO: Display an image along with the top 5 classes
image_index = 1
image_path = test_dir + '/' + str(image_index) + '/image_06743.jpg'
predicted_probabilities, predicted_labels = predict(image_path, model)
image = process_image(image_path)
predicted_probabilities = np.array(predicted_probabilities[0])
predicted_labels = np.array(predicted_labels[0])
print(predicted_probabilities)
print(predicted_labels)
# Show image
ax1 = imshow(image, ax = plt)
ax1.axis('off')
ax1.title(cat_to_name[str(image_index)])
ax1.show()
# Do assignments
assigned_probabilities = np.array(predicted_probabilities)
assigned_labels = [cat_to_name[str(label+1)] for label in predicted_labels]
print(assigned_probabilities)
print(assigned_labels)
# Show Assignments
_,ax2 = plt.subplots()
ticks = np.arange(len(assigned_labels))
ax2.bar(ticks, assigned_probabilities)
ax2.set_xticks(ticks = ticks)
ax2.set_xticklabels(assigned_labels)
ax2.yaxis.grid(True)
plt.show()
###Output
[ 9.99501109e-01 1.15869087e-04 1.12051908e-04 1.05393607e-04
6.08839327e-05]
[ 0 49 95 87 84]
|
notebooks/T6 - 3 - K-Means.ipynb | ###Markdown
The k-means method
###Code
import numpy as np
data = np.random.random(90).reshape(30,3)  # reshape(30,3) rearranges the values into 30 rows and 3 columns
data
c1 = np.random.choice(range(len(data)))  # choice: picks an index at random
c2 = np.random.choice(range(len(data)))
clust_centers = np.vstack([data[c1], data[c2]])
clust_centers
from scipy.cluster.vq import vq
clusters = vq(data, clust_centers)
clusters
labels = clusters[0]
labels
import plotly.plotly as py
import plotly.graph_objs as go
import plotly.offline as ply
x = []
y = []
z = []
x2 = []
y2 = []
z2 = []
for i in range(0, len(labels)):
if(labels[i] == 0):
x.append(data[i,0])
y.append(data[i,1])
z.append(data[i,2])
else:
x2.append(data[i,0])
y2.append(data[i,1])
z2.append(data[i,2])
cluster1 = go.Scatter3d(
x=x,
y=y,
z=z,
mode='markers',
marker=dict(
size=12,
line=dict(
color='rgba(217, 217, 217, 0.14)',
width=0.5
),
opacity=0.9
),
name="Cluster 0"
)
cluster2 = go.Scatter3d(
x=x2,
y=y2,
z=z2,
mode='markers',
marker=dict(
color='rgb(127, 127, 127)',
size=12,
symbol='circle',
line=dict(
color='rgb(204, 204, 204)',
width=1
),
opacity=0.9
),
name="Cluster 1"
)
data2 = [cluster1, cluster2]
layout = go.Layout(
margin=dict(
l=0,
r=0,
b=0,
t=30
)
)
fig = go.Figure(data=data2, layout=layout)
ply.plot(fig, filename='Clusters')
from scipy.cluster.vq import kmeans
kmeans(data, clust_centers)
kmeans(data, 2)
###Output
_____no_output_____ |
bandits/simple_bandit.ipynb | ###Markdown
1. K-Armed Bandit Problem

Bandit problems are reinforcement learning (RL) problems in which there is only a single state in which multiple actions can potentially be taken. In the k-armed bandit problem, you are repeatedly faced with a choice among k different options/actions. After selecting an action, you receive a reward chosen from a stationary probability distribution that is dependent on the selected action.

Objective: Maximize the expected total reward over some time period

Although the rewards for actions are chosen from a probability distribution, each action has a mean reward value. We start out with estimates of the rewards for each action, but with more selections/experience, the estimates converge to the mean. If we have a 'way' to quantify the value of taking each action (the expected reward), then to achieve the objective, we would simply always take the action with the highest value. Mathematically speaking,

$Q_t(a) = E[R_t | A_t = a]$

This says that the value of an arbitrary action **a** is the expected reward given that **a** is selected.

* If we keep estimates of the action values, then at each step, there is at least one action whose estimate is the greatest. These actions are called **greedy actions** and if selected, we are said to be **exploiting** our current knowledge of the values of actions
* If instead we select a non-greedy action, then we are said to be **exploring** because it enables us to improve our estimates of the non-greedy action values

Sample-Average Action-Value Estimation

A simple method of estimating the value of an action is by averaging the rewards previously received from taking that action, i.e.

$$Q_t(a) = \frac{\text{sum of rewards when a is taken prior to t}}{\text{number of times a has been taken prior to t}}$$

The next step is then to use the estimates to select actions. The simplest rule is to select the action (or one of the actions) with the highest estimated value. This is called the **greedy action selection** method and is denoted:

$A_t = argmax_a Q_t(a)$

where $argmax_a$ denotes the value of a at which the expression is maximized.

* If multiple actions maximize the expression, then it is important that the tie is broken **arbitrarily**

You may have guessed that being greedy all the time is probably not the best way to go - there may be an unexplored action of higher value than our current greedy choice. An alternative is to be greedy most of the time, but every once in a while, with probability $\epsilon$, select a random action. Methods with this action selection rule are called **$\epsilon$-greedy methods**. This means that with probability $\epsilon$ we select a random action and with probability $1-\epsilon$ we select a greedy action.

Problem Description

We have a k-armed bandit problem with k = 10. The actual action values, $q_*(a)$, are selected according to a normal distribution with mean 0 and variance 1. When a learning method selects action $A_t$ at time t, the actual reward $R_t$ is selected from a normal distribution with mean $q_*(A_t)$ and variance 1. We'll measure the behavior as it improves over 1000 steps. This makes up one run. We'll execute 2000 independent runs to obtain the learning algorithm's average behavior.

Environment
* Python 3.5
* numpy
* matplotlib
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
def sample_average(actions):
"""
Returns the action-value for each action using the sample-average method
:param actions: actions[0] is a tuple for each action of the form (sum of rewards received, no. of times taken)
"""
results = [0.0 if actions[i][1] == 0 else actions[i][0] / float(actions[i][1]) for i in range(len(actions))]
return results
def get_reward(true_values, a):
"""
Returns the reward for selecting action a.
Reward is selected around true_values[a] with unit variance (as in problem description)
:param true_values: list of expected reward for each action
:param a: index of action to return reward for
"""
reward = np.random.normal(true_values[a], size=1)[0]
return reward
def k_armed_bandit(k, epsilon, iterations):
"""
Performs a single run of the k-armed bandit experiment
:param k: the number of arms
    :param epsilon: value of epsilon for epsilon-greedy action selection
:param iterations: number of steps in a single run
"""
# Randomly assign true values of reward for each action with mean 0 and variance 1
true_values = np.random.normal(size=k)
# actions[i] is the ith action
# actions[i][0] is the sum of received rewards for taking action i
# actions[i][1] is the number of times action i has been taken
actions = [[0.0, 0] for _ in range(k)]
# Store the rewards received for this experiment
rewards = []
# Track how often the optimal action was selected
optimal = []
optimal_action = true_values.argmax()
for _ in range(iterations):
prob = np.random.rand(1)
if prob > epsilon:
# Greedy (exploit current knowledge)
action_values = np.array(sample_average(actions))
# Break ties arbitrarily (reference: http://stackoverflow.com/questions/42071597/numpy-argmax-random-tie-breaking)
a = np.random.choice(np.flatnonzero(action_values == action_values.max()))
else:
# Explore (take random action)
a = np.random.randint(0, k)
reward = get_reward(true_values, a)
# Update statistics for executed action
actions[a][0] += reward
actions[a][1] += 1
rewards.append(reward)
optimal.append(1 if a == optimal_action else 0)
return rewards, optimal
def experiment(k, epsilon, iters, epochs):
"""
Runs the k-armed bandit experiment
:param k: the number of arms
    :param epsilon: the value of epsilon for epsilon-greedy action selection
:param iters: the number of steps in a single run
:param epochs: the number of runs to execute
"""
rewards = []
optimal = []
for i in range(epochs):
r, o = k_armed_bandit(k, epsilon, iters)
rewards.append(r)
optimal.append(o)
print('Experiment with \u03b5 = {} completed.'.format(epsilon))
# Compute the mean reward for each iteration
r_means = np.mean(rewards, axis=0)
o_means = np.mean(optimal, axis=0)
return r_means, o_means
k = 10
iters = 1000
runs = 2000
# We experiment with values 0.01, 0.1 and 0 (always greedy)
r_exp1, o_exp1 = experiment(k, 0, iters, runs)
r_exp2, o_exp2 = experiment(k, 0.01, iters, runs)
r_exp3, o_exp3 = experiment(k, 0.1, iters, runs)
x = range(iters)
plt.plot(x, r_exp1, c='green', label='\u03b5 = 0')
plt.plot(x, r_exp2, c='red', label='\u03b5 = 0.01')
plt.plot(x, r_exp3, c='black', label='\u03b5 = 0.1')
plt.xlabel('Steps')
plt.ylabel('Average reward')
plt.legend()
plt.show()
plt.plot(x, o_exp1, c='green', label='\u03b5 = 0')
plt.plot(x, o_exp2, c='red', label='\u03b5 = 0.01')
plt.plot(x, o_exp3, c='black', label='\u03b5 = 0.1')
plt.xlabel('Steps')
plt.ylabel('% Optimal action')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
From these results we can see that the greedy method ($\epsilon$ = 0) performs the worst. This is because it simply selects the same action each time (the first one that gives a positive reward; remember, all estimates are initialized to 0.0 so all actions have equal likelihood initially). The other experiments involve exploration of varying degrees and so can be seen improving with time.

Incremental Implementation

In the code listing above, we computed the values of actions by summing rewards and then dividing by the number of times a particular action was taken, i.e.

$Q_n = \frac{R_1 + R_2 + ... + R_{n-1}}{n - 1}$

An obvious implementation would be to maintain a record of all the rewards and then perform this computation when needed. This can be memory intensive and is unnecessary. We can show that the computation can be performed incrementally as:

$Q_{n+1} = Q_n + \frac{1}{n}[R_n - Q_n]$

Proof
\begin{align}Q_{n+1} & = \frac{1}{n}\sum_{i = 1}^n R_i \\& = \frac{1}{n}\left(R_n + \sum_{i=1}^{n-1} R_i\right) \\& = \frac{1}{n}\left(R_n + (n-1)\frac{1}{n-1} \sum_{i = 1}^{n-1} R_i\right) \\& = \frac{1}{n}\left(R_n + (n-1)Q_n\right) \\& = \frac{1}{n}\left(R_n + nQ_n - Q_n\right) \\& = Q_n + \frac{1}{n} \left(R_n - Q_n\right)\end{align}
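As a quick numerical check of this identity (an added sketch, not part of the original notebook), the incremental update reproduces the running mean exactly:

```python
import numpy as np

rewards = np.random.normal(size=100)
Q = 0.0
for n, R in enumerate(rewards, start=1):
    Q += (1.0 / n) * (R - Q)            # incremental update
print(np.isclose(Q, rewards.mean()))    # True: matches the batch average
```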
###Code
# We get rid of the sample_average() method and modify k_armed_bandit as follows:
# Lines marked ** indicate changes to the code
def k_armed_bandit(k, epsilon, iterations):
"""
Performs a single run of the k-armed bandit experiment
:param k: the number of arms
    :param epsilon: value of epsilon for epsilon-greedy action selection
    :param iterations: number of steps in a single run
"""
# Randomly assign true values of reward for each action with mean 0 and variance 1
true_values = np.random.normal(size=k)
# Estimates of action values **
Q = np.zeros(k)
# N[i] is the no. of times action i has been taken **
N = np.zeros(k)
# Store the rewards received for this experiment
rewards = []
# Track how often the optimal action was selected
optimal = []
optimal_action = true_values.argmax()
for _ in range(iterations):
prob = np.random.rand(1)
if prob > epsilon:
# Greedy (exploit current knowledge) **
a = np.random.choice(np.flatnonzero(Q == Q.max()))
else:
# Explore (take random action)
a = np.random.randint(0, k)
reward = get_reward(true_values, a)
        # Update statistics for executed action **
        N[a] += 1
        Q[a] += (1.0 / N[a]) * (reward - Q[a])
rewards.append(reward)
optimal.append(1 if a == optimal_action else 0)
return rewards, optimal
###Output
_____no_output_____ |
code/data_process/data_prep_201115.ipynb | ###Markdown
Inspect TEMPURA dataset
###Code
tempura = pd.read_csv("../../data/thermo/200617_TEMPURA.csv")
tempura.head()
tempura.groupby("superkingdom").count().iloc[:,0:2]
tempura.loc[:,["superkingdom","Genome_GC","16S_GC","Tmin","Tmax","Tmax_Tmin"]].groupby("superkingdom").agg(['mean', 'max','min'])
tempura.loc[tempura["taxonomy_id"]==760142,:]
###Output
_____no_output_____
###Markdown
Inspect istable2.0 data
###Code
istable_train = pd.read_csv("../../data/thermo/istable_S3568_training.txt",sep = '\t')
istable_train.head()
istable_test = pd.read_csv("../../data/thermo/istable_S630.txt",sep = '\t')
istable_test.head()
###Output
_____no_output_____
###Markdown
Inspect Thermo Prot dataset
- The temperature of thermophilic proteins in this dataset was set to above 60°C, and the temperature of non-thermophilic proteins was set to less than 30°C
###Code
records = SeqIO.parse("../../data/thermo/thermo_prot_nonthermophilic_2.fasta", "fasta")
count = SeqIO.write(records, "../../data/thermo/thermo_prot_nonthermophilic_2.tab", "tab")
print("Converted %i records" % count)
thermo_prot_nonthermophilic_2 = pd.read_csv("../../data/thermo/thermo_prot_nonthermophilic_2.tab",sep = '\t',header=None)
thermo_prot_nonthermophilic_2.columns = ["id", "seq"]
thermo_prot_nonthermophilic_2.head()
records = SeqIO.parse("../../data/thermo/thermo_prot_thermophilic_2.fasta", "fasta")
count = SeqIO.write(records, "../../data/thermo/thermo_prot_thermophilic_2.tab", "tab")
print("Converted %i records" % count)
thermo_prot_thermophilic_2 = pd.read_csv("../../data/thermo/thermo_prot_thermophilic_2.tab",sep = '\t',header=None)
thermo_prot_thermophilic_2.columns = ["id", "seq"]
thermo_prot_thermophilic_2.head()
###Output
Converted 106 records
###Markdown
Inspect ProThermDat dataset- for pdt_y: 1 is thermophilic, 0 is otherwise
###Code
pdt_X = np.load("../../data/thermo/pdt_X_unique_huge.npy")
pdt_y = np.load("../../data/thermo/pdt_y_unique_huge.npy",allow_pickle=True)
pdt_header = np.load("../../data/thermo/pdt_header_unique_huge.npy",allow_pickle=True)
pdt_X[1]
print(pdt_X.shape)
print(pdt_y.shape)
print(pdt_header)
IUPAC_Extended_Dic_Transf = {"A":1,"C":2,"D":3,"E":4,"F":5,"G":6,"H":7,"I":8,"K":9,"L":10,"M":11,"N":12,"P":13,"Q":14,"R":15,"S":16,"T":17,"V":18,"W":19,"Y":20,"B":21,"X":21,"Z":21,"J":21,"U":21,"O":21}
tok_to_aa = [(b,a) for (a,b) in IUPAC_Extended_Dic_Transf.items()]
tok_to_aa.append((0,''))
tok_to_aa = dict(tok_to_aa)
# convert tokenized aa sequence back to real sequence
''.join([tok_to_aa[x] for x in pdt_X[0,:]])
# get the domains from the superfamily belonging to the motor proteins
p_loop_gtpase = "PF06414;PF06564;PF07015;PF02367;PF02534;PF06309;PF05621;PF00265;PF06068;PF02223;PF00685;PF00448;PF02463;PF01202;PF00158;PF10443;PF03215;PF00485;PF00519;PF06431;PF01057;PF10609;PF00931;PF05729;PF00488;PF03205;PF09140;PF01078;PF00493;PF08433;PF07693;PF01695;PF01745;PF01715;PF00693;PF00625;PF00437;PF01580;PF00142;PF01935;PF05872;PF05673;PF01712;PF06144;PF02224;PF06418;PF07931;PF02492;PF01121;PF01656;PF00308;PF03668;PF00006;PF02374;PF03308;PF01637;PF01583;PF03969;PF00406;PF00709;PF00005;PF08298;PF07728;PF07726;PF07724;PF00004;PF05707;PF11496;PF10649;PF10412;PF10236;PF09820;PF09818;PF09547;PF09439;PF09037;PF08423;PF07755;PF07652;PF07088;PF06990;PF06733;PF05970;PF05894;PF05879;PF05876;PF05783;PF05127;PF04670;PF04466;PF04257;PF03976;PF03796;PF03567;PF03354;PF02689;PF02606;PF02572;PF02499;PF02399;PF01268;PF00350;PF00225;PF00063;PF10662;PF00271;PF00910;PF05496;PF02562;PF00025;PF05049;PF03266;PF01591;PF00071;PF04548;PF00009;PF00176;PF07517;PF03237;PF00735;PF04665;PF02263;PF01926;PF00580;PF00270;PF09848;PF06745;PF04851;PF01443;PF03193;PF00503;PF06858;PF02283;PF02456;PF00154;PF03029;PF08477;PF02421;PF12696;PF07999;PF04310;PF05272;PF06048;PF12774;PF12775;PF12780;PF04317;PF12846;PF11398;PF13086;PF13087;PF13166;PF13173;PF13175;PF13177;PF13189;PF10923;PF13191;PF08303;PF13207;PF13238;PF13245;PF13304;PF13307;PF13361;PF13401;PF13469;PF12128;PF13479;PF13476;PF13481;PF13500;PF13514;PF13521;PF13538;PF13555;PF13558;PF13604;PF13614;PF13654;PF13671;PF13872;PF14516;PF14532;PF14617;PF05625;PF05179;PF16203;PF14417;PF02500;PF13337;PF11602;PF16575;PF16796;PF13871;PF02702;PF10088;PF03846;PF17213;PF12848;PF12399;PF14396;PF10996;PF09711;PF11111;PF08351;PF03192;PF02841;PF05609;PF08438;PF12344;PF12781;PF03028;PF16813;PF16834;PF16836;PF07034;PF10483;PF09807;PF03618;PF17784;PF18128;PF18133;PF07529;PF18747;PF18748;PF18751;PF18766;PF18082;PF19044;PF19263;"
tubulin_binding = "PF10644;PF14881;PF13809;PF00091;"
tubulin_c = "PF03953;PF12327;"
actin_like = "PF06406;PF00480;PF02541;PF00814;PF06723;PF05378;PF01968;PF00012;PF03727;PF00349;PF02685;PF01150;PF03630;PF00370;PF02782;PF06277;PF02543;PF03309;PF01869;PF00022;PF00871;PF03702;PF08841;PF07318;PF05134;PF11104;PF13941;PF14450;PF09989;PF06050;PF17003;PF14574;PF17788;PF17989;"
p_loop_gtpase = p_loop_gtpase.split(";")[0:-1]
tubulin_binding = tubulin_binding.split(";")[0:-1]
tubulin_c = tubulin_c.split(";")[0:-1]
actin_like = actin_like.split(";")[0:-1]
motors_related = p_loop_gtpase + tubulin_binding + tubulin_c + actin_like
in_p_loop_gtpase = pd.Series(pdt_header[:,1]).isin(p_loop_gtpase)
in_tubulin_binding = pd.Series(pdt_header[:,1]).isin(tubulin_binding)
in_tubulin_c = pd.Series(pdt_header[:,1]).isin(tubulin_c)
in_actin_like = pd.Series(pdt_header[:,1]).isin(actin_like)
in_motors_related = pd.Series(pdt_header[:,1]).isin(motors_related)
pdt_X_motor = pdt_X[in_motors_related,:]
pdt_y_motor = pdt_y[in_motors_related]
pdt_header_motor = pdt_header[in_motors_related,:]
pdt_X_list = [pdt_X_motor[i] for i in range(pdt_X_motor.shape[0])]
# build up the dataframe
pdt_motor = pd.DataFrame({"uniprot_id":pdt_header_motor[:,0], "pfam_id":pdt_header_motor[:,1], 'is_thermophilic': pdt_y_motor, "token":pdt_X_list})
pdt_motor.head()
pdt_seq_motor = []
for i in range(pdt_motor.shape[0]):
token = pdt_motor.iloc[i,3]
if i%10000 == 0:
print(i)
# print(token)
curr_seq = ''.join([tok_to_aa[x] for x in token])
pdt_seq_motor.append(curr_seq)
pdt_seq_motor[0]
pdt_motor["seq"] = pdt_seq_motor
pdt_motor.head()
pdt_motor["clan"] = "na"
in_p_loop_gtpase = pd.Series(pdt_header_motor[:,1]).isin(p_loop_gtpase)
in_tubulin_binding = pd.Series(pdt_header_motor[:,1]).isin(tubulin_binding)
in_tubulin_c = pd.Series(pdt_header_motor[:,1]).isin(tubulin_c)
in_actin_like = pd.Series(pdt_header_motor[:,1]).isin(actin_like)
print(sum(in_p_loop_gtpase))
print(sum(in_tubulin_binding))
print(sum(in_tubulin_c))
print(sum(in_actin_like))
pdt_motor.loc[in_p_loop_gtpase,"clan"] = "p_loop_gtpase"
pdt_motor.loc[in_tubulin_binding,"clan"] = "tubulin_binding"
pdt_motor.loc[in_tubulin_c,"clan"] = "tubulin_c"
pdt_motor.loc[in_actin_like,"clan"] = "actin_like"
pdt_motor.groupby(["clan","is_thermophilic"]).count()
# perform a sanity check on 10 thermophilic pfam proteins (by checking their taxonomy)
pdt_motor.loc[pdt_motor["is_thermophilic"]==1,:].iloc[0:10,:]
###Output
_____no_output_____
###Markdown
- F2LWD8_HIPMA: Hippea maritima is a bacterium from the genus of Hippea which has been isolated from sediments from a hydrothermal vent from Matupi Harbour in Papua New Guinea- A0A0A8WZL0: B.selenatarsenatis is a mesophile with its optimal growth temperature between 25~40 degrees Celsius, and a pH between 7.5~9.0.- I3EAK2: Bacillus methanolicus MGA3, was isolated from freshwater marsh soils, and grows rapidly in cultures heated to up to 60 °C using only methanol as a carbon source.
###Code
# export pdt_motor to a csv file
pdt_motor.to_csv("../../data/thermo/pdt_motor.csv",index = False)
###Output
_____no_output_____ |
notebooks/Stochastic Bandits - Preference Estimation.ipynb | ###Markdown
Stochastic Multi-Armed Bandits - Preference Estimation

These examples come from Chapter 2 of [Reinforcement Learning: An Introduction](https://webdocs.cs.ualberta.ca/~sutton/book/the-book.html) by Sutton and Barto (2nd ed. rev: Oct2015)
###Code
%matplotlib inline
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import bandits as bd
###Output
_____no_output_____
###Markdown
Instead of estimating the expected reward from selecting a particular arm, we may only care about the relative preference of one arm to another.
###Code
n_arms = 10
bandit = bd.GaussianBandit(n_arms, mu=4)
n_trials = 1000
n_experiments = 500
###Output
_____no_output_____
###Markdown
Softmax

Preference learning uses a Softmax-based policy, where the action estimates are converted to a probability distribution using the softmax function. This is then sampled to produce the chosen arm.
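The library hides the details behind `bd.SoftmaxPolicy` and `bd.GradientAgent`; the core gradient-bandit step from Sutton & Barto looks roughly like the following NumPy sketch (independent of the `bandits` package; `reward_fn` and the constant `baseline` are placeholder assumptions):

```python
import numpy as np

def softmax_step(H, alpha, reward_fn, baseline=0.0):
    """One gradient-bandit step: sample from softmax(H), then update preferences."""
    pi = np.exp(H - H.max()); pi /= pi.sum()   # numerically stable softmax
    a = np.random.choice(len(H), p=pi)          # sample the chosen arm
    R = reward_fn(a)                            # placeholder reward function
    H -= alpha * (R - baseline) * pi            # lower every preference a little...
    H[a] += alpha * (R - baseline)              # ...net effect raises the chosen arm's
    return H, a, R
```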
###Code
policy = bd.SoftmaxPolicy()
agents = [
bd.GradientAgent(bandit, policy, alpha=0.1),
bd.GradientAgent(bandit, policy, alpha=0.4),
bd.GradientAgent(bandit, policy, alpha=0.1, baseline=False),
bd.GradientAgent(bandit, policy, alpha=0.4, baseline=False)
]
env = bd.Environment(bandit, agents, 'Gradient Agents')
scores, optimal = env.run(n_trials, n_experiments)
env.plot_results(scores, optimal)
###Output
_____no_output_____ |
src/.ipynb_checkpoints/37b > Only Origs-checkpoint.ipynb | ###Markdown
From the useful sheets, we have to formalize a GRL problem.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import os
import requests
import io
import seaborn as sns
PATH = '../data/'
files = os.listdir(PATH)
dfs = {f[:-4] : pd.read_csv(PATH + f)
for f in files if f[-3:] == 'csv'
}
###Output
_____no_output_____
###Markdown
What all is useful here (a sketch of turning these sheets into a graph follows the list):
- orig_members: member_id (763)
- orig_inline_notifications: notify_from_id, notify_to_id (47066)
- orig_message_topic_user_map: map_user_id, map_topic_id (6025)
- orig_message_topics: mt_starter_id, mt_to_member_id, mt_id (3101)
- orig_reputation_index: member_id (141550)
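One way to formalize this (a hypothetical sketch; `networkx` is not imported elsewhere in this notebook) is to read the notification sheet as a directed, typed edge list between members:

```python
import networkx as nx

# Hedged sketch: build a directed member graph from the notification sheet
notif = dfs['orig_inline_notifications']
G = nx.from_pandas_edgelist(
    notif,
    source='notify_from_id',
    target='notify_to_id',
    edge_attr='notify_type_key',   # keep the notification type as an edge feature
    create_using=nx.MultiDiGraph,  # members can notify each other repeatedly
)
print(G.number_of_nodes(), G.number_of_edges())
```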
###Code
#76. 763 Original members.
sum(dfs['orig_members'].email.isnull()) # (== 0) = True
dfs['orig_members'].head(4);
mems = list(dfs['orig_members'].member_id)
#75. Useful. Notification Graph could give us very informative edges.
# from notify_from_id to notify_to_id based on notify_type_key
# and notify_type_key could be a nice edge feature
dfs['orig_inline_notifications'].notify_type_key.value_counts();
# wow, both are fully included
print(np.mean([m in mems for m in dfs['orig_inline_notifications'].notify_from_id]))
print(np.mean([m in mems for m in dfs['orig_inline_notifications'].notify_to_id]))
dfs['orig_inline_notifications'].head(4)
#78. Useful. But don't fully understand.
# Mapping from user_id to topic_id, might help connect users.
print(np.mean([m in mems for m in dfs['orig_message_topic_user_map'].map_user_id]))
dfs['orig_message_topic_user_map'].head(4);
# TODO:
# Check if users following the same topic are same in
# this map and core_message_topics and orig_message_topics
# reference with topic_title and compare the topic_id v topic_title mapping of both
# orig and core
ids = 0
for i in range(len(dfs['orig_message_topics'].mt_title)):
if dfs['orig_message_topics'].mt_title[i] == dfs['core_message_topics'].mt_title[i]:
ids += 1
print(ids)
len(dfs['orig_message_topics'].mt_title), len(dfs['core_message_topics'].mt_title)
#79 All Orig Message Topics (total 3101)
# mt_id is same as map_topic_id from orig_message_topic_user_map
# mt_starter_id is the member_id of the person who put the first message on that topic
# mt_to_member_id is the member_id of the recipient of this message
tops = list(dfs['orig_message_topics'].mt_id)
print(np.mean([m in mems for m in dfs['orig_message_topics'].mt_starter_id]))
print(np.mean([m in mems for m in dfs['orig_message_topics'].mt_to_member_id]))
print(np.mean([m in tops for m in dfs['orig_message_topic_user_map'].map_topic_id]))
dfs['orig_message_topics'].head(5);
#82. pfields of 764 members. Might help in node features. Not really, mostly nans.
dfs['orig_pfields_content'].head(3);
# What is reputation? Total 141550
# 635 users have a reputation index, could be used for node classification or features?
members = set(dfs['orig_reputation_index'].member_id)
print(np.mean([m in mems for m in members]))
# print(members)
freq = [[m, sum(dfs['orig_reputation_index'].member_id == m)]
for m in members]
# dfs['orig_reputation_index'].head(3)
freq_sort = sorted(freq, key = lambda z: z[1], reverse=True)
freq_sorted = pd.DataFrame(freq_sort)
i = 0
while freq_sorted[1][i] > 30:
i += 1
print(i)
print(len(freq_sorted[1]) - i)
plt.plot(freq_sorted[1])
plt.grid()
###Output
_____no_output_____ |
aprendizado-de-maquina-i/regressao/salaries.ipynb | ###Markdown
Simple linear regression
###Code
from sklearn.linear_model import LinearRegression
lr_1 = LinearRegression()
lr_1.fit(X, y)
from sklearn.preprocessing import PolynomialFeatures
pf = PolynomialFeatures(degree=2)
X_poly = pf.fit_transform(X)
lr_2 = LinearRegression()
lr_2.fit(X_poly, y)
###Output
_____no_output_____
###Markdown
Visualizing the results

Linear regression
###Code
plt.scatter(X, y, color="red")
plt.plot(X, lr_1.predict(X), color="blue")
plt.title('Level versus Salary (linear regression)')
plt.xlabel('Level')
plt.ylabel('Salary')
from sklearn.metrics import mean_squared_error
y_pred = lr_1.predict(X)
rmse = np.sqrt(mean_squared_error(y, y_pred))
print('RMSE {}'.format(rmse))
###Output
RMSE 163388.73519272613
###Markdown
Polynomial regression
###Code
plt.scatter(X, y, color="red")
plt.plot(X, lr_2.predict(X_poly), color="blue")
plt.title('Level versus Salary (polynomial regression)')
plt.xlabel('Level')
plt.ylabel('Salary')
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
y_pred = lr_2.predict(X_poly)
mae = mean_absolute_error(y, y_pred)
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mean_squared_error(y, y_pred))
print('Comparison of error metrics:')
print('MAE {}'.format(mae))
print('MSE {}'.format(mse))
print('RMSE {}'.format(rmse))
###Output
Comparison of error metrics:
MAE 70218.1818181819
MSE 6758833333.333336
RMSE 82212.12400451247
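###Markdown
Besides the error metrics above, the two fits can also be compared by R² (a hedged sketch; it assumes scikit-learn's `r2_score` and reuses `X`, `X_poly`, `y`, `lr_1`, and `lr_2` from earlier cells):

```python
from sklearn.metrics import r2_score

# Goodness of fit: linear vs. polynomial model
print('R2 linear     : {:.3f}'.format(r2_score(y, lr_1.predict(X))))
print('R2 polynomial : {:.3f}'.format(r2_score(y, lr_2.predict(X_poly))))
```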
|
notebooks/DeepFM_NFM_DeepCTR.ipynb | ###Markdown
CTR Prediction (2): materials && code compiled by [@寒小阳](https://blog.csdn.net/han_xiaoyang) ([email protected])

reference:
* [《广告点击率预估是怎么回事?》 (What is ad click-through-rate prediction?)](https://zhuanlan.zhihu.com/p/23499698)
* [从ctr预估问题看看f(x)设计—DNN篇 (A look at f(x) design for the CTR prediction problem, the DNN chapter)](https://zhuanlan.zhihu.com/p/28202287)
* [Atomu2014 product_nets](https://github.com/Atomu2014/product-nets)

As before, we take the criteo data as an example and look at how deep learning is applied.

Feature engineering

Feature engineering is an important data-processing step. Here we perform feature engineering on the criteo data following [PaddlePaddle's CTR feature-engineering script](https://github.com/PaddlePaddle/models/blob/develop/deep_fm/preprocess.py).
###Code
#coding=utf8
"""
Feature engineering, following https://github.com/PaddlePaddle/models/blob/develop/deep_fm/preprocess.py:
-normalize the numerical features
-for categorical features, filter out the long tail (values whose frequency falls below the cutoff; 150 in the call below)
"""
import os
import sys
import random
import collections
import argparse
from multiprocessing import Pool as ThreadPool
# 13 continuous columns, 26 categorical columns
continous_features = range(1, 14)
categorial_features = range(14, 40)
# Clip continuous values (at roughly the 95th percentile of each continuous column)
continous_clip = [20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50]
class CategoryDictGenerator:
"""
    Encoding dictionary for categorical features
"""
def __init__(self, num_feature):
self.dicts = []
self.num_feature = num_feature
for i in range(0, num_feature):
self.dicts.append(collections.defaultdict(int))
def build(self, datafile, categorial_features, cutoff=0):
with open(datafile, 'r') as f:
for line in f:
features = line.rstrip('\n').split('\t')
for i in range(0, self.num_feature):
if features[categorial_features[i]] != '':
self.dicts[i][features[categorial_features[i]]] += 1
for i in range(0, self.num_feature):
self.dicts[i] = filter(lambda x: x[1] >= cutoff, self.dicts[i].items())
self.dicts[i] = sorted(self.dicts[i], key=lambda x: (-x[1], x[0]))
vocabs, _ = list(zip(*self.dicts[i]))
self.dicts[i] = dict(zip(vocabs, range(1, len(vocabs) + 1)))
self.dicts[i]['<unk>'] = 0
def gen(self, idx, key):
if key not in self.dicts[idx]:
res = self.dicts[idx]['<unk>']
else:
res = self.dicts[idx][key]
return res
def dicts_sizes(self):
return map(len, self.dicts)
class ContinuousFeatureGenerator:
"""
    Min-max normalization for continuous-valued features
"""
def __init__(self, num_feature):
self.num_feature = num_feature
self.min = [sys.maxint] * num_feature
self.max = [-sys.maxint] * num_feature
def build(self, datafile, continous_features):
with open(datafile, 'r') as f:
for line in f:
features = line.rstrip('\n').split('\t')
for i in range(0, self.num_feature):
val = features[continous_features[i]]
if val != '':
val = int(val)
if val > continous_clip[i]:
val = continous_clip[i]
self.min[i] = min(self.min[i], val)
self.max[i] = max(self.max[i], val)
def gen(self, idx, val):
if val == '':
return 0.0
val = float(val)
return (val - self.min[idx]) / (self.max[idx] - self.min[idx])
def preprocess(input_dir, output_dir):
"""
    Process the continuous and categorical features
"""
dists = ContinuousFeatureGenerator(len(continous_features))
dists.build(input_dir + 'train.txt', continous_features)
dicts = CategoryDictGenerator(len(categorial_features))
dicts.build(input_dir + 'train.txt', categorial_features, cutoff=150)
output = open(output_dir + 'feature_map','w')
for i in continous_features:
output.write("{0} {1}\n".format('I'+str(i), i))
dict_sizes = dicts.dicts_sizes()
categorial_feature_offset = [dists.num_feature]
for i in range(1, len(categorial_features)+1):
offset = categorial_feature_offset[i - 1] + dict_sizes[i - 1]
categorial_feature_offset.append(offset)
for key, val in dicts.dicts[i-1].iteritems():
output.write("{0} {1}\n".format('C'+str(i)+'|'+key, categorial_feature_offset[i - 1]+val+1))
random.seed(0)
    # 90% of the data for training, 10% for validation
with open(output_dir + 'tr.libsvm', 'w') as out_train:
with open(output_dir + 'va.libsvm', 'w') as out_valid:
with open(input_dir + 'train.txt', 'r') as f:
for line in f:
features = line.rstrip('\n').split('\t')
feat_vals = []
for i in range(0, len(continous_features)):
val = dists.gen(i, features[continous_features[i]])
feat_vals.append(str(continous_features[i]) + ':' + "{0:.6f}".format(val).rstrip('0').rstrip('.'))
for i in range(0, len(categorial_features)):
val = dicts.gen(i, features[categorial_features[i]]) + categorial_feature_offset[i]
feat_vals.append(str(val) + ':1')
label = features[0]
if random.randint(0, 9999) % 10 != 0:
out_train.write("{0} {1}\n".format(label, ' '.join(feat_vals)))
else:
out_valid.write("{0} {1}\n".format(label, ' '.join(feat_vals)))
with open(output_dir + 'te.libsvm', 'w') as out:
with open(input_dir + 'test.txt', 'r') as f:
for line in f:
features = line.rstrip('\n').split('\t')
feat_vals = []
for i in range(0, len(continous_features)):
val = dists.gen(i, features[continous_features[i] - 1])
feat_vals.append(str(continous_features[i]) + ':' + "{0:.6f}".format(val).rstrip('0').rstrip('.'))
for i in range(0, len(categorial_features)):
val = dicts.gen(i, features[categorial_features[i] - 1]) + categorial_feature_offset[i]
feat_vals.append(str(val) + ':1')
out.write("{0} {1}\n".format(label, ' '.join(feat_vals)))
input_dir = './criteo_data/'
output_dir = './criteo_data/'
print("开始数据处理与特征工程...")
preprocess(input_dir, output_dir)
!head -5 ./criteo_data/tr.libsvm
###Output
0 1:0.2 2:0.028192 3:0.07 4:0.56 5:0.000562 6:0.056 7:0.04 8:0.86 9:0.094 10:0.25 11:0.2 12:0 13:0.56 14:1 169:1 631:1 1414:1 2534:1 2584:1 4239:1 4991:1 5060:1 5064:1 7141:1 8818:1 9906:1 11250:1 11377:1 12951:1 13833:1 13883:1 14817:1 15204:1 15327:1 16118:1 16128:1 16183:1 17289:1 17361:1
1 1:0 2:0.004975 3:0.11 4:0 5:1.373375 6:0 7:0 8:0.14 9:0 10:0 11:0 12:0 13:0 14:1 181:1 543:1 1379:1 2534:1 2582:1 3632:1 4990:1 5061:1 5217:1 6925:1 8726:1 9705:1 11250:1 11605:1 12849:1 13835:1 13971:1 14816:1 15202:1 15224:1 16118:1 16129:1 16148:1 17280:1 17320:1
0 1:0.1 2:0.008292 3:0.28 4:0.14 5:0.000016 6:0.002 7:0.21 8:0.14 9:0.786 10:0.125 11:0.4 12:0 13:0.02 14:1 209:1 632:1 1491:1 2534:1 2582:1 2719:1 4995:1 5060:1 5069:1 6960:1 8820:1 9727:1 11249:1 11471:1 12933:1 13834:1 13927:1 14817:1 15204:1 15328:1 16118:1 16129:1 16185:1 17282:1 17364:1
1 1:0 2:0.003317 3:0.04 4:0.5 5:0.504031 6:0.222 7:0.02 8:0.72 9:0.16 10:0 11:0.1 12:0 13:0.5 14:1 156:1 529:1 1377:1 2534:1 2583:1 3768:1 4990:1 5060:1 5247:1 7131:1 8711:1 9893:1 11250:1 11361:1 12827:1 13835:1 13888:1 14816:1 15202:1 15207:1 16118:1 16129:1 16145:1 17280:1 17320:1
0 1:0 2:0.004975 3:0.28 4:0.14 5:0.022766 6:0.058 7:0.05 8:0.28 9:0.464 10:0 11:0.3 12:0 13:0.14 15:1 142:1 528:1 1376:1 2535:1 2582:1 2659:1 4997:1 5060:1 5064:1 7780:1 8710:1 9703:1 11250:1 11324:1 12826:1 13834:1 13861:1 14817:1 15203:1 15206:1 16118:1 16128:1 16746:1 17282:1 17320:1
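###Markdown
Each line of the generated files is in libsvm format, `label index:value index:value ...`: the first 13 indices hold the min-max-normalized continuous features, and the remaining indices are the offset, one-hot encoded categorical features (each with value 1).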
###Markdown
DeepFM reference: [A summary of common computational-advertising CTR prediction algorithms](https://zhuanlan.zhihu.com/p/29053940) DeepFM combines Wide and Deep with FM; in effect it merges PNN and WDL. In the original Wide and Deep, the wide part is just an LR that captures linear relations while the deep part models higher-order relations, so Wide and Deep still needs feature work such as cross columns. FM can model second-order relations and achieve the cross-column effect, so DeepFM couples FM with an NN and no longer needs hand-crafted cross-column features. The FM part is as follows: The deep part is as follows: The overall structure: Compared with FNN and PNN, DeepFM can use its deep part to model higher-order (beyond second-order) information, and compared with Wide and Deep it removes part of the feature-engineering work; the wide part, like FM, models first- and second-order feature interactions, making it a good combination of NN and FM. Another difference, shown in the figure below, is that DeepFM's wide and deep parts share the same embedding space, and both the wide and deep parts can update the embeddings.
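###Markdown
The second-order FM term is computed with the standard sum-of-squares identity, which is exactly what the `Second-order` scope in the code below implements: $$\sum_{i<j}\langle v_i, v_j\rangle x_i x_j = \frac{1}{2}\sum_{f=1}^{k}\left[\Big(\sum_i v_{i,f}\,x_i\Big)^2 - \sum_i v_{i,f}^2\,x_i^2\right]$$ This reduces the cost from $O(kn^2)$ to $O(kn)$ for $n$ active features and embedding size $k$.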
###Code
# %load DeepFM.py
#!/usr/bin/env python
"""
#1 Input pipeline using the Dataset high-level API, supporting parallel and prefetch reading
#2 Training pipeline using a custom Estimator (rewriting model_fn)
#3 Supports distributed training via TF_CONFIG
#4 Supports export_model for TensorFlow Serving
Easy to port to other algorithms: only input_fn and model_fn need to change
by lambdaji
"""
#from __future__ import absolute_import
#from __future__ import division
#from __future__ import print_function
#import argparse
import shutil
#import sys
import os
import json
import glob
from datetime import date, timedelta
from time import time
#import gc
#from multiprocessing import Process
#import math
import random
import pandas as pd
import numpy as np
import tensorflow as tf
#################### CMD Arguments ####################
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer("dist_mode", 0, "distribuion mode {0-loacal, 1-single_dist, 2-multi_dist}")
tf.app.flags.DEFINE_string("ps_hosts", '', "Comma-separated list of hostname:port pairs")
tf.app.flags.DEFINE_string("worker_hosts", '', "Comma-separated list of hostname:port pairs")
tf.app.flags.DEFINE_string("job_name", '', "One of 'ps', 'worker'")
tf.app.flags.DEFINE_integer("task_index", 0, "Index of task within the job")
tf.app.flags.DEFINE_integer("num_threads", 16, "Number of threads")
tf.app.flags.DEFINE_integer("feature_size", 0, "Number of features")
tf.app.flags.DEFINE_integer("field_size", 0, "Number of fields")
tf.app.flags.DEFINE_integer("embedding_size", 32, "Embedding size")
tf.app.flags.DEFINE_integer("num_epochs", 10, "Number of epochs")
tf.app.flags.DEFINE_integer("batch_size", 64, "Number of batch size")
tf.app.flags.DEFINE_integer("log_steps", 1000, "save summary every steps")
tf.app.flags.DEFINE_float("learning_rate", 0.0005, "learning rate")
tf.app.flags.DEFINE_float("l2_reg", 0.0001, "L2 regularization")
tf.app.flags.DEFINE_string("loss_type", 'log_loss', "loss type {square_loss, log_loss}")
tf.app.flags.DEFINE_string("optimizer", 'Adam', "optimizer type {Adam, Adagrad, GD, Momentum}")
tf.app.flags.DEFINE_string("deep_layers", '256,128,64', "deep layers")
tf.app.flags.DEFINE_string("dropout", '0.5,0.5,0.5', "dropout rate")
tf.app.flags.DEFINE_boolean("batch_norm", False, "perform batch normaization (True or False)")
tf.app.flags.DEFINE_float("batch_norm_decay", 0.9, "decay for the moving average(recommend trying decay=0.9)")
tf.app.flags.DEFINE_string("data_dir", '', "data dir")
tf.app.flags.DEFINE_string("dt_dir", '', "data dt partition")
tf.app.flags.DEFINE_string("model_dir", '', "model check point dir")
tf.app.flags.DEFINE_string("servable_model_dir", '', "export servable model for TensorFlow Serving")
tf.app.flags.DEFINE_string("task_type", 'train', "task type {train, infer, eval, export}")
tf.app.flags.DEFINE_boolean("clear_existing_model", False, "clear existing model or not")
#1 1:0.5 2:0.03519 3:1 4:0.02567 7:0.03708 8:0.01705 9:0.06296 10:0.18185 11:0.02497 12:1 14:0.02565 15:0.03267 17:0.0247 18:0.03158 20:1 22:1 23:0.13169 24:0.02933 27:0.18159 31:0.0177 34:0.02888 38:1 51:1 63:1 132:1 164:1 236:1
def input_fn(filenames, batch_size=32, num_epochs=1, perform_shuffle=False):
print('Parsing', filenames)
def decode_libsvm(line):
#columns = tf.decode_csv(value, record_defaults=CSV_COLUMN_DEFAULTS)
#features = dict(zip(CSV_COLUMNS, columns))
#labels = features.pop(LABEL_COLUMN)
columns = tf.string_split([line], ' ')
labels = tf.string_to_number(columns.values[0], out_type=tf.float32)
splits = tf.string_split(columns.values[1:], ':')
id_vals = tf.reshape(splits.values,splits.dense_shape)
feat_ids, feat_vals = tf.split(id_vals,num_or_size_splits=2,axis=1)
feat_ids = tf.string_to_number(feat_ids, out_type=tf.int32)
feat_vals = tf.string_to_number(feat_vals, out_type=tf.float32)
#feat_ids = tf.reshape(feat_ids,shape=[-1,FLAGS.field_size])
#for i in range(splits.dense_shape.eval()[0]):
# feat_ids.append(tf.string_to_number(splits.values[2*i], out_type=tf.int32))
# feat_vals.append(tf.string_to_number(splits.values[2*i+1]))
#return tf.reshape(feat_ids,shape=[-1,field_size]), tf.reshape(feat_vals,shape=[-1,field_size]), labels
return {"feat_ids": feat_ids, "feat_vals": feat_vals}, labels
# Extract lines from input files using the Dataset API, can pass one filename or filename list
dataset = tf.data.TextLineDataset(filenames).map(decode_libsvm, num_parallel_calls=10).prefetch(500000) # multi-thread pre-process then prefetch
# Randomizes input using a window of 256 elements (read into memory)
if perform_shuffle:
dataset = dataset.shuffle(buffer_size=256)
# epochs from blending together.
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size) # Batch size to use
#return dataset.make_one_shot_iterator()
iterator = dataset.make_one_shot_iterator()
batch_features, batch_labels = iterator.get_next()
#return tf.reshape(batch_ids,shape=[-1,field_size]), tf.reshape(batch_vals,shape=[-1,field_size]), batch_labels
return batch_features, batch_labels
def model_fn(features, labels, mode, params):
"""Bulid Model function f(x) for Estimator."""
#------hyperparameters----
field_size = params["field_size"]
feature_size = params["feature_size"]
embedding_size = params["embedding_size"]
l2_reg = params["l2_reg"]
learning_rate = params["learning_rate"]
#batch_norm_decay = params["batch_norm_decay"]
#optimizer = params["optimizer"]
layers = map(int, params["deep_layers"].split(','))
dropout = map(float, params["dropout"].split(','))
    #------build weights------
    FM_B = tf.get_variable(name='fm_bias', shape=[1], initializer=tf.constant_initializer(0.0))
    FM_W = tf.get_variable(name='fm_w', shape=[feature_size], initializer=tf.glorot_normal_initializer())                    # F
    FM_V = tf.get_variable(name='fm_v', shape=[feature_size, embedding_size], initializer=tf.glorot_normal_initializer())    # F * K
    #------build features------
    feat_ids = features['feat_ids']
    feat_ids = tf.reshape(feat_ids,shape=[-1,field_size])                     # None * F
    feat_vals = features['feat_vals']
    feat_vals = tf.reshape(feat_vals,shape=[-1,field_size])                   # None * F
    #------build f(x)------
    with tf.variable_scope("First-order"):
        feat_wgts = tf.nn.embedding_lookup(FM_W, feat_ids)                    # None * F
        y_w = tf.reduce_sum(tf.multiply(feat_wgts, feat_vals),1)              # None
    with tf.variable_scope("Second-order"):
        embeddings = tf.nn.embedding_lookup(FM_V, feat_ids)                   # None * F * K
        feat_vals = tf.reshape(feat_vals, shape=[-1, field_size, 1])          # None * F * 1
        embeddings = tf.multiply(embeddings, feat_vals)                       # vij * xi, None * F * K
        sum_square = tf.square(tf.reduce_sum(embeddings,1))                   # None * K
        square_sum = tf.reduce_sum(tf.square(embeddings),1)                   # None * K
        y_v = 0.5*tf.reduce_sum(tf.subtract(sum_square, square_sum),1)        # None
with tf.variable_scope("Deep-part"):
if FLAGS.batch_norm:
#normalizer_fn = tf.contrib.layers.batch_norm
#normalizer_fn = tf.layers.batch_normalization
if mode == tf.estimator.ModeKeys.TRAIN:
train_phase = True
#normalizer_params = {'decay': batch_norm_decay, 'center': True, 'scale': True, 'updates_collections': None, 'is_training': True, 'reuse': None}
else:
train_phase = False
#normalizer_params = {'decay': batch_norm_decay, 'center': True, 'scale': True, 'updates_collections': None, 'is_training': False, 'reuse': True}
else:
normalizer_fn = None
normalizer_params = None
deep_inputs = tf.reshape(embeddings,shape=[-1,field_size*embedding_size]) # None * (F*K)
for i in range(len(layers)):
#if FLAGS.batch_norm:
# deep_inputs = batch_norm_layer(deep_inputs, train_phase=train_phase, scope_bn='bn_%d' %i)
#normalizer_params.update({'scope': 'bn_%d' %i})
deep_inputs = tf.contrib.layers.fully_connected(inputs=deep_inputs, num_outputs=layers[i], \
#normalizer_fn=normalizer_fn, normalizer_params=normalizer_params, \
weights_regularizer=tf.contrib.layers.l2_regularizer(l2_reg), scope='mlp%d' % i)
if FLAGS.batch_norm:
                deep_inputs = batch_norm_layer(deep_inputs, train_phase=train_phase, scope_bn='bn_%d' %i)   # applied after the ReLU, see https://github.com/ducha-aiki/caffenet-benchmark/blob/master/batchnorm.md#bn----before-or-after-relu
if mode == tf.estimator.ModeKeys.TRAIN:
deep_inputs = tf.nn.dropout(deep_inputs, keep_prob=dropout[i]) #Apply Dropout after all BN layers and set dropout=0.8(drop_ratio=0.2)
#deep_inputs = tf.layers.dropout(inputs=deep_inputs, rate=dropout[i], training=mode == tf.estimator.ModeKeys.TRAIN)
y_deep = tf.contrib.layers.fully_connected(inputs=deep_inputs, num_outputs=1, activation_fn=tf.identity, \
weights_regularizer=tf.contrib.layers.l2_regularizer(l2_reg), scope='deep_out')
y_d = tf.reshape(y_deep,shape=[-1])
#sig_wgts = tf.get_variable(name='sigmoid_weights', shape=[layers[-1]], initializer=tf.glorot_normal_initializer())
#sig_bias = tf.get_variable(name='sigmoid_bias', shape=[1], initializer=tf.constant_initializer(0.0))
#deep_out = tf.nn.xw_plus_b(deep_inputs,sig_wgts,sig_bias,name='deep_out')
with tf.variable_scope("DeepFM-out"):
        #y_bias = FM_B * tf.ones_like(labels, dtype=tf.float32)  # None * 1; warning: do not use `labels` here, or predict/export will fail while train/evaluate still work; the Estimator appears to optimize the graph and does not feed labels when they are unused
        y_bias = FM_B * tf.ones_like(y_d, dtype=tf.float32)      # None * 1
y = y_bias + y_w + y_v + y_d
pred = tf.sigmoid(y)
predictions={"prob": pred}
export_outputs = {tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: tf.estimator.export.PredictOutput(predictions)}
# Provide an estimator spec for `ModeKeys.PREDICT`
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs=export_outputs)
    #------build loss------
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y, labels=labels)) + \
l2_reg * tf.nn.l2_loss(FM_W) + \
l2_reg * tf.nn.l2_loss(FM_V) #+ \ l2_reg * tf.nn.l2_loss(sig_wgts)
# Provide an estimator spec for `ModeKeys.EVAL`
eval_metric_ops = {
"auc": tf.metrics.auc(labels, pred)
}
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
eval_metric_ops=eval_metric_ops)
    #------build optimizer------
if FLAGS.optimizer == 'Adam':
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-8)
elif FLAGS.optimizer == 'Adagrad':
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate, initial_accumulator_value=1e-8)
elif FLAGS.optimizer == 'Momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.95)
elif FLAGS.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
# Provide an estimator spec for `ModeKeys.TRAIN` modes
if mode == tf.estimator.ModeKeys.TRAIN:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
# Provide an estimator spec for `ModeKeys.EVAL` and `ModeKeys.TRAIN` modes.
#return tf.estimator.EstimatorSpec(
# mode=mode,
# loss=loss,
# train_op=train_op,
# predictions={"prob": pred},
# eval_metric_ops=eval_metric_ops)
def batch_norm_layer(x, train_phase, scope_bn):
bn_train = tf.contrib.layers.batch_norm(x, decay=FLAGS.batch_norm_decay, center=True, scale=True, updates_collections=None, is_training=True, reuse=None, scope=scope_bn)
bn_infer = tf.contrib.layers.batch_norm(x, decay=FLAGS.batch_norm_decay, center=True, scale=True, updates_collections=None, is_training=False, reuse=True, scope=scope_bn)
z = tf.cond(tf.cast(train_phase, tf.bool), lambda: bn_train, lambda: bn_infer)
return z
def set_dist_env():
    if FLAGS.dist_mode == 1:      # local distributed test mode: 1 chief, 1 ps, 1 evaluator
ps_hosts = FLAGS.ps_hosts.split(',')
        chief_hosts = FLAGS.chief_hosts.split(',')   # note: no chief_hosts flag is defined above, so this branch would fail; dist_mode defaults to 0, so it is never reached in this notebook
task_index = FLAGS.task_index
job_name = FLAGS.job_name
print('ps_host', ps_hosts)
print('chief_hosts', chief_hosts)
print('job_name', job_name)
print('task_index', str(task_index))
        # no worker hosts in this mode
tf_config = {
'cluster': {'chief': chief_hosts, 'ps': ps_hosts},
'task': {'type': job_name, 'index': task_index }
}
print(json.dumps(tf_config))
os.environ['TF_CONFIG'] = json.dumps(tf_config)
    elif FLAGS.dist_mode == 2:    # cluster distributed mode
ps_hosts = FLAGS.ps_hosts.split(',')
worker_hosts = FLAGS.worker_hosts.split(',')
chief_hosts = worker_hosts[0:1] # get first worker as chief
worker_hosts = worker_hosts[2:] # the rest as worker
task_index = FLAGS.task_index
job_name = FLAGS.job_name
print('ps_host', ps_hosts)
print('worker_host', worker_hosts)
print('chief_hosts', chief_hosts)
print('job_name', job_name)
print('task_index', str(task_index))
# use #worker=0 as chief
if job_name == "worker" and task_index == 0:
job_name = "chief"
# use #worker=1 as evaluator
if job_name == "worker" and task_index == 1:
job_name = 'evaluator'
task_index = 0
# the others as worker
if job_name == "worker" and task_index > 1:
task_index -= 2
tf_config = {
'cluster': {'chief': chief_hosts, 'worker': worker_hosts, 'ps': ps_hosts},
'task': {'type': job_name, 'index': task_index }
}
print(json.dumps(tf_config))
os.environ['TF_CONFIG'] = json.dumps(tf_config)
def main(_):
tr_files = glob.glob("%s/tr*libsvm" % FLAGS.data_dir)
random.shuffle(tr_files)
print("tr_files:", tr_files)
va_files = glob.glob("%s/va*libsvm" % FLAGS.data_dir)
print("va_files:", va_files)
te_files = glob.glob("%s/te*libsvm" % FLAGS.data_dir)
print("te_files:", te_files)
if FLAGS.clear_existing_model:
try:
shutil.rmtree(FLAGS.model_dir)
except Exception as e:
print(e, "at clear_existing_model")
else:
print("existing model cleaned at %s" % FLAGS.model_dir)
set_dist_env()
model_params = {
"field_size": FLAGS.field_size,
"feature_size": FLAGS.feature_size,
"embedding_size": FLAGS.embedding_size,
"learning_rate": FLAGS.learning_rate,
"batch_norm_decay": FLAGS.batch_norm_decay,
"l2_reg": FLAGS.l2_reg,
"deep_layers": FLAGS.deep_layers,
"dropout": FLAGS.dropout
}
config = tf.estimator.RunConfig().replace(session_config = tf.ConfigProto(device_count={'GPU':0, 'CPU':FLAGS.num_threads}),
log_step_count_steps=FLAGS.log_steps, save_summary_steps=FLAGS.log_steps)
DeepFM = tf.estimator.Estimator(model_fn=model_fn, model_dir=FLAGS.model_dir, params=model_params, config=config)
if FLAGS.task_type == 'train':
train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_fn(tr_files, num_epochs=FLAGS.num_epochs, batch_size=FLAGS.batch_size))
eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_fn(va_files, num_epochs=1, batch_size=FLAGS.batch_size), steps=None, start_delay_secs=1000, throttle_secs=1200)
tf.estimator.train_and_evaluate(DeepFM, train_spec, eval_spec)
elif FLAGS.task_type == 'eval':
DeepFM.evaluate(input_fn=lambda: input_fn(va_files, num_epochs=1, batch_size=FLAGS.batch_size))
elif FLAGS.task_type == 'infer':
preds = DeepFM.predict(input_fn=lambda: input_fn(te_files, num_epochs=1, batch_size=FLAGS.batch_size), predict_keys="prob")
with open(FLAGS.data_dir+"/pred.txt", "w") as fo:
for prob in preds:
fo.write("%f\n" % (prob['prob']))
elif FLAGS.task_type == 'export':
#feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
#feature_spec = {
# 'feat_ids': tf.FixedLenFeature(dtype=tf.int64, shape=[None, FLAGS.field_size]),
# 'feat_vals': tf.FixedLenFeature(dtype=tf.float32, shape=[None, FLAGS.field_size])
#}
#serving_input_receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)
feature_spec = {
'feat_ids': tf.placeholder(dtype=tf.int64, shape=[None, FLAGS.field_size], name='feat_ids'),
'feat_vals': tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.field_size], name='feat_vals')
}
serving_input_receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)
DeepFM.export_savedmodel(FLAGS.servable_model_dir, serving_input_receiver_fn)
if __name__ == "__main__":
#------check Arguments------
if FLAGS.dt_dir == "":
FLAGS.dt_dir = (date.today() + timedelta(-1)).strftime('%Y%m%d')
FLAGS.model_dir = FLAGS.model_dir + FLAGS.dt_dir
#FLAGS.data_dir = FLAGS.data_dir + FLAGS.dt_dir
print('task_type ', FLAGS.task_type)
print('model_dir ', FLAGS.model_dir)
print('data_dir ', FLAGS.data_dir)
print('dt_dir ', FLAGS.dt_dir)
print('num_epochs ', FLAGS.num_epochs)
print('feature_size ', FLAGS.feature_size)
print('field_size ', FLAGS.field_size)
print('embedding_size ', FLAGS.embedding_size)
print('batch_size ', FLAGS.batch_size)
print('deep_layers ', FLAGS.deep_layers)
print('dropout ', FLAGS.dropout)
print('loss_type ', FLAGS.loss_type)
print('optimizer ', FLAGS.optimizer)
print('learning_rate ', FLAGS.learning_rate)
print('batch_norm_decay ', FLAGS.batch_norm_decay)
print('batch_norm ', FLAGS.batch_norm)
print('l2_reg ', FLAGS.l2_reg)
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
!python DeepFM.py --task_type=train \
--learning_rate=0.0005 \
--optimizer=Adam \
--num_epochs=1 \
--batch_size=256 \
--field_size=39 \
--feature_size=117581 \
--deep_layers=400,400,400 \
--dropout=0.5,0.5,0.5 \
--log_steps=1000 \
--num_threads=8 \
--model_dir=./criteo_model/DeepFM \
--data_dir=./criteo_data
###Output
('task_type ', 'train')
('model_dir ', './criteo_model20180503')
('data_dir ', './criteo_data')
('dt_dir ', '20180503')
('num_epochs ', 1)
('feature_size ', 117581)
('field_size ', 39)
('embedding_size ', 32)
('batch_size ', 256)
('deep_layers ', '400,400,400')
('dropout ', '0.5,0.5,0.5')
('loss_type ', 'log_loss')
('optimizer ', 'Adam')
('learning_rate ', 0.0005)
('batch_norm_decay ', 0.9)
('batch_norm ', False)
('l2_reg ', 0.0001)
('tr_files:', ['./criteo_data/tr.libsvm'])
('va_files:', ['./criteo_data/va.libsvm'])
('te_files:', ['./criteo_data/te.libsvm'])
INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600, '_session_config': device_count {
key: "CPU"
value: 8
}
device_count {
key: "GPU"
}
, '_keep_checkpoint_max': 5, '_task_type': 'worker', '_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x537c4d0>, '_save_checkpoints_steps': None, '_keep_checkpoint_every_n_hours': 10000, '_service': None, '_num_ps_replicas': 0, '_tf_random_seed': None, '_master': '', '_num_worker_replicas': 1, '_task_id': 0, '_log_step_count_steps': 1000, '_model_dir': './criteo_model', '_save_summary_steps': 1000}
INFO:tensorflow:Running training and evaluation locally (non-distributed).
INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after 1200 secs (eval_spec.throttle_secs) or training is finished.
('Parsing', ['./criteo_data/tr.libsvm'])
INFO:tensorflow:Create CheckpointSaverHook.
2018-05-04 23:34:53.147375: I tensorflow/core/platform/cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA
INFO:tensorflow:Saving checkpoints for 1 into ./criteo_model/model.ckpt.
INFO:tensorflow:loss = 0.6947804, step = 1
INFO:tensorflow:loss = 0.51585126, step = 101 (4.948 sec)
INFO:tensorflow:loss = 0.4950318, step = 201 (4.408 sec)
INFO:tensorflow:loss = 0.5462832, step = 301 (4.357 sec)
INFO:tensorflow:loss = 0.5671505, step = 401 (4.368 sec)
INFO:tensorflow:loss = 0.45424744, step = 501 (4.300 sec)
INFO:tensorflow:loss = 0.5399899, step = 601 (4.274 sec)
INFO:tensorflow:loss = 0.49540266, step = 701 (4.234 sec)
INFO:tensorflow:loss = 0.5175852, step = 801 (4.294 sec)
INFO:tensorflow:loss = 0.4686305, step = 901 (4.314 sec)
INFO:tensorflow:global_step/sec: 22.8576
INFO:tensorflow:loss = 0.5371931, step = 1001 (4.254 sec)
INFO:tensorflow:loss = 0.49340367, step = 1101 (4.243 sec)
INFO:tensorflow:loss = 0.49719507, step = 1201 (4.346 sec)
INFO:tensorflow:loss = 0.48593232, step = 1301 (4.225 sec)
INFO:tensorflow:loss = 0.48725832, step = 1401 (4.238 sec)
INFO:tensorflow:loss = 0.4386774, step = 1501 (4.361 sec)
INFO:tensorflow:loss = 0.49065983, step = 1601 (4.312 sec)
INFO:tensorflow:loss = 0.53164876, step = 1701 (4.272 sec)
INFO:tensorflow:loss = 0.40944415, step = 1801 (4.286 sec)
INFO:tensorflow:loss = 0.521611, step = 1901 (4.270 sec)
INFO:tensorflow:global_step/sec: 23.327
INFO:tensorflow:loss = 0.49082595, step = 2001 (4.317 sec)
INFO:tensorflow:loss = 0.50453734, step = 2101 (4.302 sec)
INFO:tensorflow:loss = 0.49503702, step = 2201 (4.369 sec)
INFO:tensorflow:loss = 0.45685932, step = 2301 (4.326 sec)
INFO:tensorflow:loss = 0.47562104, step = 2401 (4.326 sec)
INFO:tensorflow:loss = 0.5106457, step = 2501 (4.366 sec)
INFO:tensorflow:loss = 0.4949795, step = 2601 (4.408 sec)
INFO:tensorflow:loss = 0.4684176, step = 2701 (4.442 sec)
INFO:tensorflow:loss = 0.43745354, step = 2801 (4.457 sec)
INFO:tensorflow:loss = 0.48600715, step = 2901 (4.490 sec)
INFO:tensorflow:global_step/sec: 22.7801
INFO:tensorflow:loss = 0.4853104, step = 3001 (4.412 sec)
INFO:tensorflow:loss = 0.49764964, step = 3101 (4.420 sec)
INFO:tensorflow:loss = 0.4432894, step = 3201 (4.496 sec)
INFO:tensorflow:loss = 0.46213925, step = 3301 (4.479 sec)
INFO:tensorflow:loss = 0.4637582, step = 3401 (4.582 sec)
INFO:tensorflow:loss = 0.46756223, step = 3501 (4.504 sec)
INFO:tensorflow:loss = 0.46732077, step = 3601 (4.464 sec)
###Markdown
NFM reference: [Looking at f(x) design for CTR prediction: the DNN chapter](https://zhuanlan.zhihu.com/p/28202287) [Deep learning applied to CTR prediction](https://zhuanlan.zhihu.com/p/35484389) NFM = LR + Embedding + Bi-Interaction Pooling + MLP. Every feature is embedded into a vector of the same dimension. These embedding vectors are then multiplied pairwise, element-wise, to form the Bi-Interaction layer. (Element-wise multiplication example: (1,2,3) multiplied element-wise with (4,5,6) gives (4,10,18).) The Bi-Interaction layer yields a single vector with the same dimension as the embeddings, which is then passed through a few hidden layers to produce the output. Note that if the Bi-Interaction layer were followed by no hidden layers and its elements were simply summed into the output, the model would be exactly an FM; adding hidden layers amounts to a higher-order FM with stronger non-linear expressive power. NFM performs the bi-interaction operation on the embeddings to cross features; the advantage is that the network parameters are compressed from n directly down to k (even fewer than the f\*k of FNN and DeepFM), reducing network complexity and speeding up training, but this method may also incur a considerable loss of information.
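###Markdown
Bi-Interaction pooling admits the same cheap reformulation as the FM second-order term (compare the `BiInter-part` scope in the code below): $$f_{BI}(\mathcal{V}_x)=\sum_{i<j} x_i v_i \odot x_j v_j = \frac{1}{2}\left[\Big(\sum_i x_i v_i\Big)^2 - \sum_i (x_i v_i)^2\right]$$ where $\odot$ is the element-wise product and the squares are taken element-wise, so the output is a single $k$-dimensional vector.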
###Code
# %load NFM.py
#!/usr/bin/env python
"""
TensorFlow implementation of "Neural Factorization Machines for Sparse Predictive Analytics" with the following features:
#1 Input pipeline using the Dataset high-level API, supporting parallel and prefetch reading
#2 Training pipeline using a custom Estimator (rewriting model_fn)
#3 Supports distributed training via TF_CONFIG
#4 Supports model export for TensorFlow Serving
by lambdaji
"""
#from __future__ import absolute_import
#from __future__ import division
#from __future__ import print_function
#import argparse
import shutil
#import sys
import os
import json
import glob
from datetime import date, timedelta
from time import time
#import gc
#from multiprocessing import Process
#import math
import random
import pandas as pd
import numpy as np
import tensorflow as tf
#################### CMD Arguments ####################
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer("dist_mode", 0, "distribuion mode {0-loacal, 1-single_dist, 2-multi_dist}")
tf.app.flags.DEFINE_string("ps_hosts", '', "Comma-separated list of hostname:port pairs")
tf.app.flags.DEFINE_string("worker_hosts", '', "Comma-separated list of hostname:port pairs")
tf.app.flags.DEFINE_string("job_name", '', "One of 'ps', 'worker'")
tf.app.flags.DEFINE_integer("task_index", 0, "Index of task within the job")
tf.app.flags.DEFINE_integer("num_threads", 16, "Number of threads")
tf.app.flags.DEFINE_integer("feature_size", 0, "Number of features")
tf.app.flags.DEFINE_integer("field_size", 0, "Number of fields")
tf.app.flags.DEFINE_integer("embedding_size", 64, "Embedding size")
tf.app.flags.DEFINE_integer("num_epochs", 10, "Number of epochs")
tf.app.flags.DEFINE_integer("batch_size", 128, "Number of batch size")
tf.app.flags.DEFINE_integer("log_steps", 1000, "save summary every steps")
tf.app.flags.DEFINE_float("learning_rate", 0.05, "learning rate")
tf.app.flags.DEFINE_float("l2_reg", 0.001, "L2 regularization")
tf.app.flags.DEFINE_string("loss_type", 'log_loss', "loss type {square_loss, log_loss}")
tf.app.flags.DEFINE_string("optimizer", 'Adam', "optimizer type {Adam, Adagrad, GD, Momentum}")
tf.app.flags.DEFINE_string("deep_layers", '128,64', "deep layers")
tf.app.flags.DEFINE_string("dropout", '0.5,0.8,0.8', "dropout rate")
tf.app.flags.DEFINE_boolean("batch_norm", False, "perform batch normaization (True or False)")
tf.app.flags.DEFINE_float("batch_norm_decay", 0.9, "decay for the moving average(recommend trying decay=0.9)")
tf.app.flags.DEFINE_string("data_dir", '', "data dir")
tf.app.flags.DEFINE_string("dt_dir", '', "data dt partition")
tf.app.flags.DEFINE_string("model_dir", '', "model check point dir")
tf.app.flags.DEFINE_string("servable_model_dir", '', "export servable model for TensorFlow Serving")
tf.app.flags.DEFINE_string("task_type", 'train', "task type {train, infer, eval, export}")
tf.app.flags.DEFINE_boolean("clear_existing_model", False, "clear existing model or not")
#1 1:0.5 2:0.03519 3:1 4:0.02567 7:0.03708 8:0.01705 9:0.06296 10:0.18185 11:0.02497 12:1 14:0.02565 15:0.03267 17:0.0247 18:0.03158 20:1 22:1 23:0.13169 24:0.02933 27:0.18159 31:0.0177 34:0.02888 38:1 51:1 63:1 132:1 164:1 236:1
def input_fn(filenames, batch_size=32, num_epochs=1, perform_shuffle=False):
print('Parsing', filenames)
def decode_libsvm(line):
#columns = tf.decode_csv(value, record_defaults=CSV_COLUMN_DEFAULTS)
#features = dict(zip(CSV_COLUMNS, columns))
#labels = features.pop(LABEL_COLUMN)
columns = tf.string_split([line], ' ')
labels = tf.string_to_number(columns.values[0], out_type=tf.float32)
splits = tf.string_split(columns.values[1:], ':')
id_vals = tf.reshape(splits.values,splits.dense_shape)
feat_ids, feat_vals = tf.split(id_vals,num_or_size_splits=2,axis=1)
feat_ids = tf.string_to_number(feat_ids, out_type=tf.int32)
feat_vals = tf.string_to_number(feat_vals, out_type=tf.float32)
return {"feat_ids": feat_ids, "feat_vals": feat_vals}, labels
# Extract lines from input files using the Dataset API, can pass one filename or filename list
dataset = tf.data.TextLineDataset(filenames).map(decode_libsvm, num_parallel_calls=10).prefetch(500000) # multi-thread pre-process then prefetch
# Randomizes input using a window of 256 elements (read into memory)
if perform_shuffle:
dataset = dataset.shuffle(buffer_size=256)
# epochs from blending together.
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size) # Batch size to use
iterator = dataset.make_one_shot_iterator()
batch_features, batch_labels = iterator.get_next()
#return tf.reshape(batch_ids,shape=[-1,field_size]), tf.reshape(batch_vals,shape=[-1,field_size]), batch_labels
return batch_features, batch_labels
def model_fn(features, labels, mode, params):
"""Bulid Model function f(x) for Estimator."""
#------hyperparameters----
field_size = params["field_size"]
feature_size = params["feature_size"]
embedding_size = params["embedding_size"]
l2_reg = params["l2_reg"]
learning_rate = params["learning_rate"]
#optimizer = params["optimizer"]
layers = map(int, params["deep_layers"].split(','))
dropout = map(float, params["dropout"].split(','))
    #------build weights------
Global_Bias = tf.get_variable(name='bias', shape=[1], initializer=tf.constant_initializer(0.0))
Feat_Bias = tf.get_variable(name='linear', shape=[feature_size], initializer=tf.glorot_normal_initializer())
Feat_Emb = tf.get_variable(name='emb', shape=[feature_size,embedding_size], initializer=tf.glorot_normal_initializer())
    #------build features------
feat_ids = features['feat_ids']
feat_ids = tf.reshape(feat_ids,shape=[-1,field_size])
feat_vals = features['feat_vals']
feat_vals = tf.reshape(feat_vals,shape=[-1,field_size])
#------build f(x)------
with tf.variable_scope("Linear-part"):
        feat_wgts = tf.nn.embedding_lookup(Feat_Bias, feat_ids)              # None * F
y_linear = tf.reduce_sum(tf.multiply(feat_wgts, feat_vals),1)
with tf.variable_scope("BiInter-part"):
embeddings = tf.nn.embedding_lookup(Feat_Emb, feat_ids) # None * F * K
feat_vals = tf.reshape(feat_vals, shape=[-1, field_size, 1])
embeddings = tf.multiply(embeddings, feat_vals) # vij * xi
sum_square_emb = tf.square(tf.reduce_sum(embeddings,1))
square_sum_emb = tf.reduce_sum(tf.square(embeddings),1)
deep_inputs = 0.5*tf.subtract(sum_square_emb, square_sum_emb) # None * K
with tf.variable_scope("Deep-part"):
if mode == tf.estimator.ModeKeys.TRAIN:
train_phase = True
else:
train_phase = False
if mode == tf.estimator.ModeKeys.TRAIN:
deep_inputs = tf.nn.dropout(deep_inputs, keep_prob=dropout[0]) # None * K
for i in range(len(layers)):
deep_inputs = tf.contrib.layers.fully_connected(inputs=deep_inputs, num_outputs=layers[i], \
weights_regularizer=tf.contrib.layers.l2_regularizer(l2_reg), scope='mlp%d' % i)
if FLAGS.batch_norm:
                deep_inputs = batch_norm_layer(deep_inputs, train_phase=train_phase, scope_bn='bn_%d' %i)   # applied after the ReLU, see https://github.com/ducha-aiki/caffenet-benchmark/blob/master/batchnorm.md#bn----before-or-after-relu
if mode == tf.estimator.ModeKeys.TRAIN:
deep_inputs = tf.nn.dropout(deep_inputs, keep_prob=dropout[i]) #Apply Dropout after all BN layers and set dropout=0.8(drop_ratio=0.2)
#deep_inputs = tf.layers.dropout(inputs=deep_inputs, rate=dropout[i], training=mode == tf.estimator.ModeKeys.TRAIN)
y_deep = tf.contrib.layers.fully_connected(inputs=deep_inputs, num_outputs=1, activation_fn=tf.identity, \
weights_regularizer=tf.contrib.layers.l2_regularizer(l2_reg), scope='deep_out')
y_d = tf.reshape(y_deep,shape=[-1])
with tf.variable_scope("NFM-out"):
        #y_bias = Global_Bias * tf.ones_like(labels, dtype=tf.float32)  # None * 1; warning: do not use `labels` here, or predict/export will fail while train/evaluate still work; the Estimator appears to optimize the graph and does not feed labels when they are unused
        y_bias = Global_Bias * tf.ones_like(y_d, dtype=tf.float32)      # None * 1
y = y_bias + y_linear + y_d
pred = tf.sigmoid(y)
predictions={"prob": pred}
export_outputs = {tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: tf.estimator.export.PredictOutput(predictions)}
# Provide an estimator spec for `ModeKeys.PREDICT`
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs=export_outputs)
    #------build loss------
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y, labels=labels)) + \
l2_reg * tf.nn.l2_loss(Feat_Bias) + l2_reg * tf.nn.l2_loss(Feat_Emb)
# Provide an estimator spec for `ModeKeys.EVAL`
eval_metric_ops = {
"auc": tf.metrics.auc(labels, pred)
}
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
eval_metric_ops=eval_metric_ops)
    #------build optimizer------
if FLAGS.optimizer == 'Adam':
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-8)
elif FLAGS.optimizer == 'Adagrad':
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate, initial_accumulator_value=1e-8)
elif FLAGS.optimizer == 'Momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.95)
elif FLAGS.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
# Provide an estimator spec for `ModeKeys.TRAIN` modes
if mode == tf.estimator.ModeKeys.TRAIN:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
# Provide an estimator spec for `ModeKeys.EVAL` and `ModeKeys.TRAIN` modes.
#return tf.estimator.EstimatorSpec(
# mode=mode,
# loss=loss,
# train_op=train_op,
# predictions={"prob": pred},
# eval_metric_ops=eval_metric_ops)
def batch_norm_layer(x, train_phase, scope_bn):
bn_train = tf.contrib.layers.batch_norm(x, decay=FLAGS.batch_norm_decay, center=True, scale=True, updates_collections=None, is_training=True, reuse=None, scope=scope_bn)
bn_infer = tf.contrib.layers.batch_norm(x, decay=FLAGS.batch_norm_decay, center=True, scale=True, updates_collections=None, is_training=False, reuse=True, scope=scope_bn)
z = tf.cond(tf.cast(train_phase, tf.bool), lambda: bn_train, lambda: bn_infer)
return z
def set_dist_env():
    if FLAGS.dist_mode == 1:      # local distributed test mode: 1 chief, 1 ps, 1 evaluator
ps_hosts = FLAGS.ps_hosts.split(',')
        chief_hosts = FLAGS.chief_hosts.split(',')   # note: no chief_hosts flag is defined above, so this branch would fail; dist_mode defaults to 0, so it is never reached in this notebook
task_index = FLAGS.task_index
job_name = FLAGS.job_name
print('ps_host', ps_hosts)
print('chief_hosts', chief_hosts)
print('job_name', job_name)
print('task_index', str(task_index))
        # no worker hosts in this mode
tf_config = {
'cluster': {'chief': chief_hosts, 'ps': ps_hosts},
'task': {'type': job_name, 'index': task_index }
}
print(json.dumps(tf_config))
os.environ['TF_CONFIG'] = json.dumps(tf_config)
    elif FLAGS.dist_mode == 2:    # cluster distributed mode
ps_hosts = FLAGS.ps_hosts.split(',')
worker_hosts = FLAGS.worker_hosts.split(',')
chief_hosts = worker_hosts[0:1] # get first worker as chief
worker_hosts = worker_hosts[2:] # the rest as worker
task_index = FLAGS.task_index
job_name = FLAGS.job_name
print('ps_host', ps_hosts)
print('worker_host', worker_hosts)
print('chief_hosts', chief_hosts)
print('job_name', job_name)
print('task_index', str(task_index))
# use #worker=0 as chief
if job_name == "worker" and task_index == 0:
job_name = "chief"
# use #worker=1 as evaluator
if job_name == "worker" and task_index == 1:
job_name = 'evaluator'
task_index = 0
# the others as worker
if job_name == "worker" and task_index > 1:
task_index -= 2
tf_config = {
'cluster': {'chief': chief_hosts, 'worker': worker_hosts, 'ps': ps_hosts},
'task': {'type': job_name, 'index': task_index }
}
print(json.dumps(tf_config))
os.environ['TF_CONFIG'] = json.dumps(tf_config)
def main(_):
tr_files = glob.glob("%s/tr*libsvm" % FLAGS.data_dir)
random.shuffle(tr_files)
print("tr_files:", tr_files)
va_files = glob.glob("%s/va*libsvm" % FLAGS.data_dir)
print("va_files:", va_files)
te_files = glob.glob("%s/te*libsvm" % FLAGS.data_dir)
print("te_files:", te_files)
if FLAGS.clear_existing_model:
try:
shutil.rmtree(FLAGS.model_dir)
except Exception as e:
print(e, "at clear_existing_model")
else:
print("existing model cleaned at %s" % FLAGS.model_dir)
set_dist_env()
model_params = {
"field_size": FLAGS.field_size,
"feature_size": FLAGS.feature_size,
"embedding_size": FLAGS.embedding_size,
"learning_rate": FLAGS.learning_rate,
"l2_reg": FLAGS.l2_reg,
"deep_layers": FLAGS.deep_layers,
"dropout": FLAGS.dropout
}
config = tf.estimator.RunConfig().replace(session_config = tf.ConfigProto(device_count={'GPU':0, 'CPU':FLAGS.num_threads}),
log_step_count_steps=FLAGS.log_steps, save_summary_steps=FLAGS.log_steps)
DeepFM = tf.estimator.Estimator(model_fn=model_fn, model_dir=FLAGS.model_dir, params=model_params, config=config)
if FLAGS.task_type == 'train':
train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_fn(tr_files, num_epochs=FLAGS.num_epochs, batch_size=FLAGS.batch_size))
eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_fn(va_files, num_epochs=1, batch_size=FLAGS.batch_size), steps=None, start_delay_secs=1000, throttle_secs=1200)
tf.estimator.train_and_evaluate(DeepFM, train_spec, eval_spec)
elif FLAGS.task_type == 'eval':
DeepFM.evaluate(input_fn=lambda: input_fn(va_files, num_epochs=1, batch_size=FLAGS.batch_size))
elif FLAGS.task_type == 'infer':
preds = DeepFM.predict(input_fn=lambda: input_fn(te_files, num_epochs=1, batch_size=FLAGS.batch_size), predict_keys="prob")
with open(FLAGS.data_dir+"/pred.txt", "w") as fo:
for prob in preds:
fo.write("%f\n" % (prob['prob']))
elif FLAGS.task_type == 'export':
#feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
#feature_spec = {
# 'feat_ids': tf.FixedLenFeature(dtype=tf.int64, shape=[None, FLAGS.field_size]),
# 'feat_vals': tf.FixedLenFeature(dtype=tf.float32, shape=[None, FLAGS.field_size])
#}
#serving_input_receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)
feature_spec = {
'feat_ids': tf.placeholder(dtype=tf.int64, shape=[None, FLAGS.field_size], name='feat_ids'),
'feat_vals': tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.field_size], name='feat_vals')
}
serving_input_receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)
DeepFM.export_savedmodel(FLAGS.servable_model_dir, serving_input_receiver_fn)
if __name__ == "__main__":
#------check Arguments------
if FLAGS.dt_dir == "":
FLAGS.dt_dir = (date.today() + timedelta(-1)).strftime('%Y%m%d')
FLAGS.model_dir = FLAGS.model_dir + FLAGS.dt_dir
#FLAGS.data_dir = FLAGS.data_dir + FLAGS.dt_dir
print('task_type ', FLAGS.task_type)
print('model_dir ', FLAGS.model_dir)
print('data_dir ', FLAGS.data_dir)
print('dt_dir ', FLAGS.dt_dir)
print('num_epochs ', FLAGS.num_epochs)
print('feature_size ', FLAGS.feature_size)
print('field_size ', FLAGS.field_size)
print('embedding_size ', FLAGS.embedding_size)
print('batch_size ', FLAGS.batch_size)
print('deep_layers ', FLAGS.deep_layers)
print('dropout ', FLAGS.dropout)
print('loss_type ', FLAGS.loss_type)
print('optimizer ', FLAGS.optimizer)
print('learning_rate ', FLAGS.learning_rate)
print('l2_reg ', FLAGS.l2_reg)
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
!python NFM.py --task_type=train \
--learning_rate=0.0005 \
--optimizer=Adam \
--num_epochs=1 \
--batch_size=256 \
--field_size=39 \
--feature_size=117581 \
--deep_layers=400,400,400 \
--dropout=0.5,0.5,0.5 \
--log_steps=1000 \
--num_threads=8 \
--model_dir=./criteo_model/NFM \
--data_dir=./criteo_data
###Output
('task_type ', 'train')
('model_dir ', './criteo_model/NFM20180504')
('data_dir ', './criteo_data')
('dt_dir ', '20180504')
('num_epochs ', 1)
('feature_size ', 117581)
('field_size ', 39)
('embedding_size ', 64)
('batch_size ', 256)
('deep_layers ', '400,400,400')
('dropout ', '0.5,0.5,0.5')
('loss_type ', 'log_loss')
('optimizer ', 'Adam')
('learning_rate ', 0.0005)
('l2_reg ', 0.001)
('tr_files:', ['./criteo_data/tr.libsvm'])
('va_files:', ['./criteo_data/va.libsvm'])
('te_files:', ['./criteo_data/te.libsvm'])
INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600, '_session_config': device_count {
key: "CPU"
value: 8
}
device_count {
key: "GPU"
}
, '_keep_checkpoint_max': 5, '_task_type': 'worker', '_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x6455490>, '_save_checkpoints_steps': None, '_keep_checkpoint_every_n_hours': 10000, '_service': None, '_num_ps_replicas': 0, '_tf_random_seed': None, '_master': '', '_num_worker_replicas': 1, '_task_id': 0, '_log_step_count_steps': 1000, '_model_dir': './criteo_model/NFM', '_save_summary_steps': 1000}
INFO:tensorflow:Running training and evaluation locally (non-distributed).
INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after 1200 secs (eval_spec.throttle_secs) or training is finished.
('Parsing', ['./criteo_data/tr.libsvm'])
INFO:tensorflow:Create CheckpointSaverHook.
2018-05-05 09:12:20.744054: I tensorflow/core/platform/cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA
INFO:tensorflow:Saving checkpoints for 1 into ./criteo_model/NFM/model.ckpt.
INFO:tensorflow:loss = 0.74344236, step = 1
INFO:tensorflow:loss = 0.52700615, step = 101 (12.126 sec)
INFO:tensorflow:loss = 0.53884023, step = 201 (8.622 sec)
INFO:tensorflow:loss = 0.5741194, step = 301 (8.680 sec)
INFO:tensorflow:loss = 0.59511054, step = 401 (8.699 sec)
INFO:tensorflow:loss = 0.47535682, step = 501 (8.683 sec)
INFO:tensorflow:loss = 0.5811361, step = 601 (9.074 sec)
INFO:tensorflow:loss = 0.5244339, step = 701 (9.141 sec)
INFO:tensorflow:loss = 0.5584926, step = 801 (9.293 sec)
INFO:tensorflow:loss = 0.51474106, step = 901 (9.456 sec)
INFO:tensorflow:global_step/sec: 10.8086
INFO:tensorflow:loss = 0.5706619, step = 1001 (8.749 sec)
INFO:tensorflow:loss = 0.523185, step = 1101 (9.152 sec)
INFO:tensorflow:loss = 0.5341173, step = 1201 (8.163 sec)
INFO:tensorflow:loss = 0.5158627, step = 1301 (8.788 sec)
INFO:tensorflow:loss = 0.51566017, step = 1401 (8.700 sec)
INFO:tensorflow:loss = 0.4592861, step = 1501 (8.962 sec)
INFO:tensorflow:loss = 0.50802827, step = 1601 (8.990 sec)
INFO:tensorflow:loss = 0.5538678, step = 1701 (8.596 sec)
INFO:tensorflow:loss = 0.4346152, step = 1801 (8.267 sec)
INFO:tensorflow:loss = 0.5406091, step = 1901 (8.907 sec)
INFO:tensorflow:global_step/sec: 11.4221
INFO:tensorflow:loss = 0.5177407, step = 2001 (9.026 sec)
INFO:tensorflow:loss = 0.50947416, step = 2101 (10.118 sec)
INFO:tensorflow:loss = 0.5290449, step = 2201 (8.635 sec)
INFO:tensorflow:loss = 0.48367974, step = 2301 (9.689 sec)
INFO:tensorflow:loss = 0.5103478, step = 2401 (9.785 sec)
INFO:tensorflow:loss = 0.5290227, step = 2501 (9.748 sec)
INFO:tensorflow:loss = 0.5219102, step = 2601 (9.889 sec)
INFO:tensorflow:loss = 0.5131693, step = 2701 (10.787 sec)
INFO:tensorflow:loss = 0.47013655, step = 2801 (11.150 sec)
INFO:tensorflow:loss = 0.5133655, step = 2901 (12.453 sec)
INFO:tensorflow:global_step/sec: 9.68192
INFO:tensorflow:loss = 0.5253961, step = 3001 (11.027 sec)
INFO:tensorflow:loss = 0.53593737, step = 3101 (10.576 sec)
INFO:tensorflow:loss = 0.47377995, step = 3201 (9.975 sec)
INFO:tensorflow:loss = 0.5179897, step = 3301 (9.655 sec)
INFO:tensorflow:loss = 0.5014092, step = 3401 (8.827 sec)
INFO:tensorflow:loss = 0.50651914, step = 3501 (9.877 sec)
INFO:tensorflow:loss = 0.4893608, step = 3601 (7.170 sec)
INFO:tensorflow:loss = 0.5037479, step = 3701 (7.128 sec)
INFO:tensorflow:loss = 0.46921813, step = 3801 (7.062 sec)
INFO:tensorflow:loss = 0.5224898, step = 3901 (6.815 sec)
INFO:tensorflow:global_step/sec: 11.7165
INFO:tensorflow:loss = 0.5555479, step = 4001 (8.265 sec)
INFO:tensorflow:loss = 0.53638494, step = 4101 (9.037 sec)
INFO:tensorflow:loss = 0.58234245, step = 4201 (8.601 sec)
INFO:tensorflow:loss = 0.57939863, step = 4301 (8.564 sec)
INFO:tensorflow:loss = 0.51434916, step = 4401 (8.940 sec)
INFO:tensorflow:loss = 0.5549449, step = 4501 (8.833 sec)
INFO:tensorflow:loss = 0.5062487, step = 4601 (8.651 sec)
INFO:tensorflow:loss = 0.5529063, step = 4701 (8.658 sec)
INFO:tensorflow:loss = 0.49861303, step = 4801 (8.808 sec)
INFO:tensorflow:loss = 0.54094946, step = 4901 (8.782 sec)
INFO:tensorflow:global_step/sec: 11.413
INFO:tensorflow:loss = 0.49571908, step = 5001 (8.745 sec)
INFO:tensorflow:loss = 0.5437416, step = 5101 (8.432 sec)
INFO:tensorflow:loss = 0.5013172, step = 5201 (8.366 sec)
INFO:tensorflow:loss = 0.50875455, step = 5301 (8.017 sec)
INFO:tensorflow:loss = 0.5869225, step = 5401 (8.335 sec)
INFO:tensorflow:loss = 0.5402778, step = 5501 (8.121 sec)
INFO:tensorflow:loss = 0.52757925, step = 5601 (8.187 sec)
INFO:tensorflow:loss = 0.48195118, step = 5701 (7.991 sec)
INFO:tensorflow:loss = 0.4779031, step = 5801 (7.904 sec)
INFO:tensorflow:loss = 0.5278434, step = 5901 (7.916 sec)
INFO:tensorflow:global_step/sec: 12.3543
INFO:tensorflow:loss = 0.5329895, step = 6001 (7.673 sec)
INFO:tensorflow:loss = 0.5151729, step = 6101 (7.622 sec)
INFO:tensorflow:loss = 0.62112814, step = 6201 (7.493 sec)
INFO:tensorflow:loss = 0.48736763, step = 6301 (7.491 sec)
INFO:tensorflow:loss = 0.45068923, step = 6401 (7.353 sec)
INFO:tensorflow:loss = 0.51698387, step = 6501 (7.221 sec)
INFO:tensorflow:loss = 0.5078758, step = 6601 (7.112 sec)
INFO:tensorflow:loss = 0.53784084, step = 6701 (7.051 sec)
INFO:tensorflow:loss = 0.568355, step = 6801 (6.848 sec)
INFO:tensorflow:Saving checkpoints for 6863 into ./criteo_model/NFM/model.ckpt.
INFO:tensorflow:loss = 0.5869765, step = 6901 (7.007 sec)
INFO:tensorflow:global_step/sec: 13.877
INFO:tensorflow:loss = 0.50776565, step = 7001 (6.864 sec)
INFO:tensorflow:Saving checkpoints for 7034 into ./criteo_model/NFM/model.ckpt.
INFO:tensorflow:Loss for final step: 0.66966015.
('Parsing', ['./criteo_data/va.libsvm'])
INFO:tensorflow:Starting evaluation at 2018-05-05-01:22:35
INFO:tensorflow:Restoring parameters from ./criteo_model/NFM/model.ckpt-7034
INFO:tensorflow:Finished evaluation at 2018-05-05-01:22:58
INFO:tensorflow:Saving dict for global step 7034: auc = 0.7614266, global_step = 7034, loss = 0.50850546
('Parsing', ['./criteo_data/tr.libsvm'])
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from ./criteo_model/NFM/model.ckpt-7034
INFO:tensorflow:Saving checkpoints for 7035 into ./criteo_model/NFM/model.ckpt.
INFO:tensorflow:loss = 0.53954387, step = 7035
INFO:tensorflow:loss = 0.506534, step = 7135 (10.071 sec)
INFO:tensorflow:loss = 0.5184156, step = 7235 (8.270 sec)
INFO:tensorflow:loss = 0.5448781, step = 7335 (8.497 sec)
INFO:tensorflow:loss = 0.58426636, step = 7435 (7.031 sec)
INFO:tensorflow:loss = 0.4775302, step = 7535 (7.547 sec)
INFO:tensorflow:loss = 0.57145935, step = 7635 (8.272 sec)
INFO:tensorflow:loss = 0.52330667, step = 7735 (7.936 sec)
INFO:tensorflow:loss = 0.52791095, step = 7835 (7.510 sec)
INFO:tensorflow:loss = 0.5160444, step = 7935 (7.842 sec)
INFO:tensorflow:global_step/sec: 12.3632
INFO:tensorflow:loss = 0.54860413, step = 8035 (7.911 sec)
INFO:tensorflow:loss = 0.5232839, step = 8135 (8.025 sec)
INFO:tensorflow:loss = 0.5313403, step = 8235 (7.744 sec)
INFO:tensorflow:loss = 0.5083723, step = 8335 (8.124 sec)
INFO:tensorflow:loss = 0.5127937, step = 8435 (7.833 sec)
INFO:tensorflow:loss = 0.45451465, step = 8535 (8.071 sec)
INFO:tensorflow:loss = 0.5148269, step = 8635 (8.239 sec)
INFO:tensorflow:loss = 0.5475166, step = 8735 (8.724 sec)
INFO:tensorflow:loss = 0.4436438, step = 8835 (7.380 sec)
INFO:tensorflow:loss = 0.527986, step = 8935 (6.852 sec)
INFO:tensorflow:global_step/sec: 12.8544
INFO:tensorflow:loss = 0.5004729, step = 9035 (6.800 sec)
INFO:tensorflow:loss = 0.5152983, step = 9135 (6.864 sec)
INFO:tensorflow:loss = 0.5443342, step = 9235 (6.611 sec)
INFO:tensorflow:loss = 0.48795786, step = 9335 (8.363 sec)
INFO:tensorflow:loss = 0.50839627, step = 9435 (8.677 sec)
INFO:tensorflow:loss = 0.53861755, step = 9535 (8.617 sec)
INFO:tensorflow:loss = 0.5102945, step = 9635 (8.498 sec)
INFO:tensorflow:loss = 0.49490649, step = 9735 (8.507 sec)
INFO:tensorflow:loss = 0.46887958, step = 9835 (8.529 sec)
INFO:tensorflow:loss = 0.5098571, step = 9935 (8.540 sec)
INFO:tensorflow:global_step/sec: 12.2224
INFO:tensorflow:loss = 0.5108617, step = 10035 (8.612 sec)
INFO:tensorflow:loss = 0.5259123, step = 10135 (8.646 sec)
INFO:tensorflow:loss = 0.49567312, step = 10235 (8.585 sec)
INFO:tensorflow:loss = 0.50952077, step = 10335 (9.826 sec)
INFO:tensorflow:loss = 0.50462925, step = 10435 (8.775 sec)
INFO:tensorflow:loss = 0.49131048, step = 10535 (8.954 sec)
INFO:tensorflow:loss = 0.51161194, step = 10635 (8.810 sec)
INFO:tensorflow:loss = 0.49189892, step = 10735 (8.735 sec)
INFO:tensorflow:loss = 0.45244217, step = 10835 (8.599 sec)
INFO:tensorflow:loss = 0.5231385, step = 10935 (8.917 sec)
INFO:tensorflow:global_step/sec: 11.2768
INFO:tensorflow:loss = 0.5461174, step = 11035 (8.829 sec)
INFO:tensorflow:loss = 0.5328863, step = 11135 (8.628 sec)
INFO:tensorflow:loss = 0.5831222, step = 11235 (8.300 sec)
INFO:tensorflow:loss = 0.5753766, step = 11335 (7.912 sec)
INFO:tensorflow:loss = 0.5203026, step = 11435 (8.527 sec)
INFO:tensorflow:loss = 0.55177057, step = 11535 (8.694 sec)
INFO:tensorflow:loss = 0.5044052, step = 11635 (8.612 sec)
INFO:tensorflow:loss = 0.54929847, step = 11735 (8.030 sec)
INFO:tensorflow:loss = 0.5100083, step = 11835 (8.013 sec)
INFO:tensorflow:loss = 0.54684854, step = 11935 (8.033 sec)
INFO:tensorflow:global_step/sec: 12.0882
INFO:tensorflow:loss = 0.49854505, step = 12035 (7.976 sec)
INFO:tensorflow:loss = 0.5296041, step = 12135 (8.307 sec)
INFO:tensorflow:loss = 0.5118119, step = 12235 (8.437 sec)
INFO:tensorflow:loss = 0.5206564, step = 12335 (8.180 sec)
INFO:tensorflow:loss = 0.56792927, step = 12435 (8.102 sec)
INFO:tensorflow:loss = 0.53202826, step = 12535 (8.284 sec)
###Markdown
DeepCTR makes full use of the visual impact of the ad image, combining image information (extracted with a CNN) with the business features to jointly estimate the click-through rate.
###Code
# %load train_with_googlenet.py
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, Adadelta, Adagrad
from keras.layers import Embedding,Merge
from keras.callbacks import ModelCheckpoint
import keras
from keras.preprocessing import image
import numpy as np
import sys, os, re
from keras.applications.inception_v3 import InceptionV3, preprocess_input
# define the image CNN: Google Inception V3 pretrained on ImageNet, frozen and used as a feature extractor (the original comment said VGG, but the code uses Inception V3)
def GoogleInceptionV3():
model = InceptionV3(weights='imagenet', include_top=False)
model.trainable = False
return model
# load field and feature metadata
def load_field_feature_meta(field_info_file):
field_feature_dic = {}
for line in open(field_info_file):
contents = line.strip().split("\t")
field_id = int(contents[1])
feature_count = int(contents[4])
field_feature_dic[field_id] = feature_count
return field_feature_dic
# embed the (one-hot) CTR features
def CTR_embedding(field_feature_dic):
emd = []
for field_id in range(len(field_feature_dic)):
        # first embed each discrete field into a dense layer
tmp_model = Sequential()
        # reserve one slot for rare/unseen values
input_dims = field_feature_dic[field_id]+1
if input_dims>16:
dense_dim = 16
else:
dense_dim = input_dims
tmp_model.add(Dense(dense_dim, input_dim=input_dims))
emd.append(tmp_model)
return emd
# the full network structure
def full_network(field_feature_dic):
print "GoogleNet model loading"
googleNet_model = GoogleInceptionV3()
    image_model = Flatten()(googleNet_model.output)   # use the single .output tensor; .outputs is a list and would break Flatten
image_model = Dense(256)(image_model)
print "GoogleNet model loaded"
print "initialize embedding model"
print "loading fields info..."
emd = CTR_embedding(field_feature_dic)
print "embedding model done!"
print "initialize full model..."
full_model = Sequential()
full_input = [image_model] + emd
full_model.add(Merge(full_input, mode='concat'))
    # batch normalization
full_model.add(keras.layers.normalization.BatchNormalization())
    # fully connected layer
full_model.add(Dense(128))
full_model.add(Dropout(0.4))
full_model.add(Activation('relu'))
    # fully connected layer
full_model.add(Dense(128))
full_model.add(Dropout(0.4))
    # final classification layer
full_model.add(Dense(1))
full_model.add(Activation('sigmoid'))
    # compile the full model
full_model.compile(loss='binary_crossentropy',
optimizer='adadelta',
metrics=['binary_accuracy','fmeasure'])
    # print a per-layer summary of the model
full_model.summary()
return full_model
# image preprocessing
def vgg_image_preoprocessing(img_path):
    # the parameter was originally named `image`, which shadowed the keras.preprocessing.image module and broke image.load_img; renamed to img_path
    img = image.load_img(img_path, target_size=(299, 299))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
return x
# CTR feature preprocessing
def ctr_feature_preprocessing(field_feature_string):
contents = field_feature_string.strip().split(" ")
feature_dic = {}
for content in contents:
field_id, feature_id, num = content.split(":")
feature_dic[int(field_id)] = int(feature_id)
return feature_dic
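# a quick illustration of the expected "field_id:feature_id:num" input, e.g.:
#   ctr_feature_preprocessing("3:7:1 5:2:1")  ->  {3: 7, 5: 2}
# (only the field_id -> feature_id mapping is kept; the trailing count is ignored)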
# generator yielding one training batch at a time from the data file
def generate_batch_from_file(in_f, field_feature_dic, batch_num, skip_lines=0):
    # initialize x and y
img_x = []
x = []
for field_id in range(len(field_feature_dic)):
x.append(np.zeros((batch_num, int(field_feature_dic[field_id])+1)))
y = [0.0]*batch_num
round_num = 1
while True:
line_count = 0
skips = 0
f = open(in_f)
for line in f:
if(skip_lines>0 and round_num==1):
if skips < skip_lines:
skips += 1
continue
if (line_count+1)%batch_num == 0:
contents = line.strip().split("\t")
img_name = "images/"+re.sub(r'.jpg.*', '.jpg', contents[1].split("/")[-1])
if not os.path.isfile(img_name):
continue
                # build the last sample of this batch
try:
img_input = vgg_image_preoprocessing(img_name)
except:
continue
                # fill in the image features
                img_x.append(img_input)
                # fill in the CTR features
                ctr_feature_dic = ctr_feature_preprocessing(contents[2])
                for field_id in ctr_feature_dic:
                    x[field_id][line_count][ctr_feature_dic[field_id]] = 1.0
                # fill in the label
                y[line_count] = int(contents[0])
                #print "shape is", np.array(img_x).shape
yield ([np.array(img_x)]+x, y)
img_x = []
x = []
for field_id in range(len(field_feature_dic)):
x.append(np.zeros((batch_num, int(field_feature_dic[field_id])+1)))
y = [0.0]*batch_num
line_count = 0
else:
contents = line.strip().split("\t")
img_name = "images/"+re.sub(r'.jpg.*', '.jpg', contents[1].split("/")[-1])
if not os.path.isfile(img_name):
continue
try:
img_input = vgg_image_preoprocessing(img_name)
except:
continue
                # fill in the image features
                img_x.append(img_input)
                # fill in the CTR features
                ctr_feature_dic = ctr_feature_preprocessing(contents[2])
                for field_id in ctr_feature_dic:
                    x[field_id][line_count][ctr_feature_dic[field_id]] = 1.0
                # fill in the label
                y[line_count] = int(contents[0])
line_count += 1
f.close()
round_num += 1
def train_network(skip_lines, batch_num, field_info_file, data_file, weight_file):
print "starting train whole network...\n"
field_feature_dic = load_field_feature_meta(field_info_file)
full_model = full_network(field_feature_dic)
if os.path.isfile(weight_file):
full_model.load_weights(weight_file)
checkpointer = ModelCheckpoint(filepath=weight_file, save_best_only=False, verbose=1, period=3)
full_model.fit_generator(generate_batch_from_file(data_file, field_feature_dic, batch_num, skip_lines),samples_per_epoch=1280, nb_epoch=100000, callbacks=[checkpointer])
if __name__ == '__main__':
skip_lines = sys.argv[1]
batch_num = sys.argv[2]
field_info_file = sys.argv[3]
data_file = sys.argv[4]
weight_file = sys.argv[5]
train_network(int(skip_lines), int(batch_num), field_info_file, data_file, weight_file)
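# Example invocation (a sketch; the file names below are placeholders, not files shipped with this notebook):
#   python train_with_googlenet.py 0 32 field_info.txt train_data.txt weights.h5
# Arguments: lines to skip on the first pass over the data, batch size, the field/feature
# metadata file read by load_field_feature_meta, the tab-separated training file
# (label \t image url \t "field:feature:num ..." features), and the weight checkpoint path.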
###Output
_____no_output_____ |
Datalabs/Datalab02/2-IntermediatePythonI.ipynb | ###Markdown
Intermediate Python I Please indicate your name below, since you will need to submit this completed notebook no later than the day after the datalab. Don't forget to save your progress during the datalab to avoid any loss due to crashes.
###Code
name=''
###Output
_____no_output_____
###Markdown
During the previous datalab we learned about the basic built-in features of Python: variables and data types, loops and conditions. We also mentioned that there are thousands of open-source packages which can be imported into Python. Several of these libraries are considered rather standard (and come preinstalled when using Anaconda). If we would like to create a package executing less standard tasks (for example parsing nuclear data formats), it is worth browsing the web for pre-existing solutions before reinventing the wheel. There is for example the nuclear engineering toolkit called Pyne, which provides a lot of functionality relevant for nuclear scientists. In the next two datalabs we are going to try some standard libraries and get acquainted with various data formats. Libraries reviewed today are:
- numpy, which provides data containers and functions to perform numerical analysis, matrix operations, linear algebra and a lot more.
- matplotlib, which provides visualization tools from simple 2D plots to advanced 3D visualizations.
- scipy, which provides scientific tools, for example fitting, ODE solvers and numerical integration.
- re, which allows using regular expressions. A regular expression (often called regex or regexp) is a search pattern specified as a sequence of characters; the `re` package provides regexp operations.

It is neither possible nor necessary to review all the features of these packages; rather, we would like to provide you with the basic terminology and to cover their basic functionality. What is important for now is to know about these packages and familiarize yourself with the basic principles of these tools. Later exercises will refer to functions and methods of these libraries. It is not required to keep all their functionality in your head: as you advance you will often find yourself googling for solutions and reading through Stack Overflow entries, and with your basic understanding and knowledge of the terminology you will be able to adapt the proposed solutions to your own needs. There are several other mainstream Python libraries which we will not cover at all in this course; for example, symPy allows for symbolic operations, and sklearn provides an interface to machine learning models. Nevertheless, once you get a feel for using Python, it will be relatively easy to use any other package according to your future needs. Numpy We saw before how to organize data in arrays called lists. Lists are extremely flexible: they can store data of different types (e.g. `[1,'one',True]`), and one can easily create multidimensional lists, or matrices (e.g. `[[1,2,3],[4,5,6],[7,8,9]]`). Nevertheless, in some sense this flexibility is also a drawback. If we perform operations on a list (for example by looping over its elements and performing some simple mathematics), Python needs to check the type of each element. For compiled code, where the type is declared beforehand, such checks are not needed, hence it runs faster. Numpy (numerical Python) offers a more efficient data structure called the *numpy array* (which stores elements of the same type) and provides related data operations. Numpy arrays are important building blocks of several other tools (e.g. data science applications), and they make life easier, hence it is valuable to understand how to use them. Besides the numpy array, numpy contains a lot of useful functions and libraries (operations and linear algebra on the arrays, random number generators etc.), which we will not cover now but will use later on. The common way to import numpy is with
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Here we state that we want to `import numpy` and later refer to it as `np`, meaning that we can access functions within numpy with `np.functionName()`. One can use different reference names as well, or simply `import numpy` and refer to functions with `numpy.functionName()`. We could also import functions directly from numpy, for example `from numpy import cos`, or import everything with `from numpy import *`; however, this is generally not advised, since some libraries have functions with the same name. For example, there is a `cos` implementation both in `math` and in numpy, and the two implementations do not have the same functionality (e.g. the numpy version can take a list as input, the math version cannot). Let's look at numpy arrays. What would you expect when you execute this?
###Code
a=np.array([1,2,3])
a[0]=2.3
a
b=np.array([0,1.2,'one'])
print(b)
b[0]=42.0
b
###Output
_____no_output_____
###Markdown
In the first case, Python interprets the definition as an array that stores only integers, so upon changing an element the float is silently truncated to an integer: you should see `array([2, 2, 3])`. You have to be careful, since a normal Python list like `[0,0,0]` can be updated with floats, but `np.array([0,0,0])` cannot; as we will see, an array of zeros should instead be initialized with `np.zeros()`. In the second case essentially the same thing happens: Python cannot interpret `'one'` as a number, so it broadcasts the string type to all other elements, and upon modifying an element the new value is automatically converted to a string (you should see something like `array(['42.0', '1.2', 'one'], dtype='<U32')`). One can, however, directly give a type to a numpy array (although when working with floats it is often just better to write out the decimals).
###Code
c=np.array([1, 2, 3], dtype='float32')
print(c)
c[0]=2.3
c
###Output
_____no_output_____
###Markdown
Numpy provides several functions to initialize arrays (even 2D), which are probably familiar to Matlab users:
###Code
a=np.zeros(3)
print(a)
A=np.zeros((3,3))
print(A)
b=np.ones(3)
print(b)
c=np.linspace(-5,5,11)
print(c)
print(A.shape)
print(len(c))
###Output
_____no_output_____
###Markdown
Indexing of numpy arrays is similar to that of lists; however, for matrices we access the elements with comma-separated indices.
###Code
B=np.arange(9)
print(B)
print(B[2:5])
print(B[2:8:2]) #start:stop:step
C=B.reshape((3,3))
print(C)
print(C[:,1]) #second column
print(C[1,:]) #second row
print(C[1,1])
###Output
_____no_output_____
###Markdown
With numpy arrays we can easily perform aggregations; see the examples below. This is especially powerful when performed on large arrays.
###Code
print(np.min(c))
print(np.max(c))
print(np.sum(c))
print(np.mean(c))
###Output
_____no_output_____
###Markdown
We can also perform operations with the arrays and apply mathematical functions to them. Notice that doing the same with lists would require loops.
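As an aside, this vectorized behaviour is exactly why the numpy versions of `cos`, `sin`, etc. were preferred over the `math` module earlier; a minimal sketch:
```
import math
import numpy as np

angles = [0.0, 1.5707963]
np.cos(angles)        # works: numpy maps over the whole list
math.cos(angles[0])   # math.cos only accepts a single scalar
# math.cos(angles)    # would raise a TypeError
```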
###Code
print(c+100)
print(c+c)
print(3*c)
print(np.sin(c))
###Output
_____no_output_____
###Markdown
What makes numpy arrays even more powerful is that they can be masked, i.e. we can filter an array based on conditions applied to another array.
###Code
energy=np.logspace(-3,7,20) #20 values between 1e-3 and 1e7
energy
values=1/energy
values
energy<1.0
values[energy<1.0]
values*(energy<1.0) #we multiply with bools, so ones and zeros. Keeps the same shape.
###Output
_____no_output_____
###Markdown
Lastly, we can mention that numpy also has functions to read data organized in columns. For example, the file '02-sample.txt' (a table of U-235 neutron capture cross sections) contains two columns of data:
```
Incident energy (eV)   cross section (barn)
1.0E-5       5960.097
1.0625E-5    5782.129
1.125E-5     5619.206
1.1875E-5    5469.321
1.25E-5      5330.822
1.375E-5     5082.714
1.5E-5       4866.305
1.625E-5     4675.372
1.75E-5      4505.28
1.875E-5     4352.493
...
```
We do not need to write a parser to read this file; instead we can use the `np.loadtxt()` function.
###Code
u235=np.loadtxt('02-sample.txt',skiprows=1) #we skip the first row, which is a header
u235
###Output
_____no_output_____
###Markdown
As said before, numpy has a lot of other functionality that we cannot cover in a single datalab, but later we will cover the parts needed for solving the exercises. Matplotlib We saw previously how to do some mathematics with Python, but of course as aspiring scientists or engineers we would like to present our results visually as well. Matplotlib is a multi-platform data visualization library designed to work with numpy arrays. It will allow us to create 2D and 3D plots, and we can even create simple graphics, as we will see. Matplotlib works well on several operating systems and supports several output formats. The basic syntax of Matplotlib will feel familiar to Matlab users, and indeed it is straightforward to create basic plots; however, creating more complicated plots can sometimes feel frustrating. Besides Matplotlib, another mainstream visualization library called Seaborn is also available in the Python universe. We are not going to use it in this course. Simple 2D plot Let us first look at the basic features by plotting the exponential decay curve. The simplest example would look like this.
###Code
import matplotlib.pyplot as plt
Thalf=14.9 #days
lam=np.log(2)/Thalf #1/days
N0=1e6 #number of atoms
time=np.linspace(0,200,50) #in days
atoms=N0*np.exp(-lam*time)
plt.figure()
plt.plot(time,atoms)
plt.show()
###Output
_____no_output_____
###Markdown
As another simple example, we can plot the content of the previously read file (which is in fact the neutron capture cross section of uranium-235). Here we use the `plt.loglog` function to get logarithmic scales, and we set the labels of the axes.
###Code
plt.figure()
plt.loglog(u235[:,0],u235[:,1])
plt.xlabel('energy (eV)')
plt.ylabel('cross section (barn)')
plt.show()
###Output
_____no_output_____
###Markdown
In the following we show how to customize parts of the figure. With `plt.figure` we create a canvas (and can set its size), then `plt.plot` plots the specified X and Y arrays against each other. Here we can set the color, the type of the curve (solid, dashed, etc.), the markers, the marker size or line width (`lw`), and we can add a label. If more `plt.plot` commands were included, more curves would appear on the canvas. The label is shown when a legend is created (`plt.legend`). We can set the labels on the x and y axes (`plt.xlabel`, `plt.ylabel`) and a title (`plt.title`). LaTeX expressions can be used wherever strings describe content (`r'$a=3$'` will be rendered as LaTeX, `a=3` would be rendered as a normal string). With `xlim`, `ylim`, `xticks` and `yticks` we can set the boundaries of the plot and overwrite the default tick positions and labels. We can include a grid (`plt.grid`), and we can add custom horizontal and vertical lines (`plt.axhline` and `plt.axvline`). Note that all these commands are optional, and they have further parameters to tune; almost anything is possible here. Don't be afraid to google for solutions when you want to make your plots pretty. The `matplotlib.pyplot` library has several more plotting options for scatter plots, errorbars, histograms, barplots, charts, etc. You can also change the axes to logarithmic scale (there are functions readily available for this which might sound familiar to previous Matlab users: `plt.loglog`, `plt.semilogx`, `plt.semilogy`). We will use some of these later in the course.
###Code
import numpy as np
import matplotlib.pyplot as plt
Thalf=14.9 #days
lam=np.log(2)/Thalf #1/days
N0=1e6 #number of atoms
time=np.linspace(0,200,1000) #in days
plt.figure(figsize=(10,4))
plt.plot(time,N0*np.exp(-lam*time),'k-',lw=2,label=r'$N_0e^{-\lambda t}$')
plt.title('Exponential Decay')
plt.xlabel('Time',fontsize=14)
plt.ylabel('Number of nuclei',fontsize=14)
plt.xticks([(i)*Thalf for i in range(1,14)], #list of values where ticks are added
[r'$%dT_{1/2}$'%i for i in range(1,14)]) #list of strings written at the ticks
plt.yticks([N0/i for i in [1,2,4,8,16]], #list of values where ticks are added
[r'$N_0/%d$'%i for i in [1,2,4,8,16]]) #list of strings written at the ticks
plt.xlim(0,200.0)
plt.ylim(0,N0)
plt.grid()
plt.axhline(N0/2,xmin=0,xmax=Thalf/200.0,color='r',ls='--') #xmin and max between 0 and 1; ls: linestyle
plt.axvline(Thalf,ymin=0,ymax=1/2,color='r',ls='--')
plt.axhline(N0/4,xmin=0,xmax=2*Thalf/200.0,color='r',ls='--') #xmin and max between 0 and 1
plt.axvline(2*Thalf,ymin=0,ymax=1/4,color='r',ls='--')
plt.axhline(N0/8,xmin=0,xmax=3*Thalf/200.0,color='r',ls='--') #xmin and max between 0 and 1
plt.axvline(3*Thalf,ymin=0,ymax=1/8,color='r',ls='--')
plt.axhline(N0/16,xmin=0,xmax=4*Thalf/200.0,color='r',ls='--') #xmin and max between 0 and 1
plt.axvline(4*Thalf,ymin=0,ymax=1/16,color='r',ls='--')
# Notice that we could have used a for loop for this part
#for i in range(1,5):
# plt.axhline(N0*0.5**i,xmin=0,xmax=i*Thalf/200.0,color='r',ls='--') #xmin and max between 0 and 1
# plt.axvline(i*Thalf,ymin=0,ymax=0.5**i,color='r',ls='--')
plt.legend()
#plt.savefig('myfigure.png',dpi=300)
plt.show()
###Output
_____no_output_____
###Markdown
Simple 2D graphics Matplotlib allows us to create graphics as well: we can define Polygons, Circles, etc. and place them on the canvas. We can also combine these graphics elements with data plots. In the following we want to define a function which becomes zero on the edges of a hexagon (imagine a hexagonal reactor where the neutron population becomes zero on the boundary). We can use matplotlib to draw a hexagon to define our problem. Note that here we also use `plt.annotate` to place text on the canvas, and we switched off the visibility of the right and top axes.
###Code
import matplotlib.pyplot as plt
R=10.0
fig, ax = plt.subplots()
#defining the hexagon through its corners.
polygon=plt.Polygon([[-R,0.0],[-1/2*R,np.sqrt(3)/2*R],[1/2*R,np.sqrt(3)/2*R],
[R,0.0],[1/2*R,-np.sqrt(3)/2*R],[-1/2*R,-np.sqrt(3)/2*R]],facecolor='white',edgecolor='black')
ax.add_artist(polygon)
plt.axvline(0.0,color='black') #we draw a vertical line for y axis
plt.axhline(0.0,color='black') #we draw a horizontal line for x axis
plt.annotate(r'$y=\sqrt{3}R-\sqrt{3}x$',(R*0.8,R/2)) #write this expression to that location
plt.annotate(r'$y=-\sqrt{3}R+\sqrt{3}x$',(R*0.8,-R/2))
plt.annotate(r'$-R$',(-R*1.4,1))
plt.annotate(r'$R$',(R*1.,1))
plt.annotate(r'$y=\frac{\sqrt{3}}{2}R$',(0.,R))
plt.xlim(-R*2.5,R*2.5) #limit the x-axis between -R*2.5, R*2.5
plt.ylim(-R*2.5,R*2.5)
plt.gca().set_aspect('equal', adjustable='box')
plt.gca().spines['right'].set_visible(False)#Switch off frame on right
plt.gca().spines['top'].set_visible(False)#Switch off frame on top
plt.show()
###Output
_____no_output_____
###Markdown
Simple 3D plot We could construct a function from sines or cosines which disappears at the edge of a hexagon of width $R$. One possible solution is$$f(x,y)=\cos\big(\frac{\pi}{\sqrt{3}R}y\big)\cdot \cos\big(\frac{\pi}{2\sqrt{3}R}(y+\sqrt{3}x)\big)\cdot \cos\big(\frac{\pi}{2\sqrt{3}R}(y-\sqrt{3}x)\big)$$We first define a function `hexFunc()`, which returns zero if we are outside of the hexagon and the mathematical function's value if we are inside. For this we use a mask. **Note**: the backslash `\` character tells the Python interpreter that the code continues on the next line; this improves legibility. Then we can create a meshgrid and evaluate the function at each of the grid points. Note how concise numpy is: we didn't need to write any loop to evaluate the function at several grid points. Finally we can plot the surface with the `plot_surface` function. We can also plot a 2D projection/representation of the surface: with `contour` and `contourf` we can visualize contour lines and filled contour lines, and with `imshow` the values are treated as pixels of an image.
###Code
def hexFunc(x,y,R):
"""Function which disappears on the edge of a hexagon
Parameters
----------
    x : ndarray
        meshgrid x values.
    y : ndarray
        meshgrid y values.
    R : float
        size (outer radius) of the hexagon.
"""
z = np.zeros(x.shape)
mask = (y<=np.sqrt(3)/2*R) * \
(y>=-np.sqrt(3)/2*R) * \
(y<=np.sqrt(3)*R-np.sqrt(3)*x) * \
(y>=-np.sqrt(3)*R-np.sqrt(3)*x) * \
(y<=np.sqrt(3)*R+np.sqrt(3)*x) * \
(y>=-np.sqrt(3)*R+np.sqrt(3)*x)
z[mask] =np.cos(y[mask]*np.pi/np.sqrt(3)/R)*\
np.cos((y[mask]+np.sqrt(3)*x[mask])*np.pi/2/np.sqrt(3)/R)*\
np.cos((y[mask]-np.sqrt(3)*x[mask])*np.pi/2/np.sqrt(3)/R)
return z
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
R=10 #cm
X = np.linspace(-R, R, 1000)
Y = np.linspace(-R, R, 1000)
Xn, Yn = np.meshgrid(X, Y)
Z=hexFunc(Xn,Yn,R)
fig = plt.figure()
ax = fig.gca(projection='3d')
surf=ax.plot_surface(Xn, Yn, Z, cmap="plasma")
fig.colorbar(surf)
plt.show()
plt.figure()
plt.contour(Xn,Yn,Z)
plt.axis('equal')
plt.colorbar()
plt.show()
plt.figure()
plt.contourf(Xn,Yn,Z)
plt.axis('equal')
plt.colorbar()
plt.show()
plt.figure()
plt.imshow(Z)
plt.axis('equal')
plt.colorbar()
plt.show()
###Output
_____no_output_____
###Markdown
Scipy The name Scipy is a bit confusing: in a way Scipy is a whole ecosystem including numpy, matplotlib, pandas and other packages, but there is also a package called Scipy within this ecosystem. One could probably spend a whole course just covering the functionality of Scipy: it provides solutions to most engineering/science problems (fitting, optimization, integration, Fourier transforms, etc.). Here we use just one illustrative example of how to solve simple coupled ODE systems with the `solve_ivp` function from the `scipy.integrate` package; later, when we need other functionality, we will introduce other functions. Let's solve the simple decay chain where a parent nuclide decays into a radioactive daughter: $P\rightarrow D \rightarrow$, with decay constants $\lambda_P$ and $\lambda_D$. The differential equations characterizing this process are$$\frac{dN_P}{dt}=-\lambda_P N_P$$$$\frac{dN_D}{dt}=\lambda_P N_P-\lambda_D N_D$$with initial conditions $N_P(t=0)=N_P(0)$ and $N_D(t=0)=0$. The same system can be written in matrix form:\begin{equation}\frac{d}{dt}\begin{pmatrix}N_P \\ N_D\end{pmatrix}=\begin{pmatrix}-\lambda_P & 0 \\ \lambda_P & -\lambda_D\end{pmatrix}\begin{pmatrix}N_P \\ N_D\end{pmatrix}\end{equation}or, using vectors ($N=(N_P \: N_D)$) and matrices,\begin{equation} \dot N=AN\end{equation}We will use the `scipy.integrate.solve_ivp` function to solve this system numerically (note that we do not import the whole scipy library, only this function, and remember that you can access the documentation with `?solve_ivp`) in the form $\dot y=Ay$. The solver requires a function of the form `myDerivative(t,y)` which returns the derivative $Ay$; optionally this function can take extra arguments. This derivative function is passed to `solve_ivp`, along with the time span (the window over which the integration is performed), the initial conditions, the times at which $y$ is evaluated, and any arguments (`args`) needed by the derivative function.`sol=solve_ivp(myDerivative,(Tstart,Tend),[InitialConditions],t_eval=TimesArray,args=(arguments))`
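For this two-member chain an analytic (Bateman) solution also exists, which is handy for cross-checking the numerical result computed below; a minimal sketch (names chosen to match the next cell):
```
import numpy as np

def daughter_analytic(t, N0, lP, lD):
    # N_D(t) for the chain P -> D -> with N_D(0) = 0
    return N0 * lP / (lD - lP) * (np.exp(-lP * t) - np.exp(-lD * t))
```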
###Code
?solve_ivp
def derivDaughter(t,y,lp,ld):
A=np.array([[-lp,0.0],[lp,-ld]])
return np.dot(A,y)
from scipy.integrate import solve_ivp
TP=14.9 #d, parent, and all time units are considered to be in days
lP=np.log(2)/TP
TD=9.9
lD=np.log(2)/TD
N0=50
Tstart=0.0
Tend=90.0 #days
Neval=1001
T_eval=np.linspace(Tstart,Tend, Neval)
sol=solve_ivp(derivDaughter,(Tstart,Tend),[N0,0.0],t_eval=T_eval,args=(lP, lD))
plt.figure()
plt.plot(sol.t,sol.y[0],label='parent')
plt.plot(sol.t,sol.y[1],label='daughter')
plt.xlabel('time (d)')
plt.legend()
plt.ylabel('Number of nuclei')
plt.show()
###Output
_____no_output_____
###Markdown
find() and re It is often required in science and engineering applications to parse various files (for example outputs produced by a software, or data files) and extract some specific information. Python provides basic string methods (https://www.w3schools.com/python/python_ref_string.asp) to find strings within strings. Take a look at the code below. Let's assume that some software reports its final results in the following format, and we are interested in the 'keff' value. We can locate the number by matching the `=` sign and the word `with`. Of course this is not perfect: what happens if a word or substring appears more than once?
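`find` only returns the index of the first occurrence (or -1 if the substring is absent); later matches require passing a start index, as in this sketch:
```
s = 'an estimated keff with an estimated deviation'
first = s.find('estimated')             # index of the first match
second = s.find('estimated', first+1)   # index of the second match
s.find('nothing')                       # -1: substring not found
```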
###Code
resuStr='the final estimated keff = 1.04562 with an estimated standard deviation of 0.00057'
i=resuStr.find('=')
print(i)
j=resuStr.find('with')
print(j)
print(float(resuStr[i+1:j]))
###Output
_____no_output_____
###Markdown
A more accurate solution to problems like this is to use regular expressions. Regular expressions are not part of Python; they form an independent "language" which allows a search pattern to be described as a string. Nevertheless, Python has a package `re` which lets Python users apply regexps. Hardcore geeks probably know all the possible regexp patterns by heart, and I certainly advise you to [read up on them](https://www.rexegg.com/regex-quickstart.html); nevertheless, for this course we will only need to read numbers from strings, and you can most often survive by googling "regexp integer within string" or similar phrases depending on your application. One function of the `re` package is `re.findall`; the first input should be a regular expression as a string, and the second is the string in which you would like to perform the search. Take the regular expression below, which matches floating point numbers. The pattern `[+-]?\d+\.\d+` encodes a floating point number: in `[+-]?` the `?` indicates zero or one occurrence of the preceding element, which in this case is one of the characters in the `[]`. Outside of the brackets, the `+` indicates one or more occurrences of the preceding element, and `\d` indicates a digit, so `\d+` matches one or more digits. In total, `\d+\.\d+` looks for one or more digits, followed by a decimal point, followed by one or more digits. This pattern will not match numbers written as '1.' or '.9932'.
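If those forms mattered, a slightly looser pattern could be used (a sketch, not needed in this course):
```
import re
re.findall(r'[+-]?(?:\d+\.\d*|\.?\d+)', 'values: 1.04562, 1. and .9932 and 42')
# ['1.04562', '1.', '.9932', '42']
```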
###Code
import re
re.findall(r'[+-]?\d+\.\d+', resuStr)
###Output
_____no_output_____
###Markdown
Another typical example a nuclear engineer often faces: splitting a string (for example the name of a nuclide) to get the symbol and the mass number of the nuclide.
###Code
myNuc='U238'
re.split(r'(\d+)', myNuc)
###Output
_____no_output_____
###Markdown
Exercises 1 Construct a function $f(x,y)$ which becomes zero at the edges of a rectangle centered around the origin and with a side length of $a$. First create a 2D graphic representing the rectangle, and annotate one vertical and one horizontal side by giving the equation of that side. Then use `plot_surface` and `contour` to visualize the function. Include the equation of the surface here with LaTeX code: And write your function and code for plotting below.
###Code
#your code comes here
###Output
_____no_output_____
###Markdown
2 Consider the following decay chain $U-234 \rightarrow Th-230 \rightarrow Ra-226 \rightarrow Rn-222 \rightarrow$, which is part of the U-238 decay series. The alpha decays have the following half-lives: 245.5 ky, 75.38 ky, 1602 y and 3.82 d. Use scipy and matplotlib to plot the number of nuclides and the activity between 0 and 15 kiloyears if $N_{U-234}(t=0)=5000000$ and the daughters are not present at $t=0$.
- Inspect the half-lives: what do you expect, and what can cause numerical issues in this case?
- What is your expectation for how the activities of Ra-226 and Rn-222 will compare to each other?
###Code
#your code comes here
###Output
_____no_output_____
###Markdown
3 The minimum energy required to disassemble the nucleus of an atom into its components is the binding energy. In this exercise you are going to compute the binding energy per nucleon for several nuclides in two ways:
1. based on the relative atomic mass of the nuclides
2. based on a semi-empirical formula.

If we subtract the mass of the nucleus from the mass of the nucleons and express it in energy units, we get the binding energy: $B(A,Z)=[Z\cdot m_p+(A-Z)\cdot m_n-M(A,Z)]c^2$ For a given nuclide one can estimate the binding energy with the semi-empirical Bethe–Weizsäcker formula, which has various forms in the literature. Here you can use the following form: $B(A,Z)=15.75A-94.8\frac{(A/2 - Z)^2}{A}-17.8A^{2/3}-0.71Z^2A^{-1/3}+34\delta A^{-3/4}$ where $\delta = 1$ for even-even nuclei, $\delta = -1$ for odd-odd nuclei and $\delta = 0$ otherwise. (Note that all terms in the formula have a physical meaning: volume term, asymmetry term, surface term, Coulomb term and pairing term. Make sure based on the textbook that you understand these.) In the first formula one can express the neutron and proton masses in unified mass units (u). Also notice that most tabulated data (including the table used here) give the relative atomic mass of isotopes, not the mass of the bare nucleus; thus the mass of the electrons needs to be taken into account as well. (The reason is that for high-Z isotopes it is difficult to remove all the electrons, so neutral atoms are measured instead.) You are given the relative mass (in unified mass units) of several nuclides (downloaded from https://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl?ele=&ascii=html) as a nested Python dictionary `nuclides`. Dictionaries store data values in key:value pairs; a dictionary is basically an unordered collection with "named" entries. In this case the outer keys are nuclide identifiers, and the values are also dictionaries with the keys Z, A and m. Your tasks are to
1. implement two functions which calculate $\varepsilon=B(A,Z)/A$ with the two methods listed above (for the pairing term, see the sketch below)
2. apply the functions to the nuclides listed in `nuclides`
3. plot the binding energy curves, using a solid line for the semi-empirical formula and red dot markers `ro` for the results based on the relative mass
4. find the nuclide with the highest binding energy per nucleon, and use `plt.annotate` to include the name of that nuclide in the figure above the corresponding marker.
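As a hint, a sketch of the pairing term $\delta$ from the formula above (the rest of the exercise is left to you):
```
def pairing_delta(A, Z):
    N = A - Z
    if Z % 2 == 0 and N % 2 == 0:
        return 1    # even-even
    if Z % 2 == 1 and N % 2 == 1:
        return -1   # odd-odd
    return 0        # odd A
```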
###Code
import numpy as np
import matplotlib.pyplot as plt
me=5.48579909070e-4 #u
mn=1.00866491588 #u
mp=1.007276466621 #u
muc2=931.49410242 #MeV
nuclides={'H2': {'Z': 1, 'A': 2, 'm': 2.01410177812},
'H3': {'Z': 1, 'A': 3, 'm': 3.0160492779},
'He3': {'Z': 2, 'A': 3, 'm': 3.0160293201},
'He4': {'Z': 2, 'A': 4, 'm': 4.002603254},
'Li6': {'Z': 3, 'A': 6, 'm': 6.0151228874},
'O16': {'Z': 8, 'A': 16, 'm': 15.99491461957},
'S34': {'Z': 16, 'A': 34, 'm': 33.96786701},
'Fe56': {'Z': 26, 'A': 56, 'm': 55.9349375},
'Ni62': {'Z': 28, 'A': 62, 'm': 61.92834537},
'Kr84': {'Z': 36, 'A': 84, 'm': 83.9114977282},
'Sn119': {'Z': 50, 'A': 119, 'm': 118.90331117},
          'Tl205': {'Z': 81, 'A': 205, 'm': 204.9744278},
'U238': {'Z': 92, 'A': 238, 'm': 238.0507884}}
def BAZ(A,Z,m):
"""Function to calculate the binding energy per nucleon
Parameters
----------
A : int
Mass number of nuclide
Z : int
Proton number of nuclide
m : float
Mass of the nuclide
"""
eps=#YOUR CODE HERE
return eps/A
def BAZ_BW(A,Z):
"""Function to calculate the binding energy per nucleon
with the semi-empirical formula
Parameters
----------
A : int
Mass number of nuclide
Z : int
Proton number of nuclide
"""
#your code comes here
return eps/A
#your code to apply these functions and to plot the results comes here.
###Output
_____no_output_____ |
examples/Give Me Your Attention.ipynb | ###Markdown
Neural Language Model Given some content $C$ as a sequence of words, and some words $x_1, \ldots, x_n$, a language model (LM) models the conditional probability\begin{equation} P \left( x_{n+1} \mid x_1, \ldots, x_n; C \right).\end{equation} The original neural language model, a language model implemented with a neural network, used an RNN. In the case of neural machine translation (as an LM), it is an RNN auto-encoder. The encoder encodes the sentence from language A, as a sequence of words in A, into a vector; the decoder then decodes the vector into language B, as another sequence of words in B. Drawback of RNN Encoder The information contained in the final encoding vector is limited by the dimension of the vector. A `float32` value in the range $(-1, 1)$ carries $24$ bits. If the encoding dimension is $1000$ (large), then the total information is $1000 \times 24 = 24000$ bits. Can these bits encode all the poems of Shakespeare? No way! The solution to this obstacle is, naturally, to use the full encoded sequence instead of only its last element. This retains almost all the information. However, at every step of decoding, not all elements in the sequence are relevant, so we, or rather the machine, have to figure out the relevance. This is attention. Focus your Attention! A detailed architecture:
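In equations, for a decoder state $s$ (the query) and encoder outputs $h_1, \ldots, h_L$ (serving as both keys and values), the additive attention implemented below computes\begin{equation} e_i = a([s; h_i]), \qquad \alpha_i = \frac{\exp e_i}{\sum_j \exp e_j}, \qquad c = \sum_i \alpha_i h_i,\end{equation}where $a$ is a small neural network (the `energy` method of the class below) and $c$ is the context handed to the decoder.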
###Code
import abc
import tensorflow as tf
class BaseAttention(abc.ABC):
"""Abstract base class of attention."""
# Abbreviations for shapes:
# batch_shape -> B (list of `int`s)
# seqlen -> L (`int`)
# query_dim -> Q (`int`)
# key_dim -> K (`int`)
# value_dim -> V (`int`)
def __call__(self, query, keys, values, name='Attention'):
"""Returns the context, with attention-score.
Args:
query: Tensor with shape `batch_shape + [query_dim]`.
keys: Tensor with shape `batch_shape + [seqlen, key_dim]`.
values: Tensor with shape `batch_shape + [seqlen, value_dim]`.
name: String.
Returns:
Tuple of two tensors. The first with shape `batch_shape + [value_dim]`
as the context; and the second with shape `batch_shape + [seqlen]` as
the attention-score.
"""
with tf.name_scope(name):
score = self._score(query, keys) # B + [L, 1]
# score * values: B + [L, V]
context = tf.reduce_sum(score * values, axis=-2) # B + [V]
score = tf.squeeze(score, axis=-1) # B + [L]
return score, context
def _score(self, query, keys, name='attention_score'):
"""Returns the attention-score.
Args:
query: Tensor with shape `batch_shape + [query_dim]`.
keys: Tensor with shape `batch_shape + [seqlen, key_dim]`.
name: String.
Returns:
Tensor with shape `batch_shape + [seqlen, 1]`. The additional `1`
is made for convienence for being contracted by a "values" tensor
with shape `batch_shape + [seqlen, value_dim]` along the `seqlen`
axis.
"""
with tf.name_scope(name):
with tf.name_scope('repeat'): # along `seqlen`-axis.
query = tf.expand_dims(query, axis=-2) # B + [1, Q]
query = tf.tile(query, self._get_repeats(keys)) # B + [L, Q]
concated = tf.concat([query, keys], axis=-1) # B + [L, Q+K]
energy = self.energy(concated) # B + [L, 1]
# Softmax along the `L`-axis
attention_score = tf.nn.softmax(energy, axis=-2) # B + [L, 1]
return attention_score
def _get_repeats(self, keys):
"""Returns the `repeats` argument of the `tf.tile()` in `self.__call__()`."""
with tf.name_scope('repeats'):
shape = keys.get_shape().as_list()
rank = len(shape)
seqlen = shape[-2]
return [1] * (rank - 2) + [seqlen, 1]
@abc.abstractmethod
def energy(self, x):
"""
Args:
x: Tensor with shape `batch_shape + [query_dim + key_dim]`.
Returns:
Tensor with shape `batch_shape + [1]`.
"""
pass
###Output
_____no_output_____
###Markdown
A Simple Example We walk through a simple example found in [Ng's course](https://github.com/GSimas/Deep-LearningAI/tree/master/Course%205/Week%203/Neural%20Machine%20Translation): a simple neural machine translation model that converts dates from the natural-language format used by humans to the standard format used by machines. Date-Format Data
###Code
import numpy as np
from nmt_utils import load_dataset, preprocess_data
dataset, human_vocab, machine_vocab, inv_machine_vocab = load_dataset(10000)
###Output
100%|██████████| 10000/10000 [00:00<00:00, 17223.09it/s]
###Markdown
Some instances:
###Code
dataset[:10]
###Output
_____no_output_____
###Markdown
The vocabularies for both human and machine are character-level:
###Code
print('Human vocabulary:', human_vocab, '\n')
print('Machine vocabulary:', machine_vocab)
###Output
Human vocabulary: {' ': 0, '.': 1, '/': 2, '0': 3, '1': 4, '2': 5, '3': 6, '4': 7, '5': 8, '6': 9, '7': 10, '8': 11, '9': 12, 'a': 13, 'b': 14, 'c': 15, 'd': 16, 'e': 17, 'f': 18, 'g': 19, 'h': 20, 'i': 21, 'j': 22, 'l': 23, 'm': 24, 'n': 25, 'o': 26, 'p': 27, 'r': 28, 's': 29, 't': 30, 'u': 31, 'v': 32, 'w': 33, 'y': 34, '<unk>': 35, '<pad>': 36}
Machine vocabulary: {'-': 0, '0': 1, '1': 2, '2': 3, '3': 4, '4': 5, '5': 6, '6': 7, '7': 8, '8': 9, '9': 10}
###Markdown
Preprocessing to model inputs: 1. replacing unknown characters with `<unk>`; 1. post-padding with `<pad>`; 1. converting the inputs and outputs (targets) to one-hot format.
###Code
input_seqlen = 30
output_seqlen = 10
X, y, X_oh, y_oh = preprocess_data(
dataset, human_vocab, machine_vocab, input_seqlen, output_seqlen)
index = 0
print("Source date:", dataset[index][0])
print("Target date:", dataset[index][1])
print()
print("Source after preprocessing (indices):", X[index])
print("Target after preprocessing (indices):", y[index])
print()
print("Source after preprocessing (one-hot):", X_oh[index])
print("Target after preprocessing (one-hot):", y_oh[index])
###Output
Source date: 9 may 1998
Target date: 1998-05-09
Source after preprocessing (indices): [12 0 24 13 34 0 4 12 12 11 36 36 36 36 36 36 36 36 36 36 36 36 36 36
36 36 36 36 36 36]
Target after preprocessing (indices): [ 2 10 10 9 0 1 6 0 1 10]
Source after preprocessing (one-hot): [[0. 0. 0. ... 0. 0. 0.]
[1. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
...
[0. 0. 0. ... 0. 0. 1.]
[0. 0. 0. ... 0. 0. 1.]
[0. 0. 0. ... 0. 0. 1.]]
Target after preprocessing (one-hot): [[0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0.]
[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]]
###Markdown
RNN Auto-Encoder with Attention The model is illustrated by the architecture described above. And now let's implement it.
###Code
from tensorflow.keras.layers import Bidirectional, Dense, Dropout
from tfutils.initializer import GlorotInitializer
if tf.test.is_gpu_available():
from tensorflow.keras.layers import CuDNNLSTM as LSTM
else:
from tensorflow.keras.layers import LSTM
class Attention(BaseAttention):
def __init__(self, energy_units, **kwargs):
super().__init__(**kwargs)
self._layers = []
for n in energy_units:
self._layers += [tf.layers.Dense(n, activation=tf.nn.relu),
tf.layers.Dropout()]
self._layers += [tf.layers.Dense(1)]
def energy(self, x):
with tf.name_scope('energy'):
for layer in self._layers:
x = layer(x)
return x
def model_fn(features, labels, mode, params):
"""The `labels` is one-hot."""
pre_rnn = Bidirectional(LSTM(params['pre_lstm_units'],
return_sequences=True))
attention = Attention(params['energy_units'])
post_rnn = LSTM(params['post_lstm_units'],
return_state=True)
# Returns the logits
output_layer = Dense(params['output_vocab_size'])
# [batch_size, post_lstm_units]
s = features['init_s']
c = features['init_c']
# [batch_size, input_seqlen, input_vocab_size]
onehot_inputs = features['onehot_inputs']
# [batch_size, input_seqlen, pre_lstm_units * 2]
a_seq = pre_rnn(onehot_inputs)
# Implements the loop
output_seqlen = tf.constant(params['output_seqlen'])
logits = tf.TensorArray(dtype=s.dtype, size=output_seqlen)
attention_scores = tf.TensorArray(dtype=s.dtype, size=output_seqlen)
def cond(logits, attention_scores, s, c, step):
return tf.less(step, output_seqlen)
def body(logits, attention_scores, s, c, step):
# [batch_size, input_seqlen] and
# [batch_size, pre_lstm_units * 2]
attention_score, context = attention(s, a_seq, a_seq)
with tf.name_scope('logit'):
# Prepare for inputting `post_rnn`
# [batch_size, 1, pre_lstm_units * 2]
context = tf.expand_dims(context, axis=-2)
# [batch_size, post_lstm_units]
s, _, c = post_rnn(context, initial_state=[s, c])
# [batch_size, output_vocab_size]
logit = output_layer(s)
logits = logits.write(step, logit)
attention_scores = attention_scores.write(step, attention_score)
return logits, attention_scores, s, c, (step + 1)
logits, attention_scores, *_ = tf.while_loop(
cond, body, loop_vars=[logits, attention_scores, s, c, 0])
# Postprocess the TensorArray
# [output_seqlen, batch_size, output_vocab_size]
logits = logits.stack()
# [batch_size, output_seqlen, output_vocab_size]
logits = tf.transpose(logits, [1, 0, 2])
# [output_seqlen, batch_size, input_seqlen]
attention_scores = attention_scores.stack()
# [batch_size, output_seqlen, input_seqlen]
attention_scores = tf.transpose(attention_scores, [1, 0, 2])
# [batch_size, output_seqlen, output_vocab_size]
predict_probs = tf.nn.softmax(logits, axis=-1)
# [batch_size, output_seqlen]
predict_classes = tf.argmax(logits, axis=-1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'logits': logits,
'probs': predict_probs,
'class_ids': predict_classes,
'attention_scores': attention_scores,
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
loss = tf.losses.softmax_cross_entropy(labels, logits)
# [batch_size, output_seqlen]
label_classes = tf.argmax(labels, axis=-1)
accuracy = tf.metrics.accuracy(
label_classes, predict_classes, name='acc_op')
metrics = {'accuracy': accuracy}
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=metrics)
assert mode == tf.estimator.ModeKeys.TRAIN
optimizer = tf.train.AdamOptimizer(params['lr'])
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
tf.reset_default_graph()
params={
'lr': 0.05,
'output_vocab_size': y_oh.shape[-1],
'pre_lstm_units': 32,
'energy_units': [128],
'post_lstm_units': 64,
'output_seqlen': y_oh.shape[-2],
}
estimator = tf.estimator.Estimator(model_fn=model_fn,
params=params)
print('\n', params)
###Output
INFO:tensorflow:Using default config.
WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmp4n0pb66v
INFO:tensorflow:Using config: {'_model_dir': '/tmp/tmp4n0pb66v', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true
graph_options {
rewrite_options {
meta_optimizer_iterations: ONE
}
}
, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7fc45ae1ecc0>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}
{'lr': 0.05, 'output_vocab_size': 11, 'pre_lstm_units': 32, 'energy_units': [128], 'post_lstm_units': 64, 'output_seqlen': 10}
###Markdown
Train and Evaluate the Model
###Code
def train_input_fn(X_oh, y_oh, init_s, init_c, batch_size):
"""An input function for training"""
features = {'onehot_inputs': X_oh, 'init_s': init_s, 'init_c': init_c}
dataset = tf.data.Dataset.from_tensor_slices((features, y_oh))
dataset = dataset.shuffle(1000).repeat().batch(batch_size)
return dataset
def eval_input_fn(X_oh, y_oh, init_s, init_c, batch_size):
"""An input function for evaluating and predicting."""
features = {'onehot_inputs': X_oh, 'init_s': init_s, 'init_c': init_c}
if y_oh is None:
dataset = tf.data.Dataset.from_tensor_slices(features)
else:
dataset = tf.data.Dataset.from_tensor_slices((features, y_oh))
dataset = dataset.batch(batch_size) # shall NOT shuffle and repeat!
return dataset
def get_inits(n_data, params):
init_s = np.zeros([n_data, params['post_lstm_units']], dtype='float32')
init_c = np.zeros([n_data, params['post_lstm_units']], dtype='float32')
return init_s, init_c
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_oh, y_oh)
init_s, init_c = get_inits(X_train.shape[0], params)
estimator.train(
input_fn=lambda: train_input_fn(X_train, y_train, init_s, init_c, 100),
steps=2000)
init_s, init_c = get_inits(X_test.shape[0], params)
estimator.evaluate(input_fn=lambda: eval_input_fn(
X_test, y_test, init_s, init_c, batch_size=128))
###Output
INFO:tensorflow:Calling model_fn.
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Starting evaluation at 2019-04-12-09:31:08
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Restoring parameters from /tmp/tmp4n0pb66v/model.ckpt-2000
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
INFO:tensorflow:Finished evaluation at 2019-04-12-09:31:09
INFO:tensorflow:Saving dict for global step 2000: accuracy = 1.0, global_step = 2000, loss = 4.37774e-05
INFO:tensorflow:Saving 'checkpoint_path' summary for global step 2000: /tmp/tmp4n0pb66v/model.ckpt-2000
###Markdown
It Works
###Code
from typing import List, Dict
def pad(input_seqlen: int,
chars: List[str],
) -> List[str]:
chars = chars[:input_seqlen]
chars += ['<pad>'] * (input_seqlen - len(chars))
return chars
def unk(input_vocab: Dict[str, int],
chars: List[str],
) -> List[str]:
chars = [c if c in input_vocab else '<unk>' for c in chars]
return chars
def onehot(input_vocab: Dict[str, int],
chars: List[str],
) -> List[List[int]]:
# To indices
indices = [input_vocab[c] for c in chars]
# To onehot, shape `[input_seqlen, input_vocab_size]`
return [[1 if i == index else 0 for i, _ in enumerate(input_vocab)]
for index in indices]
def preprocess_sources(sources: List[str],
input_seqlen: int,
input_vocab: Dict[str, int]
) -> np.array:
def preprocess_source(source: str) -> List[List[int]]:
chars = [c for c in source.lower()]
chars = unk(input_vocab, chars)
chars = pad(input_seqlen, chars)
return onehot(input_vocab, chars)
# [len(sources), input_seqlen, input_vocab_size]
return np.array([preprocess_source(_) for _ in sources],
dtype='float32')
def postprocess(sources: List[str], predictions: dict) -> List[dict]:
all_processed = []
for src, pred in zip(sources, predictions):
prediction = ''.join(inv_machine_vocab[_] for _ in pred['class_ids'])
processed = {
'source': src,
'prediction': prediction,
'attention_scores': pred['attention_scores'],
'logits': pred['logits']
}
all_processed.append(processed)
return all_processed
sources = [
'3 May 2079', '5 April 09', '21th of August 2016', 'Tue 10 Jul 2007',
'Saturday May 9 2018', 'March 3 2001', 'March 3rd 2001', '1 March 2001'
]
X_oh_pred = preprocess_sources(sources, X_oh.shape[-2], human_vocab)
n_data_pred = X_oh_pred.shape[0]
input_seqlen = X_oh_pred.shape[1]
init_s_pred, init_c_pred = get_inits(len(sources), params)
predicted = estimator.predict(input_fn=lambda: eval_input_fn(
X_oh_pred, None, init_s_pred, init_c_pred, batch_size=128))
predictions = postprocess(sources, predicted)
for src, pred in zip(sources, predictions):
print(src)
print(pred['prediction'])
print()
###Output
INFO:tensorflow:Calling model_fn.
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Restoring parameters from /tmp/tmp4n0pb66v/model.ckpt-2000
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
3 May 2079
2979-05-03
5 April 09
2009-04-05
21th of August 2016
2016-08-21
Tue 10 Jul 2007
2007-07-10
Saturday May 9 2018
2018-05-09
March 3 2001
2001-03-03
March 3rd 2001
2001-03-03
1 March 2001
2001-03-01
###Markdown
Why Attention Attention has one advantage that almost every other NN architecture lacks: explainability!
###Code
import matplotlib.pyplot as plt
from typing import List
def plot_attention(attention: np.array,
source: str,
prediction: str,
figsize=(7, 7)):
def preprocess(s: str) -> List[str]:
return ['*' if c == ' ' else c for c in s.lower()]
source = preprocess(source)
prediction = preprocess(prediction)
attention = attention[:len(prediction), :len(source)]
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(1, 1, 1)
ax.matshow(attention, cmap='viridis')
# Set axes
fontdict = {'fontsize': 12}
ax.set_xticks(range(0, len(source)))
ax.set_xticklabels(source, fontdict=fontdict)
ax.set_yticks(range(0, len(prediction)))
ax.set_yticklabels(prediction, fontdict=fontdict)
plt.show()
for pred in predictions:
plot_attention(pred['attention_scores'], pred['source'], pred['prediction'])
###Output
_____no_output_____ |
Data_Acquisition/as3uj_web_crawler.ipynb | ###Markdown
Reading block dates in from ipblocks
###Code
#ipblocks = pd.read_csv("/home/ec2-user/SageMaker/bucket/wiki_trust/ipblocks_fulldump_new.csv")
#del ipblocks['Unnamed: 0']
ipblocks = pd.read_csv("/home/ec2-user/SageMaker/bucket/wiki_trust/ipblocks_fulldump_20190223.txt",sep='\t')
ipblocks.tail()
ipblocks = ipblocks[['ipb_address','date']]
ipblocks.tail()
###Output
_____no_output_____
###Markdown
Removing the null rows
###Code
ipblocks.isnull().sum()
ipblocks.shape
ipblocks = ipblocks[ipblocks['ipb_address'].isnull()==False]
ipblocks.isnull().sum()
ipblocks.shape
###Output
_____no_output_____
###Markdown
Developing for a single user (skip to the iterative code)
###Code
ipblocks[ipblocks['ipb_address']=='Sad.tbhs']
buser = 'Sad.tbhs'
bdate = '20190217'
new_url_1 = 'https://en.wikipedia.org/w/index.php?title=Special:Contributions&offset='+bdate+'235959&limit=20&contribs=user&target='+buser+'&namespace=3&tagfilter=&start=&end='
new_url_1
response1 = requests.get(new_url_1)
html1 = response1.content
soup1= BeautifulSoup(html1,'html.parser')
print(soup1.prettify())
type(soup1)
###Output
_____no_output_____
###Markdown
Shortening to only needed lines
###Code
list(soup1.find('div',id='mw-content-text').children)[4].get_text()
for i in list(soup1.find('div',id='mw-content-text')):
try:
if i.get_text() == 'No changes were found matching these criteria.\n':
print('yes')
else:
print('no')
except:
continue
if list(soup1.find('div',id='mw-content-text').children)[1].get_text() == 'No changes were found matching these criteria.\n':
print('yes')
soup1 = soup1.find('ul',class_='mw-contributions-list')
soup1
type(soup1)
lines = soup1.findAll('li')
lines
type(lines)
len(lines)
type(lines[0])
list(lines[0].children)
date = list(lines[0].children)[0].get_text()
date
char_change = list(lines[0].children)[6].get_text()
char_change
li = list(lines[0].children)[2]
li
type(li)
list(li.children)
diff = list(li.children)[0]
diff
type(diff)
type(list(diff.children)[0])
list(diff.children)[0]
link = list(diff.children)[0].get('href')
link
###Output
_____no_output_____
###Markdown
Now trying this iteratively to get all 20 links for namespace3
###Code
new_url_1 = 'https://en.wikipedia.org/w/index.php?title=Special:Contributions&offset='+bdate+'235959&limit=20&contribs=user&target='+buser+'&namespace=3&tagfilter=&start=&end='
new_url_1
response1 = requests.get(new_url_1)
html1 = response1.content
soup1= BeautifulSoup(html1,'html.parser')
soup1 = soup1.find('ul',class_='mw-contributions-list')
lines = soup1.findAll('li')
dates = []
char_changes = []
links = []
for line in lines:
date = list(line.children)[0].get_text()
char_change = list(line.children)[6].get_text()
li = list(line.children)[2]
diff = list(li.children)[0]
if(list(diff.children)[0]=='diff'):
continue
link = list(diff.children)[0].get('href')
dates.append(date)
char_changes.append(char_change)
links.append(link)
dates
char_changes
links
###Output
_____no_output_____
###Markdown
Similarly adding the lines from namespace1
###Code
new_url_2 = 'https://en.wikipedia.org/w/index.php?title=Special:Contributions&offset='+bdate+'235959&limit=20&contribs=user&target='+buser+'&namespace=1&tagfilter=&start=&end='
new_url_2
response2 = requests.get(new_url_2)
html2 = response2.content
soup2= BeautifulSoup(html2,'html.parser')
print(soup2.prettify())
soup2 = soup2.find('ul',class_='mw-contributions-list')
lines = soup2.findAll('li')
lines
#list(lines[1].children)[0].get_text()
#list(lines[1].children)[6].get_text()
#list(lines[1].children)[2]
#li = list(lines[1].children)[2]
#li
#list(li.children)
#diff = list(li.children)[0]
#diff
#list(diff.children)[0]
#list(diff.children)[0].get('href')
#link = list(diff.children)[0].get('href')
#link
for line in lines:
date = list(line.children)[0].get_text()
char_change = list(line.children)[6].get_text()
li = list(line.children)[2]
diff = list(li.children)[0]
if(list(diff.children)[0]=='diff'):
continue
link = list(diff.children)[0].get('href')
dates.append(date)
char_changes.append(char_change)
links.append(link)
dates
char_changes
links
user = pd.DataFrame({
"user": buser,
"dates": dates,
"char_changes": char_changes,
"links":links
})
user
###Output
_____no_output_____
###Markdown
Stage 1: Iterating through the list of users to get revision links (read from pandas)
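Note: when making thousands of requests like this, it is usually wise to reuse the HTTP connection and throttle the request rate. A minimal sketch (the half-second pause is an arbitrary assumption):
```
import time
import requests

session = requests.Session()  # reuse the TCP connection across requests

def polite_get(url, pause=0.5):
    # fetch a page, then pause so we do not hammer the server
    resp = session.get(url, timeout=30)
    time.sleep(pause)
    return resp.content
```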
###Code
#blocked = pd.read_csv("/home/ec2-user/SageMaker/bucket/wiki_trust/revisions_data/csvs_stored/blocked_users_1718.csv")
blocked = pd.read_csv("/home/ec2-user/SageMaker/bucket/wiki_trust/revisions_data/csvs_stored/ipb_remaining3.csv")
blocked.head(20)
blocked.shape
ipb = blocked.iloc[20000:,]
#ipb = blocked
ipb
ipb = pd.merge(ipb,ipblocks,how="left",left_on=["rev_user_text"],right_on=["ipb_address"])
ipb.head()
ipb.isnull().sum()
del ipb['ipb_address']
ipb.head()
buser = []
dates = []
char_changes = []
links = []
exc_count1 = 0
exc_count2 = 0
exc_count3 = 0
exc_count4 = 0
user_less_than3 = []
namespace = []
user_all = pd.DataFrame({"user": buser,"dates": dates,"char_changes": char_changes,"links":links})
user_all
start = time.time()
for index, row in ipb.iterrows():
buser = row['rev_user_text']
bdate = str(row['date'])
# For namespace 3
dates = []
char_changes = []
links = []
flag = 0
new_url_1 = 'https://en.wikipedia.org/w/index.php?title=Special:Contributions&offset='+bdate+'235959&limit=20&contribs=user&target='+buser+'&namespace=3&tagfilter=&start=&end='
try:
response1 = requests.get(new_url_1)
html1 = response1.content
soup1= BeautifulSoup(html1,'html.parser')
except:
exc_count4 = exc_count4 + 1
print('Connection error at {0}'.format(new_url_1))
continue
for i in list(soup1.find('div',id='mw-content-text')):
try:
if i.get_text() == 'No changes were found matching these criteria.\n':
flag = 1
except:
continue
if flag == 1:
exc_count1 = exc_count1+1
continue
else:
try:
soup1 = soup1.find('ul',class_='mw-contributions-list')
lines = soup1.findAll('li')
if len(lines)<3:
user_less_than3.append(buser)
namespace.append('User Talk')
continue
except:
exc_count2 = exc_count2+1
continue
for line in lines:
date = list(line.children)[0].get_text()
char_change = list(line.children)[6].get_text()
li = list(line.children)[2]
diff = list(li.children)[0]
if(list(diff.children)[0]=='diff'):
exc_count3 = exc_count3+1
continue
link = list(diff.children)[0].get('href')
dates.append(date)
char_changes.append(char_change)
links.append(link)
user = pd.DataFrame({"user": buser,"dates": dates,"char_changes": char_changes,"links":links})
frames = [user_all,user]
user_all = pd.concat(frames)
for index, row in ipb.iterrows():
buser = row['rev_user_text']
bdate = str(row['date'])
# For namespace 1
dates = []
char_changes = []
links = []
flag = 0
new_url_1 = 'https://en.wikipedia.org/w/index.php?title=Special:Contributions&offset='+bdate+'235959&limit=20&contribs=user&target='+buser+'&namespace=1&tagfilter=&start=&end='
try:
response1 = requests.get(new_url_1)
html1 = response1.content
soup1= BeautifulSoup(html1,'html.parser')
except:
exc_count4 = exc_count4 + 1
print('Connection error at {0}'.format(new_url_1))
continue
for i in list(soup1.find('div',id='mw-content-text')):
try:
if i.get_text() == 'No changes were found matching these criteria.\n':
flag = 1
except:
continue
if flag == 1:
exc_count1 = exc_count1+1
continue
else:
try:
soup1 = soup1.find('ul',class_='mw-contributions-list')
lines = soup1.findAll('li')
if len(lines)<3:
user_less_than3.append(buser)
namespace.append('Article Talk')
continue
except:
exc_count2 = exc_count2+1
continue
for line in lines:
date = list(line.children)[0].get_text()
char_change = list(line.children)[6].get_text()
li = list(line.children)[2]
diff = list(li.children)[0]
if(list(diff.children)[0]=='diff'):
exc_count3 = exc_count3+1
continue
link = list(diff.children)[0].get('href')
dates.append(date)
char_changes.append(char_change)
links.append(link)
user = pd.DataFrame({"user": buser,"dates": dates,"char_changes": char_changes,"links":links})
frames = [user_all,user]
user_all = pd.concat(frames)
end = time.time()
print(end - start)
print(exc_count1,exc_count2,exc_count3,exc_count4)
user_all
###Output
_____no_output_____
###Markdown
Adding the required https prefix to the links
###Code
user_all['links'] = 'https://en.wikipedia.org'+ user_all['links']
user_all
len(set(user_all['user']))
###Output
_____no_output_____
###Markdown
4702 users out of 10000 have namespace 1 and 3 diff data available
3338 out of the next 10000 have namespace 1 and 3 diff data available
2482 out of the next 10000 have namespace 1 and 3 diff data available
2543 out of the next 10000 have namespace 1 and 3 diff data available
6272 out of the top 11303
###Code
ipb_userlist = set(ipb['rev_user_text'])
available = set(user_all['user'])
no_data = ipb_userlist - available
list(no_data)
len(no_data)
user_all.to_csv('/home/ec2-user/SageMaker/bucket/wiki_trust/revisions_data/csvs_stored/blocked_more6.csv')
u_3 = pd.DataFrame({"rev_user_text":user_less_than3,"namespace":namespace})
u_3
u_3.to_csv('/home/ec2-user/SageMaker/bucket/wiki_trust/revisions_data/csvs_stored/less_than3_examples3.csv')
###Output
_____no_output_____
###Markdown
Stage 2 - iterating through all the revision diff links Developing for a single user (skip to the iterative code)
###Code
new_url_1 = list(user_all['links'])[25]
new_url_1
response1 = requests.get(new_url_1)
html1 = response1.content
soup1= BeautifulSoup(html1,'html.parser')
print(soup1.prettify())
lines = soup1.findAll('td',class_='diff-addedline')
lines
lines[0]
type(lines[0])
lines[0].get_text()
lines[1].get_text()
lines[2].get_text()
###Output
_____no_output_____
###Markdown
Getting this data iteratively
###Code
user_all = pd.read_csv('/home/ec2-user/SageMaker/bucket/wiki_trust/revisions_data/csvs_stored/blocked_more3.csv')
del user_all['Unnamed: 0']
user_all
user_all = user_all.iloc[1850:,]
user_all
user_all['text'] = ''
user_all
start = time.time()
st2_exec_count = 0
for index,row in user_all.iterrows():
try:
if index==1891:
print('Skipping adding text at {0}'.format(index))
continue
new_url_1 = row['links']
response1 = requests.get(new_url_1)
html1 = response1.content
soup1= BeautifulSoup(html1,'html.parser')
lines = soup1.findAll('td',class_='diff-addedline')
txt = ''
for line in lines:
txt= txt + line.get_text()
user_all.at[index,'text'] = txt
print('Completed adding text at {0}'.format(index))
except:
st2_exec_count = st2_exec_count + 1
print('Exception at {0}'.format(index))
continue
end = time.time()
print(end - start)
user_all
user_all.to_csv('/home/ec2-user/SageMaker/bucket/wiki_trust/revisions_data/csvs_stored/blocked_more3_2_stage2.csv')
print(st2_exec_count)
list(user_all['text'])[0]
list(user_all['text'])[1]
list(user_all['text'])[41]
list(user_all['text'])[42]
###Output
_____no_output_____
###Markdown
Similarly for non-blocked users
###Code
#nonblocked = pd.read_csv("/home/ec2-user/SageMaker/bucket/wiki_trust/revisions_data/csvs_stored/nonblocked_users_1718.csv")
nonblocked = pd.read_csv("/home/ec2-user/SageMaker/bucket/wiki_trust/revisions_data/csvs_stored/ipnb_remaining.csv")
nonblocked.head(20)
#ipnb = nonblocked.head(10000)
ipnb = nonblocked.iloc[:10000,]
ipnb
buser = []
dates = []
char_changes = []
links = []
exc_count1 = 0
exc_count2 = 0
exc_count3 = 0
exc_count4 = 0
user_all = pd.DataFrame({"user": buser,"dates": dates,"char_changes": char_changes,"links":links})
user_all
start = time.time()
for index, row in ipnb.iterrows():
buser = row['rev_user_text']
# For namespace 3
dates = []
char_changes = []
links = []
flag = 0
new_url_1 = 'https://en.wikipedia.org/w/index.php?limit=20&title=Special%3AContributions&contribs=user&target='+buser+'&namespace=3&tagfilter=&start=&end='
response1 = requests.get(new_url_1)
html1 = response1.content
soup1= BeautifulSoup(html1,'html.parser')
try:
for i in list(soup1.find('div',id='mw-content-text')):
try:
if i.get_text() == 'No changes were found matching these criteria.\n':
flag = 1
except:
continue
except:
exc_count4=exc_count4+1
continue
if flag == 1:
exc_count1=exc_count1+1
continue
else:
try:
soup1 = soup1.find('ul',class_='mw-contributions-list')
lines = soup1.findAll('li')
if len(lines)<3:
#user_less_than3.append(buser)
continue
except:
exc_count2=exc_count2+1
continue
for line in lines:
date = list(line.children)[0].get_text()
char_change = list(line.children)[6].get_text()
li = list(line.children)[2]
diff = list(li.children)[0]
if(list(diff.children)[0]=='diff'):
exc_count3=exc_count3+1
continue
link = list(diff.children)[0].get('href')
dates.append(date)
char_changes.append(char_change)
links.append(link)
user = pd.DataFrame({"user": buser,"dates": dates,"char_changes": char_changes,"links":links})
frames = [user_all,user]
user_all = pd.concat(frames)
# For namespace 1
for index, row in ipnb.iterrows():
buser = row['rev_user_text']
dates = []
char_changes = []
links = []
flag = 0
new_url_1 = 'https://en.wikipedia.org/w/index.php?limit=20&title=Special%3AContributions&contribs=user&target='+buser+'&namespace=1&tagfilter=&start=&end='
response1 = requests.get(new_url_1)
html1 = response1.content
    soup1 = BeautifulSoup(html1, 'html.parser')
try:
for i in list(soup1.find('div',id='mw-content-text')):
try:
if i.get_text() == 'No changes were found matching these criteria.\n':
flag = 1
except:
continue
except:
exc_count4=exc_count4+1
continue
if flag == 1:
exc_count1=exc_count1+1
continue
else:
try:
soup1 = soup1.find('ul',class_='mw-contributions-list')
lines = soup1.findAll('li')
if len(lines)<3:
#user_less_than3.append(buser)
continue
except:
exc_count2=exc_count2+1
continue
for line in lines:
date = list(line.children)[0].get_text()
char_change = list(line.children)[6].get_text()
li = list(line.children)[2]
diff = list(li.children)[0]
if(list(diff.children)[0]=='diff'):
exc_count3=exc_count3+1
continue
link = list(diff.children)[0].get('href')
dates.append(date)
char_changes.append(char_change)
links.append(link)
user = pd.DataFrame({"user": buser,"dates": dates,"char_changes": char_changes,"links":links})
frames = [user_all,user]
user_all = pd.concat(frames)
end = time.time()
print(end - start)
print(exc_count1,exc_count2,exc_count3,exc_count4)
user_all
###Output
_____no_output_____
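###Markdown
One caveat with building the Special:Contributions URL by plain string concatenation: usernames containing spaces or non-ASCII characters should be percent-encoded, otherwise the request may target the wrong page. A hedged sketch using the standard library (`contribs_url` is a helper introduced here, not part of the original notebook):
###Code
from urllib.parse import quote

def contribs_url(username, namespace, limit=20):
    """Build a Special:Contributions URL with the username safely encoded."""
    return ('https://en.wikipedia.org/w/index.php?limit={0}'
            '&title=Special%3AContributions&contribs=user&target={1}'
            '&namespace={2}&tagfilter=&start=&end=').format(limit, quote(username), namespace)

# e.g. contribs_url('Example User', 3)
###Output
_____no_output_____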
###Markdown
Adding the required https prefix to the links
###Code
user_all['links'] = 'https://en.wikipedia.org'+ user_all['links']
user_all
len(set(user_all['user']))
###Output
_____no_output_____
###Markdown
382 users out of 10000 have namespace 1 and 3 diff data available
374 users out of next 30000 have namespace 1 and 3 diff data available
825 users out of next 60000 have namespace 1 and 3 diff data available
3303 users out of next 100000 have namespace 1 and 3 diff data available
2418 users out of next 50000 have namespace 1 and 3 diff data available
1883 users out of next 50000 have namespace 1 and 3 diff data available
3254 users out of next 100000 have namespace 1 and 3 diff data available
538 users out of next 20000 have namespace 1 and 3 diff data available
8601 users out of top 10000
###Code
ipnb_userlist = set(ipnb['rev_user_text'])
available = set(user_all['user'])
no_data = ipnb_userlist - available
list(no_data)
len(no_data)
user_all.to_csv('/home/ec2-user/SageMaker/bucket/wiki_trust/revisions_data/csvs_stored/nonblocked_top1.csv')
###Output
_____no_output_____
###Markdown
Stage 2
###Code
user_all = pd.read_csv('/home/ec2-user/SageMaker/bucket/wiki_trust/revisions_data/csvs_stored/nonblocked_top1.csv')
del user_all['Unnamed: 0']
user_all
user_all = user_all.iloc[180000:,]
user_all
user_all['text'] = ''
user_all
start = time.time()
st2_exec_count = 0
for index,row in user_all.iterrows():
try:
new_url_1 = row['links']
response1 = requests.get(new_url_1)
html1 = response1.content
        soup1 = BeautifulSoup(html1, 'html.parser')
        lines = soup1.findAll('td', class_='diff-addedline')
        txt = ''
        for line in lines:
            txt = txt + line.get_text()
user_all.at[index,'text'] = txt
except:
st2_exec_count = st2_exec_count + 1
continue
end = time.time()
print(end - start)
print(st2_exec_count)
user_all
user_all.to_csv('/home/ec2-user/SageMaker/bucket/wiki_trust/revisions_data/csvs_stored/nonblocked_top1_7_stage2.csv')
###Output
_____no_output_____
Notebooks/5-humidity-precip-windspeed-MODELLING/model_build_fast_WINDSPEED.ipynb | ###Markdown
###Markdown
Model Build - Fast Hyperparameter Search
Building and testing models notebook for Google Colab.
The gridsearch was taking way too long. This script now includes a function that searches over the hyperparameters far more rapidly. It's not as thorough, but given what we saw last time, it doesn't need to be.
This was primarily used to create optimised models for the weather variables besides temperature.
For more thorough commentary, please see model_build_smooth.
Be sure to switch to GPU in the runtime.
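To make "far more rapidly" concrete: a full grid over several candidate lists costs the product of their lengths in model fits, while the list-by-list search below costs roughly their sum per pass. A back-of-envelope sketch (the counts match the sizes of the candidate lists configured further down):
###Code
from math import prod

# sizes of the candidate lists used later: length, layers_num, layers_type, units, g_filt
list_sizes = {'length': 3, 'layers_num': 2, 'layers_type': 1, 'units': 3, 'g_filt': 4}

grid_fits = prod(list_sizes.values())    # every combination: 72 fits
greedy_fits = sum(list_sizes.values())   # one greedy pass: 13 fits (a second pass doubles this)
print(grid_fits, greedy_fits)
###Output
_____no_output_____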
###Code
import pandas as pd
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
import time
import os
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer, LSTM, GRU, Dense, Dropout
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
from scipy.ndimage import gaussian_filter1d
print(tf.config.list_physical_devices())
#my file path to data on Gdrive
! ls drive/MyDrive/0_neural_net_weather_forecasts_on_cloud/Data
os.chdir('drive/MyDrive/0_neural_net_weather_forecasts_on_cloud/Data')
df = pd.read_csv('weather_data.csv')
#get time
df['datetime'] = pd.to_datetime(df['datetime'], format='%d/%m/%Y')
df = df.set_index('datetime')
print(df.columns)
#get windspeed
wind = df['windspeed']
#split data (Save a week for testing. Train and Validation made in class)
wind_train = wind.iloc[:-7]
wind_test = wind.iloc[-7:]
#inspect
wind_train.iloc[-60:].plot()
###Output
_____no_output_____
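###Markdown
Two preprocessing steps the class below wraps are worth seeing in isolation: gaussian smoothing of the series, then windowing it into (past sequence, next value) pairs. A minimal sketch on the windspeed data (the sigma, length and batch_size values here are illustrative):
###Code
from scipy.ndimage import gaussian_filter1d
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator

demo = gaussian_filter1d(wind_train, 1.0).reshape(-1, 1)  # smooth, then make a column vector
gen = TimeseriesGenerator(data=demo, targets=demo, length=10, batch_size=1)
x, y = gen[0]
print(x.shape, y.shape)  # (1, 10, 1): 10 past days in; (1, 1): the next day out
###Output
_____no_output_____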
###Markdown
Define the model class
###Code
class BuildModel():
"""
Build a model. Arguments allow one to customise the hyper parameters
ATTRIBUTES :-
length - number of steps in time sequence to feed the rnn
layers_num - number of rnn layers in model (capped at 3)
layers_type - select "LSTM" or "GRU"
units - number of units in rnn layers
num_step_preds - number of steps/days in time to predict
dropout - dropout % to be applied to rnn units
    g_filt - sigma of the gaussian smoothing filter. Default 0.1 (effectively no smoothing)
batch_size - number of samples to feed model at a time.
    patience - how many epochs without val_loss improvement to wait before stopping training.
model_name - file name of model we save. must end in ".h5" eg 'temp_model.h5'
"""
    def __init__(self, model_name, length=10, layers_num=1, layers_type='LSTM',\
                 units=50, dropout=0.0, g_filt=0.1, num_step_preds=1,\
                 epochs=8, batch_size=1, patience=5):
#assertions for input
assert 0 < layers_num < 4, "1 <= layers_num <= 3"
assert layers_type in ['LSTM', 'GRU'], "layers_type is LSTM or GRU"
assert 0 <= dropout < 1, "dropout must be float < 1"
assert model_name[-3:] == '.h5', "End model_name with '.h5'"
#initialise
self.model_name = model_name
self.length = length
self.layers_num = layers_num
self.layers_type = layers_type
self.units = units
self.num_step_preds = num_step_preds
self.dropout = dropout
self.g_filt = g_filt
self.epochs = epochs
self.batch_size = batch_size
self.n_features = 1
#callbacks
self.callbacks =[EarlyStopping(monitor='val_loss', patience=patience),\
ModelCheckpoint(self.model_name, monitor='val_loss',\
save_best_only=True)]
#BUILD MODEL
##inputs
self.model = Sequential()
self.model.add(InputLayer(input_shape=(self.length, self.n_features)))
##add extra layers as required (or not if layers_num = 1)
for i in range(layers_num - 1):
self.model.add(eval('{}(units={}, dropout={}, return_sequences=True)'\
.format(self.layers_type, self.units, self.dropout)))
##closing rnn layer (do not return squences)
self.model.add(eval('{}(units={}, dropout={})'\
.format(self.layers_type, self.units, self.dropout)))
##Dense output
self.model.add(Dense(units=self.num_step_preds))
#compile model
self.model.compile(optimizer='adam', loss='mse', metrics=['mae'])
def setupData(self, series, val_days=450):
"""
splits data, scales data, creates generators for the model
"""
        assert val_days > self.length, "val_days must exceed length"
#split data into train and validation
self.train = series.iloc[:-val_days]
self.validation = series.iloc[-val_days:]
#Apply smoothing filters
self.train_smooth = \
gaussian_filter1d(self.train, self.g_filt)\
.reshape(-1,1)
self.validation_smooth = \
gaussian_filter1d(self.validation, self.g_filt)\
.reshape(-1,1)
#create time series generators
self.generator = \
TimeseriesGenerator(data=self.train_smooth,\
targets=self.train_smooth,\
length=self.length,\
batch_size=self.batch_size)
self.val_generator = \
TimeseriesGenerator(data=self.validation_smooth,\
targets=self.validation_smooth,\
length=self.length,\
batch_size=self.batch_size)
def fitModel(self):
"""
Fits the model on your generators for training and validation sets.
        EarlyStopping callback ends training if val_loss doesn't improve.
Record epoch metrics in a DataFrame.
"""
self.model.fit(self.generator, validation_data=self.val_generator,\
epochs=self.epochs, callbacks=self.callbacks)
self.history = pd.DataFrame(self.model.history.history)
def loadModel(self):
"""
Load a model instead of fitting a new one (uses model_name)
"""
self.model = tf.keras.models.load_model(self.model_name)
def predAhead(self, days, series=None):
"""
        Predicts a number of days ahead, set by the user. Pass in your own
        series, or omit it to predict off of the validation set.
"""
assert self.num_step_preds == 1,\
"sorry, function not yet available for multi step models"
#use end of the validation set to project forward if no series given
if series is None:
series = self.validation
#get end of the series to plug into the model
assert len(series) >= self.length,\
"series must be at least {} days".format(self.length)
series_cut = series.iloc[-self.length:].values.reshape(-1,1)
#predict ahead by appending predictions and removing first values
pred_series = series_cut.reshape(1, self.length, self.n_features)
predictions = []
for i in range(days):
pred = self.model.predict(pred_series)
pred_series = np.append(pred_series[:,1:,:], [pred], axis=1)
predictions.append(pred)
#convert to pandas series
predictions = np.array(predictions)
predictions = pd.Series(predictions.reshape(days))
predictions.index = self.validation.index[-days:] +\
dt.timedelta(days=days)
return predictions
def plotPreds(self, predictions, test_series=None, run_up=None,\
ylabel='units'):
"""
        Plot the predictions of the model, optionally against another series
        (test_series), with a run-up leading into the prediction period
        (defaults to the end of the validation set).
"""
#set up figure
plt.figure(figsize=(10,6))
plt.ylabel(ylabel)
plt.xlabel('datetime')
#plot lines
if run_up is None:
run_up = self.validation[-7:]
if test_series is not None:
plt.plot(pd.concat([run_up, test_series[:1]]))
plt.plot(test_series)
else:
plt.plot(run_up)
#plot points
plt.scatter(predictions.index, predictions, edgecolors='k',\
label='predictions', c='#2ca02c', s=64)
if test_series is not None:
plt.scatter(test_series.index, test_series, marker='X',\
edgecolors='k', label='test_data', c='#ff7f0e', s=200)
plt.legend()
def fastSearch(data: pd.Series, length: list, layers_num: list,\
layers_type: list, units: list, g_filt: list, model_name: str,\
best_dict=None):
"""
    First it sets every hyperparameter to the first value in the lists we
    pass in.
    Then, list by list, it trains a model for each candidate value, keeping
    the best-performing element of that list before moving on.
    It's recommended that you pass the resulting dictionary back into this
    function a second time, so earlier choices can be re-tuned.
"""
#record time for file_name
time_now = str(round(time.time()))
#set initial values if no specified parameters given.
if best_dict is None:
best_dict = {}
best_dict['length'] = [length[0], length]
best_dict['layers_num'] = [layers_num[0], layers_num]
best_dict['layers_type'] = [layers_type[0], layers_type]
best_dict['units'] = [units[0], units]
best_dict['g_filt'] = [g_filt[0], g_filt]
records = pd.DataFrame()
#go through each hyperparameter
for key in best_dict.keys():
if len(best_dict[key][1]) == 0:
continue
scores = []
#go through each value
for item in best_dict[key][1]:
best_dict[key][0] = item
model = \
BuildModel(model_name=model_name,\
length=best_dict['length'][0],\
layers_num=best_dict['layers_num'][0], \
layers_type=best_dict['layers_type'][0],\
units=best_dict['units'][0],\
g_filt=best_dict['g_filt'][0], num_step_preds=1,\
epochs=120, batch_size=10, patience=15)
#setup data and train the model
model.setupData(data)
model.fitModel()
#calculate val_mae in unsmoothed original units
best_model = tf.keras.models.load_model(model_name)
preds = best_model.predict(model.val_generator)
preds = pd.Series(preds[:,0],\
index = model.validation[model.length:].index)
val_mae_og = (preds - model.validation[model.length:]).abs()\
.mean()
record = pd.DataFrame(best_dict).iloc[:1]
record['val_mae_og'] = val_mae_og
#append score
scores.append(val_mae_og)
            records = pd.concat([records, record])
records.to_csv('records_' + time_now + '.csv', index=False)
#get param value that performed the best
best_score = min(scores)
best_dict[key][0] = best_dict[key][1][scores.index(best_score)]
return records, best_dict
###Output
_____no_output_____
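###Markdown
Once the search below has run and written windspeed_model.h5, the class methods chain together for a final check against the held-out week. A hedged usage sketch; the hyperparameter values here are placeholders for whatever the search returns:
###Code
model = BuildModel(model_name='windspeed_model.h5', length=30, layers_num=1,
                   layers_type='LSTM', units=40, g_filt=1.0,
                   epochs=120, batch_size=10, patience=15)
model.setupData(wind_train)
model.loadModel()                       # reuse the checkpointed best weights
preds = model.predAhead(7)              # roll the forecast 7 days forward
model.plotPreds(preds, test_series=wind_test, ylabel='windspeed')
###Output
_____no_output_____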
###Markdown
Use functions and class to optimise a model
###Code
length = [15, 30, 60]
layers_num = [1, 2]
layers_type = ['LSTM']
units = [20, 40, 80]
g_filt = [0.5, 0.75, 1.0, 1.25]
model_name = 'windspeed_model.h5'
records, best_dict = fastSearch(wind_train, length, layers_num, layers_type, units, g_filt, model_name=model_name, best_dict=None)
records2, best_dict2 = fastSearch(wind_train, length, layers_num, layers_type, units, g_filt, model_name=model_name, best_dict=best_dict)
records_all = pd.concat([records,records2])
###Output
Epoch 1/120
193/193 [==============================] - 9s 6ms/step - loss: 102.9898 - mae: 8.5735 - val_loss: 82.2140 - val_mae: 7.9981
Epoch 2/120
193/193 [==============================] - 1s 4ms/step - loss: 36.6651 - mae: 4.6356 - val_loss: 38.9598 - val_mae: 4.9047
Epoch 3/120
193/193 [==============================] - 1s 4ms/step - loss: 20.6372 - mae: 3.2892 - val_loss: 25.2359 - val_mae: 3.8174
Epoch 4/120
193/193 [==============================] - 1s 4ms/step - loss: 15.9589 - mae: 2.8945 - val_loss: 20.5559 - val_mae: 3.4515
Epoch 5/120
193/193 [==============================] - 1s 4ms/step - loss: 13.7239 - mae: 2.6912 - val_loss: 17.5182 - val_mae: 3.1843
Epoch 6/120
193/193 [==============================] - 1s 4ms/step - loss: 12.2725 - mae: 2.5679 - val_loss: 15.4971 - val_mae: 3.0199
Epoch 7/120
193/193 [==============================] - 1s 4ms/step - loss: 11.4784 - mae: 2.4965 - val_loss: 14.5550 - val_mae: 2.9185
Epoch 8/120
193/193 [==============================] - 1s 4ms/step - loss: 10.9679 - mae: 2.4512 - val_loss: 13.8088 - val_mae: 2.8335
Epoch 9/120
193/193 [==============================] - 1s 4ms/step - loss: 10.5199 - mae: 2.4050 - val_loss: 13.0951 - val_mae: 2.7648
Epoch 10/120
193/193 [==============================] - 1s 4ms/step - loss: 10.1449 - mae: 2.3587 - val_loss: 12.2948 - val_mae: 2.7262
Epoch 11/120
193/193 [==============================] - 1s 4ms/step - loss: 9.8840 - mae: 2.3495 - val_loss: 12.1082 - val_mae: 2.6831
Epoch 12/120
193/193 [==============================] - 1s 4ms/step - loss: 9.7451 - mae: 2.3219 - val_loss: 11.7038 - val_mae: 2.6722
Epoch 13/120
193/193 [==============================] - 1s 4ms/step - loss: 9.6136 - mae: 2.3232 - val_loss: 11.4502 - val_mae: 2.6464
Epoch 14/120
193/193 [==============================] - 1s 4ms/step - loss: 9.4382 - mae: 2.3075 - val_loss: 11.3384 - val_mae: 2.6243
Epoch 15/120
193/193 [==============================] - 1s 4ms/step - loss: 9.3586 - mae: 2.3017 - val_loss: 11.2987 - val_mae: 2.6062
Epoch 16/120
193/193 [==============================] - 1s 5ms/step - loss: 9.2485 - mae: 2.2850 - val_loss: 11.0830 - val_mae: 2.5979
Epoch 17/120
193/193 [==============================] - 1s 4ms/step - loss: 9.1846 - mae: 2.2819 - val_loss: 11.1557 - val_mae: 2.5872
Epoch 18/120
193/193 [==============================] - 1s 4ms/step - loss: 9.0944 - mae: 2.2813 - val_loss: 11.1157 - val_mae: 2.5820
Epoch 19/120
193/193 [==============================] - 1s 4ms/step - loss: 9.0528 - mae: 2.2693 - val_loss: 10.9849 - val_mae: 2.5760
Epoch 20/120
193/193 [==============================] - 1s 4ms/step - loss: 9.0233 - mae: 2.2689 - val_loss: 10.9485 - val_mae: 2.5727
Epoch 21/120
193/193 [==============================] - 1s 4ms/step - loss: 8.9747 - mae: 2.2681 - val_loss: 11.1215 - val_mae: 2.5761
Epoch 22/120
193/193 [==============================] - 1s 4ms/step - loss: 8.9402 - mae: 2.2562 - val_loss: 10.6692 - val_mae: 2.5831
Epoch 23/120
193/193 [==============================] - 1s 4ms/step - loss: 8.9524 - mae: 2.2725 - val_loss: 10.7282 - val_mae: 2.5599
Epoch 24/120
193/193 [==============================] - 1s 4ms/step - loss: 8.9116 - mae: 2.2549 - val_loss: 10.8092 - val_mae: 2.5693
Epoch 25/120
193/193 [==============================] - 1s 4ms/step - loss: 8.8490 - mae: 2.2541 - val_loss: 10.6496 - val_mae: 2.5526
Epoch 26/120
193/193 [==============================] - 1s 4ms/step - loss: 8.8577 - mae: 2.2573 - val_loss: 10.6735 - val_mae: 2.5548
Epoch 27/120
193/193 [==============================] - 1s 4ms/step - loss: 8.8271 - mae: 2.2582 - val_loss: 10.5758 - val_mae: 2.5716
Epoch 28/120
193/193 [==============================] - 1s 4ms/step - loss: 8.8201 - mae: 2.2508 - val_loss: 10.5495 - val_mae: 2.5617
Epoch 29/120
193/193 [==============================] - 1s 4ms/step - loss: 8.8480 - mae: 2.2579 - val_loss: 10.7012 - val_mae: 2.5484
Epoch 30/120
193/193 [==============================] - 1s 4ms/step - loss: 8.7662 - mae: 2.2502 - val_loss: 10.6394 - val_mae: 2.5572
Epoch 31/120
193/193 [==============================] - 1s 4ms/step - loss: 8.7385 - mae: 2.2454 - val_loss: 10.6550 - val_mae: 2.5690
Epoch 32/120
193/193 [==============================] - 1s 4ms/step - loss: 8.7054 - mae: 2.2391 - val_loss: 10.6583 - val_mae: 2.5368
Epoch 33/120
193/193 [==============================] - 1s 4ms/step - loss: 8.7467 - mae: 2.2539 - val_loss: 11.1412 - val_mae: 2.5630
Epoch 34/120
193/193 [==============================] - 1s 4ms/step - loss: 8.7284 - mae: 2.2355 - val_loss: 10.5814 - val_mae: 2.5452
Epoch 35/120
193/193 [==============================] - 1s 4ms/step - loss: 8.6677 - mae: 2.2425 - val_loss: 10.5712 - val_mae: 2.5505
Epoch 36/120
193/193 [==============================] - 1s 4ms/step - loss: 8.6144 - mae: 2.2264 - val_loss: 10.6580 - val_mae: 2.5576
Epoch 37/120
193/193 [==============================] - 1s 4ms/step - loss: 8.7333 - mae: 2.2509 - val_loss: 10.9461 - val_mae: 2.5473
Epoch 38/120
193/193 [==============================] - 1s 4ms/step - loss: 8.6164 - mae: 2.2309 - val_loss: 11.0234 - val_mae: 2.5568
Epoch 39/120
193/193 [==============================] - 1s 4ms/step - loss: 8.6321 - mae: 2.2324 - val_loss: 10.8174 - val_mae: 2.5416
Epoch 40/120
193/193 [==============================] - 1s 4ms/step - loss: 8.5687 - mae: 2.2204 - val_loss: 10.5796 - val_mae: 2.5489
Epoch 41/120
193/193 [==============================] - 1s 4ms/step - loss: 8.5943 - mae: 2.2276 - val_loss: 10.8227 - val_mae: 2.5419
Epoch 42/120
193/193 [==============================] - 1s 4ms/step - loss: 8.6129 - mae: 2.2255 - val_loss: 10.4823 - val_mae: 2.5696
Epoch 43/120
193/193 [==============================] - 1s 4ms/step - loss: 8.6171 - mae: 2.2400 - val_loss: 10.6926 - val_mae: 2.5391
Epoch 44/120
193/193 [==============================] - 1s 4ms/step - loss: 8.5602 - mae: 2.2266 - val_loss: 10.5435 - val_mae: 2.5376
Epoch 45/120
193/193 [==============================] - 1s 4ms/step - loss: 8.5761 - mae: 2.2305 - val_loss: 10.8760 - val_mae: 2.5386
Epoch 46/120
193/193 [==============================] - 1s 4ms/step - loss: 8.5536 - mae: 2.2189 - val_loss: 10.4737 - val_mae: 2.5479
Epoch 47/120
193/193 [==============================] - 1s 4ms/step - loss: 8.6192 - mae: 2.2429 - val_loss: 10.8762 - val_mae: 2.5580
Epoch 48/120
193/193 [==============================] - 1s 4ms/step - loss: 8.5568 - mae: 2.2305 - val_loss: 10.7797 - val_mae: 2.5459
Epoch 49/120
193/193 [==============================] - 1s 4ms/step - loss: 8.5097 - mae: 2.2209 - val_loss: 10.6652 - val_mae: 2.5437
Epoch 50/120
193/193 [==============================] - 1s 4ms/step - loss: 8.6016 - mae: 2.2371 - val_loss: 10.6007 - val_mae: 2.5473
Epoch 51/120
193/193 [==============================] - 1s 4ms/step - loss: 8.5076 - mae: 2.2241 - val_loss: 10.4225 - val_mae: 2.5441
Epoch 52/120
193/193 [==============================] - 1s 4ms/step - loss: 8.4789 - mae: 2.2198 - val_loss: 10.8581 - val_mae: 2.5391
Epoch 53/120
193/193 [==============================] - 1s 4ms/step - loss: 8.5279 - mae: 2.2290 - val_loss: 10.7700 - val_mae: 2.5452
Epoch 54/120
193/193 [==============================] - 1s 4ms/step - loss: 8.5398 - mae: 2.2181 - val_loss: 10.6172 - val_mae: 2.5405
Epoch 55/120
193/193 [==============================] - 1s 4ms/step - loss: 8.4479 - mae: 2.2175 - val_loss: 10.6971 - val_mae: 2.5608
Epoch 56/120
193/193 [==============================] - 1s 4ms/step - loss: 8.4527 - mae: 2.2185 - val_loss: 10.9560 - val_mae: 2.5656
Epoch 57/120
193/193 [==============================] - 1s 4ms/step - loss: 8.5134 - mae: 2.2174 - val_loss: 10.9845 - val_mae: 2.5550
Epoch 58/120
193/193 [==============================] - 1s 4ms/step - loss: 8.4999 - mae: 2.2214 - val_loss: 10.8421 - val_mae: 2.5603
Epoch 59/120
193/193 [==============================] - 1s 4ms/step - loss: 8.5145 - mae: 2.2200 - val_loss: 10.5576 - val_mae: 2.5532
Epoch 60/120
193/193 [==============================] - 1s 4ms/step - loss: 8.5185 - mae: 2.2257 - val_loss: 10.8886 - val_mae: 2.5909
Epoch 61/120
193/193 [==============================] - 1s 4ms/step - loss: 8.4922 - mae: 2.2187 - val_loss: 10.6573 - val_mae: 2.5450
Epoch 62/120
193/193 [==============================] - 1s 4ms/step - loss: 8.4689 - mae: 2.2215 - val_loss: 10.7658 - val_mae: 2.5705
Epoch 63/120
193/193 [==============================] - 1s 4ms/step - loss: 8.4505 - mae: 2.2110 - val_loss: 10.4780 - val_mae: 2.5672
Epoch 64/120
193/193 [==============================] - 1s 4ms/step - loss: 8.4688 - mae: 2.2209 - val_loss: 10.9126 - val_mae: 2.5583
Epoch 65/120
193/193 [==============================] - 1s 4ms/step - loss: 8.4682 - mae: 2.2087 - val_loss: 10.6133 - val_mae: 2.5491
Epoch 66/120
193/193 [==============================] - 1s 4ms/step - loss: 8.4291 - mae: 2.2134 - val_loss: 10.6057 - val_mae: 2.5652
Epoch 1/120
192/192 [==============================] - 2s 6ms/step - loss: 75.5513 - mae: 7.0420 - val_loss: 49.8008 - val_mae: 5.8144
Epoch 2/120
192/192 [==============================] - 1s 4ms/step - loss: 26.9782 - mae: 3.9032 - val_loss: 29.4137 - val_mae: 4.1548
Epoch 3/120
192/192 [==============================] - 1s 4ms/step - loss: 18.2644 - mae: 3.1075 - val_loss: 21.3893 - val_mae: 3.5068
Epoch 4/120
192/192 [==============================] - 1s 4ms/step - loss: 15.2662 - mae: 2.8570 - val_loss: 18.2926 - val_mae: 3.2566
Epoch 5/120
192/192 [==============================] - 1s 4ms/step - loss: 13.6563 - mae: 2.7052 - val_loss: 16.6125 - val_mae: 3.1025
Epoch 6/120
192/192 [==============================] - 1s 4ms/step - loss: 12.4442 - mae: 2.5844 - val_loss: 14.8516 - val_mae: 2.9511
Epoch 7/120
192/192 [==============================] - 1s 5ms/step - loss: 11.6541 - mae: 2.5081 - val_loss: 13.8085 - val_mae: 2.8557
Epoch 8/120
192/192 [==============================] - 1s 4ms/step - loss: 10.9818 - mae: 2.4573 - val_loss: 12.9892 - val_mae: 2.7736
Epoch 9/120
192/192 [==============================] - 1s 4ms/step - loss: 10.5274 - mae: 2.4012 - val_loss: 12.4264 - val_mae: 2.7081
Epoch 10/120
192/192 [==============================] - 1s 4ms/step - loss: 10.2272 - mae: 2.3794 - val_loss: 12.0140 - val_mae: 2.6641
Epoch 11/120
192/192 [==============================] - 1s 4ms/step - loss: 10.0260 - mae: 2.3652 - val_loss: 11.7963 - val_mae: 2.6332
Epoch 12/120
192/192 [==============================] - 1s 4ms/step - loss: 9.7689 - mae: 2.3333 - val_loss: 11.3748 - val_mae: 2.6103
Epoch 13/120
192/192 [==============================] - 1s 4ms/step - loss: 9.7073 - mae: 2.3379 - val_loss: 11.2850 - val_mae: 2.5863
Epoch 14/120
192/192 [==============================] - 1s 4ms/step - loss: 9.4714 - mae: 2.3125 - val_loss: 11.0384 - val_mae: 2.5717
Epoch 15/120
192/192 [==============================] - 1s 4ms/step - loss: 9.4539 - mae: 2.3098 - val_loss: 10.8792 - val_mae: 2.5530
Epoch 16/120
192/192 [==============================] - 1s 4ms/step - loss: 9.2818 - mae: 2.2944 - val_loss: 10.8738 - val_mae: 2.5456
Epoch 17/120
192/192 [==============================] - 1s 4ms/step - loss: 9.2347 - mae: 2.2945 - val_loss: 10.7068 - val_mae: 2.5411
Epoch 18/120
192/192 [==============================] - 1s 4ms/step - loss: 9.1435 - mae: 2.2825 - val_loss: 10.7186 - val_mae: 2.5309
Epoch 19/120
192/192 [==============================] - 1s 4ms/step - loss: 9.1357 - mae: 2.2920 - val_loss: 10.7056 - val_mae: 2.5234
Epoch 20/120
192/192 [==============================] - 1s 4ms/step - loss: 9.1089 - mae: 2.2880 - val_loss: 10.4769 - val_mae: 2.5362
Epoch 21/120
192/192 [==============================] - 1s 4ms/step - loss: 9.0088 - mae: 2.2801 - val_loss: 10.8716 - val_mae: 2.5147
Epoch 22/120
192/192 [==============================] - 1s 4ms/step - loss: 9.0565 - mae: 2.2857 - val_loss: 10.7129 - val_mae: 2.5092
Epoch 23/120
192/192 [==============================] - 1s 4ms/step - loss: 8.9165 - mae: 2.2649 - val_loss: 10.5495 - val_mae: 2.5021
Epoch 24/120
192/192 [==============================] - 1s 4ms/step - loss: 8.9019 - mae: 2.2611 - val_loss: 10.4276 - val_mae: 2.5068
Epoch 25/120
192/192 [==============================] - 1s 4ms/step - loss: 8.9089 - mae: 2.2626 - val_loss: 10.4333 - val_mae: 2.5011
Epoch 26/120
192/192 [==============================] - 1s 4ms/step - loss: 8.9435 - mae: 2.2881 - val_loss: 10.3929 - val_mae: 2.5023
Epoch 27/120
192/192 [==============================] - 1s 4ms/step - loss: 8.8720 - mae: 2.2559 - val_loss: 10.6782 - val_mae: 2.5089
Epoch 28/120
192/192 [==============================] - 1s 4ms/step - loss: 8.8657 - mae: 2.2678 - val_loss: 10.3450 - val_mae: 2.5014
Epoch 29/120
192/192 [==============================] - 1s 4ms/step - loss: 8.8428 - mae: 2.2605 - val_loss: 10.2974 - val_mae: 2.5044
Epoch 30/120
192/192 [==============================] - 1s 4ms/step - loss: 8.8811 - mae: 2.2663 - val_loss: 10.3579 - val_mae: 2.4966
Epoch 31/120
192/192 [==============================] - 1s 4ms/step - loss: 8.8149 - mae: 2.2553 - val_loss: 10.2225 - val_mae: 2.5084
Epoch 32/120
192/192 [==============================] - 1s 4ms/step - loss: 8.7989 - mae: 2.2582 - val_loss: 10.3342 - val_mae: 2.4974
Epoch 33/120
192/192 [==============================] - 1s 5ms/step - loss: 8.8140 - mae: 2.2657 - val_loss: 10.3842 - val_mae: 2.4930
Epoch 34/120
192/192 [==============================] - 1s 5ms/step - loss: 8.7851 - mae: 2.2627 - val_loss: 10.3721 - val_mae: 2.4931
Epoch 35/120
192/192 [==============================] - 1s 5ms/step - loss: 8.7592 - mae: 2.2539 - val_loss: 10.2852 - val_mae: 2.5017
Epoch 36/120
192/192 [==============================] - 1s 4ms/step - loss: 8.8059 - mae: 2.2615 - val_loss: 10.3086 - val_mae: 2.5101
Epoch 37/120
192/192 [==============================] - 1s 5ms/step - loss: 8.7755 - mae: 2.2524 - val_loss: 10.2848 - val_mae: 2.5299
Epoch 38/120
192/192 [==============================] - 1s 5ms/step - loss: 8.7353 - mae: 2.2580 - val_loss: 10.4290 - val_mae: 2.5898
Epoch 39/120
192/192 [==============================] - 1s 4ms/step - loss: 8.7542 - mae: 2.2579 - val_loss: 10.3680 - val_mae: 2.4928
Epoch 40/120
192/192 [==============================] - 1s 4ms/step - loss: 8.7533 - mae: 2.2524 - val_loss: 10.2179 - val_mae: 2.4995
Epoch 41/120
192/192 [==============================] - 1s 4ms/step - loss: 8.8170 - mae: 2.2652 - val_loss: 10.3209 - val_mae: 2.5001
Epoch 42/120
192/192 [==============================] - 1s 4ms/step - loss: 8.7192 - mae: 2.2527 - val_loss: 10.2523 - val_mae: 2.5458
Epoch 43/120
192/192 [==============================] - 1s 4ms/step - loss: 8.7153 - mae: 2.2458 - val_loss: 10.2374 - val_mae: 2.5056
Epoch 44/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6691 - mae: 2.2557 - val_loss: 10.3271 - val_mae: 2.5478
Epoch 45/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6926 - mae: 2.2449 - val_loss: 10.2575 - val_mae: 2.4962
Epoch 46/120
192/192 [==============================] - 1s 4ms/step - loss: 8.7045 - mae: 2.2532 - val_loss: 10.3379 - val_mae: 2.4960
Epoch 47/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6785 - mae: 2.2408 - val_loss: 10.3721 - val_mae: 2.5038
Epoch 48/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6699 - mae: 2.2496 - val_loss: 10.2432 - val_mae: 2.5188
Epoch 49/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6677 - mae: 2.2474 - val_loss: 10.4030 - val_mae: 2.5042
Epoch 50/120
192/192 [==============================] - 1s 4ms/step - loss: 8.7988 - mae: 2.2575 - val_loss: 10.3358 - val_mae: 2.5017
Epoch 51/120
192/192 [==============================] - 1s 4ms/step - loss: 8.7300 - mae: 2.2558 - val_loss: 10.2846 - val_mae: 2.5114
Epoch 52/120
192/192 [==============================] - 1s 4ms/step - loss: 8.7205 - mae: 2.2592 - val_loss: 10.3162 - val_mae: 2.5054
Epoch 53/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6798 - mae: 2.2405 - val_loss: 10.2240 - val_mae: 2.5130
Epoch 54/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6471 - mae: 2.2395 - val_loss: 10.2159 - val_mae: 2.5229
Epoch 55/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6252 - mae: 2.2405 - val_loss: 10.3301 - val_mae: 2.4969
Epoch 56/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6456 - mae: 2.2420 - val_loss: 10.6939 - val_mae: 2.5036
Epoch 57/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6409 - mae: 2.2395 - val_loss: 10.1474 - val_mae: 2.5256
Epoch 58/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6611 - mae: 2.2453 - val_loss: 10.2702 - val_mae: 2.4976
Epoch 59/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6252 - mae: 2.2373 - val_loss: 10.6019 - val_mae: 2.5009
Epoch 60/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6909 - mae: 2.2455 - val_loss: 10.3989 - val_mae: 2.5057
Epoch 61/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6813 - mae: 2.2456 - val_loss: 10.2135 - val_mae: 2.5154
Epoch 62/120
192/192 [==============================] - 1s 4ms/step - loss: 8.5697 - mae: 2.2310 - val_loss: 10.6348 - val_mae: 2.6235
Epoch 63/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6712 - mae: 2.2478 - val_loss: 10.2081 - val_mae: 2.5027
Epoch 64/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6152 - mae: 2.2409 - val_loss: 10.3409 - val_mae: 2.5741
Epoch 65/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6442 - mae: 2.2458 - val_loss: 10.3269 - val_mae: 2.5082
Epoch 66/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6344 - mae: 2.2332 - val_loss: 10.3398 - val_mae: 2.5079
Epoch 67/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6371 - mae: 2.2362 - val_loss: 10.4632 - val_mae: 2.4968
Epoch 68/120
192/192 [==============================] - 1s 4ms/step - loss: 8.9263 - mae: 2.2816 - val_loss: 10.2883 - val_mae: 2.5081
Epoch 69/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6669 - mae: 2.2443 - val_loss: 10.2221 - val_mae: 2.5062
Epoch 70/120
192/192 [==============================] - 1s 4ms/step - loss: 8.5869 - mae: 2.2355 - val_loss: 10.2767 - val_mae: 2.5085
Epoch 71/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6124 - mae: 2.2422 - val_loss: 10.2666 - val_mae: 2.5509
Epoch 72/120
192/192 [==============================] - 1s 5ms/step - loss: 8.5911 - mae: 2.2297 - val_loss: 10.1590 - val_mae: 2.5225
Epoch 1/120
189/189 [==============================] - 3s 8ms/step - loss: 79.6780 - mae: 7.5141 - val_loss: 69.2531 - val_mae: 7.1896
Epoch 2/120
189/189 [==============================] - 1s 5ms/step - loss: 32.8572 - mae: 4.3591 - val_loss: 32.3264 - val_mae: 4.3751
Epoch 3/120
189/189 [==============================] - 1s 5ms/step - loss: 19.2780 - mae: 3.1871 - val_loss: 22.8465 - val_mae: 3.6194
Epoch 4/120
189/189 [==============================] - 1s 5ms/step - loss: 15.8059 - mae: 2.9116 - val_loss: 19.1829 - val_mae: 3.3260
Epoch 5/120
189/189 [==============================] - 1s 5ms/step - loss: 14.1849 - mae: 2.7618 - val_loss: 17.0166 - val_mae: 3.1394
Epoch 6/120
189/189 [==============================] - 1s 5ms/step - loss: 12.7945 - mae: 2.6351 - val_loss: 15.5708 - val_mae: 2.9971
Epoch 7/120
189/189 [==============================] - 1s 5ms/step - loss: 11.8979 - mae: 2.5508 - val_loss: 14.5077 - val_mae: 2.8834
Epoch 8/120
189/189 [==============================] - 1s 5ms/step - loss: 11.2758 - mae: 2.4769 - val_loss: 13.7724 - val_mae: 2.8007
Epoch 9/120
189/189 [==============================] - 1s 5ms/step - loss: 10.8372 - mae: 2.4392 - val_loss: 12.8656 - val_mae: 2.7292
Epoch 10/120
189/189 [==============================] - 1s 5ms/step - loss: 10.4353 - mae: 2.4096 - val_loss: 12.3030 - val_mae: 2.6842
Epoch 11/120
189/189 [==============================] - 1s 5ms/step - loss: 10.1791 - mae: 2.3852 - val_loss: 12.2240 - val_mae: 2.6477
Epoch 12/120
189/189 [==============================] - 1s 5ms/step - loss: 10.0447 - mae: 2.3739 - val_loss: 11.7836 - val_mae: 2.6156
Epoch 13/120
189/189 [==============================] - 1s 5ms/step - loss: 9.8004 - mae: 2.3548 - val_loss: 12.0130 - val_mae: 2.6087
Epoch 14/120
189/189 [==============================] - 1s 5ms/step - loss: 9.7237 - mae: 2.3519 - val_loss: 11.3019 - val_mae: 2.5792
Epoch 15/120
189/189 [==============================] - 1s 5ms/step - loss: 9.5773 - mae: 2.3393 - val_loss: 11.1059 - val_mae: 2.5712
Epoch 16/120
189/189 [==============================] - 1s 5ms/step - loss: 9.4510 - mae: 2.3197 - val_loss: 11.1110 - val_mae: 2.5571
Epoch 17/120
189/189 [==============================] - 1s 5ms/step - loss: 9.4025 - mae: 2.3213 - val_loss: 10.9291 - val_mae: 2.5534
Epoch 18/120
189/189 [==============================] - 1s 5ms/step - loss: 9.3563 - mae: 2.3197 - val_loss: 10.9746 - val_mae: 2.5475
Epoch 19/120
189/189 [==============================] - 1s 5ms/step - loss: 9.2654 - mae: 2.3093 - val_loss: 10.9812 - val_mae: 2.5312
Epoch 20/120
189/189 [==============================] - 1s 5ms/step - loss: 9.1652 - mae: 2.2952 - val_loss: 10.9372 - val_mae: 2.5266
Epoch 21/120
189/189 [==============================] - 1s 5ms/step - loss: 9.2137 - mae: 2.3077 - val_loss: 11.0479 - val_mae: 2.5342
Epoch 22/120
189/189 [==============================] - 1s 5ms/step - loss: 9.1363 - mae: 2.3028 - val_loss: 10.6296 - val_mae: 2.5252
Epoch 23/120
189/189 [==============================] - 1s 5ms/step - loss: 9.0798 - mae: 2.2968 - val_loss: 10.6519 - val_mae: 2.5340
Epoch 24/120
189/189 [==============================] - 1s 5ms/step - loss: 9.0520 - mae: 2.2918 - val_loss: 10.6462 - val_mae: 2.5147
Epoch 25/120
189/189 [==============================] - 1s 5ms/step - loss: 9.0396 - mae: 2.2961 - val_loss: 10.6514 - val_mae: 2.5119
Epoch 26/120
189/189 [==============================] - 1s 5ms/step - loss: 9.0711 - mae: 2.3017 - val_loss: 11.1931 - val_mae: 2.5316
Epoch 27/120
189/189 [==============================] - 1s 5ms/step - loss: 9.0062 - mae: 2.2888 - val_loss: 10.6526 - val_mae: 2.5105
Epoch 28/120
189/189 [==============================] - 1s 5ms/step - loss: 9.1337 - mae: 2.3077 - val_loss: 10.7814 - val_mae: 2.5160
Epoch 29/120
189/189 [==============================] - 1s 5ms/step - loss: 9.0970 - mae: 2.3102 - val_loss: 10.4648 - val_mae: 2.5186
Epoch 30/120
189/189 [==============================] - 1s 5ms/step - loss: 8.9470 - mae: 2.2823 - val_loss: 10.7872 - val_mae: 2.5088
Epoch 31/120
189/189 [==============================] - 1s 5ms/step - loss: 8.8602 - mae: 2.2731 - val_loss: 10.4500 - val_mae: 2.5271
Epoch 32/120
189/189 [==============================] - 1s 5ms/step - loss: 9.0137 - mae: 2.2922 - val_loss: 10.5368 - val_mae: 2.5555
Epoch 33/120
189/189 [==============================] - 1s 5ms/step - loss: 8.8714 - mae: 2.2760 - val_loss: 10.4605 - val_mae: 2.5132
Epoch 34/120
189/189 [==============================] - 1s 5ms/step - loss: 8.8765 - mae: 2.2889 - val_loss: 10.4839 - val_mae: 2.5046
Epoch 35/120
189/189 [==============================] - 1s 5ms/step - loss: 8.9761 - mae: 2.2953 - val_loss: 10.6897 - val_mae: 2.5083
Epoch 36/120
189/189 [==============================] - 1s 5ms/step - loss: 8.8752 - mae: 2.2776 - val_loss: 10.6213 - val_mae: 2.5027
Epoch 37/120
189/189 [==============================] - 1s 5ms/step - loss: 8.8383 - mae: 2.2741 - val_loss: 10.4840 - val_mae: 2.5053
Epoch 38/120
189/189 [==============================] - 1s 5ms/step - loss: 9.0348 - mae: 2.3067 - val_loss: 10.5366 - val_mae: 2.4981
Epoch 39/120
189/189 [==============================] - 1s 5ms/step - loss: 8.8284 - mae: 2.2761 - val_loss: 10.5014 - val_mae: 2.5093
Epoch 40/120
189/189 [==============================] - 1s 5ms/step - loss: 8.9381 - mae: 2.2916 - val_loss: 10.4613 - val_mae: 2.5086
Epoch 41/120
189/189 [==============================] - 1s 5ms/step - loss: 8.8344 - mae: 2.2698 - val_loss: 10.4947 - val_mae: 2.5092
Epoch 42/120
189/189 [==============================] - 1s 5ms/step - loss: 8.7768 - mae: 2.2692 - val_loss: 10.4827 - val_mae: 2.5264
Epoch 43/120
189/189 [==============================] - 1s 5ms/step - loss: 8.7773 - mae: 2.2680 - val_loss: 10.4542 - val_mae: 2.5452
Epoch 44/120
189/189 [==============================] - 1s 5ms/step - loss: 8.8236 - mae: 2.2774 - val_loss: 10.5366 - val_mae: 2.5653
Epoch 45/120
189/189 [==============================] - 1s 5ms/step - loss: 8.7967 - mae: 2.2802 - val_loss: 10.4841 - val_mae: 2.5070
Epoch 46/120
189/189 [==============================] - 1s 5ms/step - loss: 8.9218 - mae: 2.2895 - val_loss: 10.4901 - val_mae: 2.5739
Epoch 1/120
192/192 [==============================] - 2s 6ms/step - loss: 88.2063 - mae: 7.9205 - val_loss: 66.7991 - val_mae: 7.0538
Epoch 2/120
192/192 [==============================] - 1s 4ms/step - loss: 31.4810 - mae: 4.2085 - val_loss: 29.8336 - val_mae: 4.1892
Epoch 3/120
192/192 [==============================] - 1s 4ms/step - loss: 18.0396 - mae: 3.0821 - val_loss: 20.9731 - val_mae: 3.4738
Epoch 4/120
192/192 [==============================] - 1s 4ms/step - loss: 14.9910 - mae: 2.8362 - val_loss: 17.7064 - val_mae: 3.1981
Epoch 5/120
192/192 [==============================] - 1s 5ms/step - loss: 13.1813 - mae: 2.6630 - val_loss: 15.9225 - val_mae: 3.0362
Epoch 6/120
192/192 [==============================] - 1s 5ms/step - loss: 12.1639 - mae: 2.5647 - val_loss: 14.3029 - val_mae: 2.9080
Epoch 7/120
192/192 [==============================] - 1s 4ms/step - loss: 11.4083 - mae: 2.5027 - val_loss: 13.3299 - val_mae: 2.8159
Epoch 8/120
192/192 [==============================] - 1s 5ms/step - loss: 10.8367 - mae: 2.4406 - val_loss: 12.7031 - val_mae: 2.7393
Epoch 9/120
192/192 [==============================] - 1s 5ms/step - loss: 10.4283 - mae: 2.4005 - val_loss: 12.5763 - val_mae: 2.6938
Epoch 10/120
192/192 [==============================] - 1s 5ms/step - loss: 10.1904 - mae: 2.3879 - val_loss: 12.3121 - val_mae: 2.6573
Epoch 11/120
192/192 [==============================] - 1s 5ms/step - loss: 9.9150 - mae: 2.3452 - val_loss: 11.4478 - val_mae: 2.6241
Epoch 12/120
192/192 [==============================] - 1s 5ms/step - loss: 9.7273 - mae: 2.3403 - val_loss: 11.2179 - val_mae: 2.6011
Epoch 13/120
192/192 [==============================] - 1s 4ms/step - loss: 9.6201 - mae: 2.3310 - val_loss: 11.3567 - val_mae: 2.5788
Epoch 14/120
192/192 [==============================] - 1s 5ms/step - loss: 9.4637 - mae: 2.3100 - val_loss: 11.0200 - val_mae: 2.5574
Epoch 15/120
192/192 [==============================] - 1s 5ms/step - loss: 9.3442 - mae: 2.3052 - val_loss: 10.8163 - val_mae: 2.5552
Epoch 16/120
192/192 [==============================] - 1s 4ms/step - loss: 9.2566 - mae: 2.2925 - val_loss: 10.6795 - val_mae: 2.5376
Epoch 17/120
192/192 [==============================] - 1s 4ms/step - loss: 9.1961 - mae: 2.2974 - val_loss: 10.5411 - val_mae: 2.5493
Epoch 18/120
192/192 [==============================] - 1s 5ms/step - loss: 9.1731 - mae: 2.2921 - val_loss: 10.6122 - val_mae: 2.5198
Epoch 19/120
192/192 [==============================] - 1s 5ms/step - loss: 9.0840 - mae: 2.2855 - val_loss: 10.4617 - val_mae: 2.5311
Epoch 20/120
192/192 [==============================] - 1s 5ms/step - loss: 9.0501 - mae: 2.2826 - val_loss: 11.4038 - val_mae: 2.5498
Epoch 21/120
192/192 [==============================] - 1s 5ms/step - loss: 9.0271 - mae: 2.2745 - val_loss: 10.4943 - val_mae: 2.5056
Epoch 22/120
192/192 [==============================] - 1s 4ms/step - loss: 9.0040 - mae: 2.2810 - val_loss: 10.4267 - val_mae: 2.5185
Epoch 23/120
192/192 [==============================] - 1s 5ms/step - loss: 8.8989 - mae: 2.2663 - val_loss: 10.3174 - val_mae: 2.5222
Epoch 24/120
192/192 [==============================] - 1s 5ms/step - loss: 8.9258 - mae: 2.2656 - val_loss: 10.2984 - val_mae: 2.5126
Epoch 25/120
192/192 [==============================] - 1s 5ms/step - loss: 8.8892 - mae: 2.2673 - val_loss: 10.3386 - val_mae: 2.5080
Epoch 26/120
192/192 [==============================] - 1s 5ms/step - loss: 9.0077 - mae: 2.2748 - val_loss: 10.2839 - val_mae: 2.5134
Epoch 27/120
192/192 [==============================] - 1s 5ms/step - loss: 8.9136 - mae: 2.2732 - val_loss: 10.2523 - val_mae: 2.5131
Epoch 28/120
192/192 [==============================] - 1s 5ms/step - loss: 8.8194 - mae: 2.2619 - val_loss: 10.3296 - val_mae: 2.4933
Epoch 29/120
192/192 [==============================] - 1s 5ms/step - loss: 8.8643 - mae: 2.2693 - val_loss: 10.3579 - val_mae: 2.5432
Epoch 30/120
192/192 [==============================] - 1s 4ms/step - loss: 8.8091 - mae: 2.2551 - val_loss: 10.2567 - val_mae: 2.5292
Epoch 31/120
192/192 [==============================] - 1s 5ms/step - loss: 8.8435 - mae: 2.2635 - val_loss: 10.2785 - val_mae: 2.4922
Epoch 32/120
192/192 [==============================] - 1s 4ms/step - loss: 8.7971 - mae: 2.2616 - val_loss: 10.2678 - val_mae: 2.5319
Epoch 33/120
192/192 [==============================] - 1s 4ms/step - loss: 8.7673 - mae: 2.2507 - val_loss: 10.2266 - val_mae: 2.5062
Epoch 34/120
192/192 [==============================] - 1s 5ms/step - loss: 8.9096 - mae: 2.2719 - val_loss: 10.2219 - val_mae: 2.5176
Epoch 35/120
192/192 [==============================] - 1s 5ms/step - loss: 8.7977 - mae: 2.2659 - val_loss: 10.2112 - val_mae: 2.4995
Epoch 36/120
192/192 [==============================] - 1s 5ms/step - loss: 8.8843 - mae: 2.2767 - val_loss: 10.4074 - val_mae: 2.5703
Epoch 37/120
192/192 [==============================] - 1s 5ms/step - loss: 8.7313 - mae: 2.2532 - val_loss: 10.3009 - val_mae: 2.4925
Epoch 38/120
192/192 [==============================] - 1s 5ms/step - loss: 8.7269 - mae: 2.2563 - val_loss: 10.2100 - val_mae: 2.5067
Epoch 39/120
192/192 [==============================] - 1s 5ms/step - loss: 8.7025 - mae: 2.2440 - val_loss: 10.1937 - val_mae: 2.5273
Epoch 40/120
192/192 [==============================] - 1s 5ms/step - loss: 8.7429 - mae: 2.2579 - val_loss: 10.3775 - val_mae: 2.4949
Epoch 41/120
192/192 [==============================] - 1s 5ms/step - loss: 8.7003 - mae: 2.2542 - val_loss: 10.4000 - val_mae: 2.5011
Epoch 42/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6723 - mae: 2.2384 - val_loss: 10.2468 - val_mae: 2.5122
Epoch 43/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6622 - mae: 2.2410 - val_loss: 10.1735 - val_mae: 2.5188
Epoch 44/120
192/192 [==============================] - 1s 5ms/step - loss: 8.7027 - mae: 2.2516 - val_loss: 10.3955 - val_mae: 2.5129
Epoch 45/120
192/192 [==============================] - 1s 5ms/step - loss: 8.7184 - mae: 2.2622 - val_loss: 10.2559 - val_mae: 2.5022
Epoch 46/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6912 - mae: 2.2405 - val_loss: 10.2569 - val_mae: 2.5186
Epoch 47/120
192/192 [==============================] - 1s 5ms/step - loss: 8.8002 - mae: 2.2652 - val_loss: 10.3077 - val_mae: 2.5169
Epoch 48/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6939 - mae: 2.2482 - val_loss: 10.1628 - val_mae: 2.5246
Epoch 49/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6645 - mae: 2.2541 - val_loss: 10.2296 - val_mae: 2.5100
Epoch 50/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6186 - mae: 2.2395 - val_loss: 10.3569 - val_mae: 2.4935
Epoch 51/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6196 - mae: 2.2359 - val_loss: 10.3857 - val_mae: 2.5154
Epoch 52/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6532 - mae: 2.2458 - val_loss: 10.2209 - val_mae: 2.4991
Epoch 53/120
192/192 [==============================] - 1s 4ms/step - loss: 8.6153 - mae: 2.2369 - val_loss: 10.2270 - val_mae: 2.5071
Epoch 54/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6094 - mae: 2.2366 - val_loss: 10.3431 - val_mae: 2.5049
Epoch 55/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6115 - mae: 2.2409 - val_loss: 10.2915 - val_mae: 2.5186
Epoch 56/120
192/192 [==============================] - 1s 5ms/step - loss: 8.7201 - mae: 2.2554 - val_loss: 10.5117 - val_mae: 2.5095
Epoch 57/120
192/192 [==============================] - 1s 5ms/step - loss: 8.7392 - mae: 2.2587 - val_loss: 10.4866 - val_mae: 2.5912
Epoch 58/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6457 - mae: 2.2419 - val_loss: 10.3562 - val_mae: 2.5088
Epoch 59/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6216 - mae: 2.2438 - val_loss: 10.3236 - val_mae: 2.5280
Epoch 60/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6021 - mae: 2.2375 - val_loss: 10.2494 - val_mae: 2.5260
Epoch 61/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6100 - mae: 2.2385 - val_loss: 10.1783 - val_mae: 2.5118
Epoch 62/120
192/192 [==============================] - 1s 4ms/step - loss: 8.5653 - mae: 2.2357 - val_loss: 10.4581 - val_mae: 2.4970
Epoch 63/120
192/192 [==============================] - 1s 5ms/step - loss: 8.6058 - mae: 2.2419 - val_loss: 10.3179 - val_mae: 2.4996
Epoch 1/120
192/192 [==============================] - 4s 10ms/step - loss: 68.4598 - mae: 6.7726 - val_loss: 49.9427 - val_mae: 5.8181
Epoch 2/120
192/192 [==============================] - 1s 7ms/step - loss: 27.9157 - mae: 4.0258 - val_loss: 29.0784 - val_mae: 4.1239
Epoch 3/120
192/192 [==============================] - 1s 6ms/step - loss: 18.7367 - mae: 3.1544 - val_loss: 21.6824 - val_mae: 3.5321
Epoch 4/120
192/192 [==============================] - 1s 7ms/step - loss: 16.1653 - mae: 2.9594 - val_loss: 19.0752 - val_mae: 3.3451
Epoch 5/120
192/192 [==============================] - 1s 7ms/step - loss: 15.2801 - mae: 2.8913 - val_loss: 18.0586 - val_mae: 3.2837
Epoch 6/120
192/192 [==============================] - 1s 6ms/step - loss: 14.9227 - mae: 2.8950 - val_loss: 17.5145 - val_mae: 3.2110
Epoch 7/120
192/192 [==============================] - 1s 7ms/step - loss: 13.8148 - mae: 2.7433 - val_loss: 16.5949 - val_mae: 3.1123
Epoch 8/120
192/192 [==============================] - 1s 6ms/step - loss: 12.5805 - mae: 2.6190 - val_loss: 14.7454 - val_mae: 2.9851
Epoch 9/120
192/192 [==============================] - 1s 6ms/step - loss: 11.4523 - mae: 2.4952 - val_loss: 13.4173 - val_mae: 2.8273
Epoch 10/120
192/192 [==============================] - 1s 6ms/step - loss: 10.7996 - mae: 2.4256 - val_loss: 12.8708 - val_mae: 2.7226
Epoch 11/120
192/192 [==============================] - 1s 7ms/step - loss: 10.3659 - mae: 2.3929 - val_loss: 12.1594 - val_mae: 2.6599
Epoch 12/120
192/192 [==============================] - 1s 6ms/step - loss: 10.0412 - mae: 2.3650 - val_loss: 11.5696 - val_mae: 2.6554
Epoch 13/120
192/192 [==============================] - 1s 7ms/step - loss: 9.8266 - mae: 2.3517 - val_loss: 11.5444 - val_mae: 2.5970
Epoch 14/120
192/192 [==============================] - 1s 7ms/step - loss: 9.6589 - mae: 2.3271 - val_loss: 11.1572 - val_mae: 2.5963
Epoch 15/120
192/192 [==============================] - 1s 7ms/step - loss: 9.7890 - mae: 2.3567 - val_loss: 11.1431 - val_mae: 2.6216
Epoch 16/120
192/192 [==============================] - 1s 7ms/step - loss: 9.4785 - mae: 2.3174 - val_loss: 10.9870 - val_mae: 2.5590
Epoch 17/120
192/192 [==============================] - 1s 6ms/step - loss: 9.3615 - mae: 2.3054 - val_loss: 11.0323 - val_mae: 2.5510
Epoch 18/120
192/192 [==============================] - 1s 8ms/step - loss: 9.2430 - mae: 2.3156 - val_loss: 11.1417 - val_mae: 2.5437
Epoch 19/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1892 - mae: 2.2898 - val_loss: 10.6196 - val_mae: 2.5297
Epoch 20/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1274 - mae: 2.2927 - val_loss: 10.4877 - val_mae: 2.5452
Epoch 21/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1850 - mae: 2.3036 - val_loss: 10.8154 - val_mae: 2.6066
Epoch 22/120
192/192 [==============================] - 1s 7ms/step - loss: 9.0415 - mae: 2.2936 - val_loss: 10.4587 - val_mae: 2.5523
Epoch 23/120
192/192 [==============================] - 1s 7ms/step - loss: 9.0218 - mae: 2.2915 - val_loss: 10.8650 - val_mae: 2.6075
Epoch 24/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9054 - mae: 2.2762 - val_loss: 10.7141 - val_mae: 2.5097
Epoch 25/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9351 - mae: 2.2770 - val_loss: 10.8067 - val_mae: 2.5149
Epoch 26/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9212 - mae: 2.2816 - val_loss: 10.7823 - val_mae: 2.5207
Epoch 27/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9217 - mae: 2.2746 - val_loss: 10.7150 - val_mae: 2.4986
Epoch 28/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8581 - mae: 2.2740 - val_loss: 10.3250 - val_mae: 2.5182
Epoch 29/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7968 - mae: 2.2664 - val_loss: 10.5907 - val_mae: 2.5445
Epoch 30/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8126 - mae: 2.2775 - val_loss: 10.5641 - val_mae: 2.5239
Epoch 31/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7868 - mae: 2.2692 - val_loss: 10.3398 - val_mae: 2.4993
Epoch 32/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7270 - mae: 2.2435 - val_loss: 10.6047 - val_mae: 2.5457
Epoch 33/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6968 - mae: 2.2507 - val_loss: 11.5282 - val_mae: 2.5537
Epoch 34/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8433 - mae: 2.2678 - val_loss: 10.2971 - val_mae: 2.4910
Epoch 35/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7373 - mae: 2.2637 - val_loss: 10.8358 - val_mae: 2.5013
Epoch 36/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7603 - mae: 2.2681 - val_loss: 10.2447 - val_mae: 2.5276
Epoch 37/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7910 - mae: 2.2609 - val_loss: 10.4073 - val_mae: 2.5027
Epoch 38/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8689 - mae: 2.2828 - val_loss: 10.3262 - val_mae: 2.5020
Epoch 39/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6969 - mae: 2.2473 - val_loss: 10.5129 - val_mae: 2.5453
Epoch 40/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6761 - mae: 2.2409 - val_loss: 10.2135 - val_mae: 2.5048
Epoch 41/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7245 - mae: 2.2638 - val_loss: 10.1239 - val_mae: 2.5099
Epoch 42/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6023 - mae: 2.2474 - val_loss: 10.2947 - val_mae: 2.4958
Epoch 43/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5670 - mae: 2.2331 - val_loss: 10.5132 - val_mae: 2.5452
Epoch 44/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6032 - mae: 2.2499 - val_loss: 10.5104 - val_mae: 2.5121
Epoch 45/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6067 - mae: 2.2466 - val_loss: 10.6856 - val_mae: 2.5001
Epoch 46/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6538 - mae: 2.2409 - val_loss: 10.5716 - val_mae: 2.5236
Epoch 47/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5000 - mae: 2.2157 - val_loss: 10.1787 - val_mae: 2.5401
Epoch 48/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6450 - mae: 2.2453 - val_loss: 10.4221 - val_mae: 2.5005
Epoch 49/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6806 - mae: 2.2428 - val_loss: 10.1182 - val_mae: 2.5104
Epoch 50/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6382 - mae: 2.2564 - val_loss: 10.3926 - val_mae: 2.5428
Epoch 51/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6200 - mae: 2.2456 - val_loss: 10.5664 - val_mae: 2.5886
Epoch 52/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5590 - mae: 2.2418 - val_loss: 10.1660 - val_mae: 2.5596
Epoch 53/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6274 - mae: 2.2438 - val_loss: 10.2563 - val_mae: 2.5414
Epoch 54/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5874 - mae: 2.2324 - val_loss: 10.4387 - val_mae: 2.5061
Epoch 55/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5526 - mae: 2.2264 - val_loss: 10.5113 - val_mae: 2.5143
Epoch 56/120
192/192 [==============================] - 1s 7ms/step - loss: 8.4921 - mae: 2.2269 - val_loss: 10.7427 - val_mae: 2.6702
Epoch 57/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5979 - mae: 2.2321 - val_loss: 10.5826 - val_mae: 2.5766
Epoch 58/120
192/192 [==============================] - 1s 7ms/step - loss: 8.4772 - mae: 2.2227 - val_loss: 10.6396 - val_mae: 2.5578
Epoch 59/120
192/192 [==============================] - 1s 7ms/step - loss: 8.4296 - mae: 2.2169 - val_loss: 10.6299 - val_mae: 2.5328
Epoch 60/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5398 - mae: 2.2254 - val_loss: 10.7579 - val_mae: 2.5219
Epoch 61/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6013 - mae: 2.2447 - val_loss: 10.3163 - val_mae: 2.5088
Epoch 62/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6738 - mae: 2.2461 - val_loss: 10.2869 - val_mae: 2.5310
Epoch 63/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5383 - mae: 2.2315 - val_loss: 10.5402 - val_mae: 2.5241
Epoch 64/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5430 - mae: 2.2324 - val_loss: 10.1462 - val_mae: 2.5255
Epoch 1/120
192/192 [==============================] - 4s 9ms/step - loss: 69.3060 - mae: 6.7198 - val_loss: 47.9370 - val_mae: 5.6586
Epoch 2/120
192/192 [==============================] - 1s 7ms/step - loss: 25.6692 - mae: 3.7519 - val_loss: 27.4579 - val_mae: 3.9894
Epoch 3/120
192/192 [==============================] - 1s 7ms/step - loss: 19.3196 - mae: 3.2664 - val_loss: 21.2644 - val_mae: 3.5001
Epoch 4/120
192/192 [==============================] - 1s 7ms/step - loss: 16.4500 - mae: 2.9936 - val_loss: 18.8367 - val_mae: 3.3309
Epoch 5/120
192/192 [==============================] - 1s 6ms/step - loss: 15.4595 - mae: 2.9314 - val_loss: 18.1462 - val_mae: 3.2887
Epoch 6/120
192/192 [==============================] - 1s 7ms/step - loss: 15.3235 - mae: 2.9361 - val_loss: 17.8271 - val_mae: 3.2719
Epoch 7/120
192/192 [==============================] - 1s 7ms/step - loss: 14.9053 - mae: 2.8813 - val_loss: 17.8298 - val_mae: 3.2406
Epoch 8/120
192/192 [==============================] - 1s 7ms/step - loss: 14.0242 - mae: 2.7942 - val_loss: 16.1503 - val_mae: 3.1002
Epoch 9/120
192/192 [==============================] - 1s 7ms/step - loss: 12.5399 - mae: 2.6156 - val_loss: 14.9839 - val_mae: 3.0212
Epoch 10/120
192/192 [==============================] - 1s 7ms/step - loss: 11.7243 - mae: 2.5428 - val_loss: 14.0220 - val_mae: 2.8245
Epoch 11/120
192/192 [==============================] - 1s 7ms/step - loss: 10.7039 - mae: 2.4286 - val_loss: 12.8010 - val_mae: 2.7079
Epoch 12/120
192/192 [==============================] - 1s 7ms/step - loss: 10.3030 - mae: 2.3953 - val_loss: 12.1130 - val_mae: 2.6528
Epoch 13/120
192/192 [==============================] - 1s 7ms/step - loss: 10.0723 - mae: 2.3689 - val_loss: 12.7730 - val_mae: 2.6765
Epoch 14/120
192/192 [==============================] - 1s 7ms/step - loss: 9.9243 - mae: 2.3672 - val_loss: 11.2048 - val_mae: 2.5930
Epoch 15/120
192/192 [==============================] - 1s 7ms/step - loss: 9.5671 - mae: 2.3159 - val_loss: 11.0781 - val_mae: 2.5740
Epoch 16/120
192/192 [==============================] - 1s 7ms/step - loss: 9.5147 - mae: 2.3238 - val_loss: 10.9575 - val_mae: 2.5617
Epoch 17/120
192/192 [==============================] - 1s 7ms/step - loss: 9.4666 - mae: 2.3208 - val_loss: 10.7297 - val_mae: 2.5513
Epoch 18/120
192/192 [==============================] - 1s 7ms/step - loss: 9.3466 - mae: 2.3172 - val_loss: 10.7766 - val_mae: 2.6144
Epoch 19/120
192/192 [==============================] - 1s 7ms/step - loss: 9.3225 - mae: 2.3100 - val_loss: 10.5515 - val_mae: 2.5311
Epoch 20/120
192/192 [==============================] - 1s 7ms/step - loss: 9.3055 - mae: 2.3162 - val_loss: 10.6927 - val_mae: 2.5202
Epoch 21/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1385 - mae: 2.2879 - val_loss: 10.7670 - val_mae: 2.5173
Epoch 22/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1477 - mae: 2.3070 - val_loss: 10.6100 - val_mae: 2.5022
Epoch 23/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1936 - mae: 2.3208 - val_loss: 10.4443 - val_mae: 2.5203
Epoch 24/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9968 - mae: 2.2857 - val_loss: 10.5591 - val_mae: 2.5035
Epoch 25/120
192/192 [==============================] - 1s 7ms/step - loss: 9.2174 - mae: 2.3221 - val_loss: 10.3760 - val_mae: 2.5304
Epoch 26/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9467 - mae: 2.2775 - val_loss: 10.7989 - val_mae: 2.5147
Epoch 27/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9781 - mae: 2.3004 - val_loss: 10.4549 - val_mae: 2.5051
Epoch 28/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8825 - mae: 2.2723 - val_loss: 10.6575 - val_mae: 2.6133
Epoch 29/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1199 - mae: 2.3045 - val_loss: 10.3907 - val_mae: 2.5156
Epoch 30/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8955 - mae: 2.2839 - val_loss: 10.5196 - val_mae: 2.4930
Epoch 31/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9675 - mae: 2.2863 - val_loss: 10.7318 - val_mae: 2.5602
Epoch 32/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1883 - mae: 2.3022 - val_loss: 10.8457 - val_mae: 2.5077
Epoch 33/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9106 - mae: 2.2797 - val_loss: 11.0691 - val_mae: 2.5275
Epoch 34/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9472 - mae: 2.2964 - val_loss: 10.4702 - val_mae: 2.5025
Epoch 35/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8431 - mae: 2.2712 - val_loss: 10.2675 - val_mae: 2.5146
Epoch 36/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8723 - mae: 2.2757 - val_loss: 10.4510 - val_mae: 2.4964
Epoch 37/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9000 - mae: 2.2827 - val_loss: 10.4647 - val_mae: 2.4954
Epoch 38/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7780 - mae: 2.2706 - val_loss: 10.2826 - val_mae: 2.5447
Epoch 39/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7979 - mae: 2.2629 - val_loss: 10.5594 - val_mae: 2.4981
Epoch 40/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7641 - mae: 2.2582 - val_loss: 10.3127 - val_mae: 2.5137
Epoch 41/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7664 - mae: 2.2619 - val_loss: 11.3729 - val_mae: 2.5470
Epoch 42/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8737 - mae: 2.2771 - val_loss: 10.3982 - val_mae: 2.5304
Epoch 43/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7561 - mae: 2.2675 - val_loss: 10.4679 - val_mae: 2.5076
Epoch 44/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7487 - mae: 2.2623 - val_loss: 10.6014 - val_mae: 2.4968
Epoch 45/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7759 - mae: 2.2713 - val_loss: 10.7706 - val_mae: 2.5165
Epoch 46/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7543 - mae: 2.2674 - val_loss: 10.6295 - val_mae: 2.6046
Epoch 47/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7949 - mae: 2.2671 - val_loss: 10.5068 - val_mae: 2.5387
Epoch 48/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6809 - mae: 2.2454 - val_loss: 10.4845 - val_mae: 2.5066
Epoch 49/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6770 - mae: 2.2551 - val_loss: 10.2847 - val_mae: 2.5253
Epoch 50/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6498 - mae: 2.2543 - val_loss: 10.3331 - val_mae: 2.5339
Epoch 1/120
192/192 [==============================] - 4s 10ms/step - loss: 57.5027 - mae: 6.0917 - val_loss: 41.6975 - val_mae: 5.1545
Epoch 2/120
192/192 [==============================] - 1s 7ms/step - loss: 24.3025 - mae: 3.6736 - val_loss: 25.7083 - val_mae: 3.8503
Epoch 3/120
192/192 [==============================] - 1s 7ms/step - loss: 17.5246 - mae: 3.0603 - val_loss: 20.3834 - val_mae: 3.4347
Epoch 4/120
192/192 [==============================] - 1s 7ms/step - loss: 15.5750 - mae: 2.9264 - val_loss: 18.8488 - val_mae: 3.3243
Epoch 5/120
192/192 [==============================] - 1s 7ms/step - loss: 15.3125 - mae: 2.9042 - val_loss: 17.4768 - val_mae: 3.2181
Epoch 6/120
192/192 [==============================] - 1s 7ms/step - loss: 13.7388 - mae: 2.7270 - val_loss: 16.1020 - val_mae: 3.0836
Epoch 7/120
192/192 [==============================] - 1s 7ms/step - loss: 12.3507 - mae: 2.5981 - val_loss: 14.9049 - val_mae: 2.9549
Epoch 8/120
192/192 [==============================] - 1s 7ms/step - loss: 11.4126 - mae: 2.4972 - val_loss: 13.4184 - val_mae: 2.8198
Epoch 9/120
192/192 [==============================] - 1s 7ms/step - loss: 11.0254 - mae: 2.4529 - val_loss: 12.6970 - val_mae: 2.7752
Epoch 10/120
192/192 [==============================] - 1s 7ms/step - loss: 10.4798 - mae: 2.4139 - val_loss: 12.3339 - val_mae: 2.6788
Epoch 11/120
192/192 [==============================] - 1s 7ms/step - loss: 10.1474 - mae: 2.3795 - val_loss: 11.5332 - val_mae: 2.6446
Epoch 12/120
192/192 [==============================] - 1s 7ms/step - loss: 9.8648 - mae: 2.3525 - val_loss: 11.2388 - val_mae: 2.5958
Epoch 13/120
192/192 [==============================] - 1s 7ms/step - loss: 9.7101 - mae: 2.3411 - val_loss: 11.2233 - val_mae: 2.5678
Epoch 14/120
192/192 [==============================] - 1s 7ms/step - loss: 9.5053 - mae: 2.3280 - val_loss: 10.8817 - val_mae: 2.5488
Epoch 15/120
192/192 [==============================] - 1s 7ms/step - loss: 9.4112 - mae: 2.3208 - val_loss: 10.7122 - val_mae: 2.5439
Epoch 16/120
192/192 [==============================] - 1s 7ms/step - loss: 9.2698 - mae: 2.3020 - val_loss: 10.7571 - val_mae: 2.5222
Epoch 17/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1573 - mae: 2.2846 - val_loss: 10.8333 - val_mae: 2.5223
Epoch 18/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1267 - mae: 2.2871 - val_loss: 10.7947 - val_mae: 2.5188
Epoch 19/120
192/192 [==============================] - 1s 7ms/step - loss: 9.0596 - mae: 2.2977 - val_loss: 11.1089 - val_mae: 2.5306
Epoch 20/120
192/192 [==============================] - 1s 7ms/step - loss: 9.0251 - mae: 2.2822 - val_loss: 10.7333 - val_mae: 2.5025
Epoch 21/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8913 - mae: 2.2687 - val_loss: 11.1405 - val_mae: 2.7052
Epoch 22/120
192/192 [==============================] - 1s 7ms/step - loss: 9.0921 - mae: 2.3026 - val_loss: 10.3269 - val_mae: 2.5091
Epoch 23/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8467 - mae: 2.2759 - val_loss: 10.7893 - val_mae: 2.5004
Epoch 24/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9205 - mae: 2.2655 - val_loss: 10.3067 - val_mae: 2.4971
Epoch 25/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8645 - mae: 2.2638 - val_loss: 10.3257 - val_mae: 2.5575
Epoch 26/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9346 - mae: 2.2921 - val_loss: 10.3681 - val_mae: 2.5767
Epoch 27/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8071 - mae: 2.2638 - val_loss: 10.3269 - val_mae: 2.5647
Epoch 28/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8327 - mae: 2.2754 - val_loss: 10.7237 - val_mae: 2.4939
Epoch 29/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7337 - mae: 2.2584 - val_loss: 11.0500 - val_mae: 2.5200
Epoch 30/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8209 - mae: 2.2728 - val_loss: 10.2689 - val_mae: 2.5060
Epoch 31/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9593 - mae: 2.2921 - val_loss: 10.2972 - val_mae: 2.5279
Epoch 32/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8007 - mae: 2.2645 - val_loss: 10.5769 - val_mae: 2.5022
Epoch 33/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8115 - mae: 2.2611 - val_loss: 10.2589 - val_mae: 2.5488
Epoch 34/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7214 - mae: 2.2601 - val_loss: 10.3574 - val_mae: 2.4997
Epoch 35/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7265 - mae: 2.2477 - val_loss: 10.5904 - val_mae: 2.5252
Epoch 36/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7593 - mae: 2.2619 - val_loss: 10.4075 - val_mae: 2.5025
Epoch 37/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6697 - mae: 2.2589 - val_loss: 10.2516 - val_mae: 2.5654
Epoch 38/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6993 - mae: 2.2502 - val_loss: 10.4736 - val_mae: 2.5527
Epoch 39/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6258 - mae: 2.2356 - val_loss: 10.2790 - val_mae: 2.5034
Epoch 40/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6914 - mae: 2.2517 - val_loss: 10.7624 - val_mae: 2.5062
Epoch 41/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6746 - mae: 2.2473 - val_loss: 10.2913 - val_mae: 2.5088
Epoch 42/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6403 - mae: 2.2453 - val_loss: 10.5544 - val_mae: 2.5215
Epoch 43/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6101 - mae: 2.2427 - val_loss: 10.5007 - val_mae: 2.5032
Epoch 44/120
192/192 [==============================] - 1s 6ms/step - loss: 8.6348 - mae: 2.2380 - val_loss: 10.3510 - val_mae: 2.5047
Epoch 45/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5650 - mae: 2.2303 - val_loss: 10.1758 - val_mae: 2.5439
Epoch 46/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6474 - mae: 2.2548 - val_loss: 10.4521 - val_mae: 2.5106
Epoch 47/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6517 - mae: 2.2449 - val_loss: 10.3489 - val_mae: 2.5060
Epoch 48/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7180 - mae: 2.2556 - val_loss: 10.7368 - val_mae: 2.5233
Epoch 49/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6418 - mae: 2.2407 - val_loss: 10.1857 - val_mae: 2.5285
Epoch 50/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5473 - mae: 2.2265 - val_loss: 10.3637 - val_mae: 2.5094
Epoch 51/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6286 - mae: 2.2344 - val_loss: 10.2280 - val_mae: 2.5455
Epoch 52/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5673 - mae: 2.2288 - val_loss: 10.3791 - val_mae: 2.5268
Epoch 53/120
192/192 [==============================] - 1s 7ms/step - loss: 8.4700 - mae: 2.2232 - val_loss: 10.6997 - val_mae: 2.5247
Epoch 54/120
192/192 [==============================] - 1s 7ms/step - loss: 8.4567 - mae: 2.2230 - val_loss: 11.2671 - val_mae: 2.6615
Epoch 55/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5165 - mae: 2.2183 - val_loss: 11.0889 - val_mae: 2.5304
Epoch 56/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5846 - mae: 2.2423 - val_loss: 10.3798 - val_mae: 2.5491
Epoch 57/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5272 - mae: 2.2259 - val_loss: 10.3616 - val_mae: 2.5241
Epoch 58/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5093 - mae: 2.2235 - val_loss: 10.3682 - val_mae: 2.5609
Epoch 59/120
192/192 [==============================] - 1s 7ms/step - loss: 8.4942 - mae: 2.2251 - val_loss: 10.6855 - val_mae: 2.5164
Epoch 60/120
192/192 [==============================] - 1s 7ms/step - loss: 8.5411 - mae: 2.2282 - val_loss: 10.4639 - val_mae: 2.5192
Epoch 1/120
192/192 [==============================] - 4s 10ms/step - loss: 38.3172 - mae: 4.5374 - val_loss: 21.1297 - val_mae: 3.4896
Epoch 2/120
192/192 [==============================] - 1s 7ms/step - loss: 15.8306 - mae: 2.9570 - val_loss: 18.2116 - val_mae: 3.2882
Epoch 3/120
192/192 [==============================] - 1s 7ms/step - loss: 15.0342 - mae: 2.9127 - val_loss: 17.1012 - val_mae: 3.1636
Epoch 4/120
192/192 [==============================] - 1s 7ms/step - loss: 13.0040 - mae: 2.6897 - val_loss: 14.2992 - val_mae: 2.9242
Epoch 5/120
192/192 [==============================] - 1s 7ms/step - loss: 11.2486 - mae: 2.5091 - val_loss: 13.1648 - val_mae: 2.7282
Epoch 6/120
192/192 [==============================] - 1s 7ms/step - loss: 10.3268 - mae: 2.4040 - val_loss: 11.5584 - val_mae: 2.6881
Epoch 7/120
192/192 [==============================] - 1s 7ms/step - loss: 9.7682 - mae: 2.3524 - val_loss: 11.2568 - val_mae: 2.6193
Epoch 8/120
192/192 [==============================] - 1s 7ms/step - loss: 9.5418 - mae: 2.3205 - val_loss: 10.7678 - val_mae: 2.5773
Epoch 9/120
192/192 [==============================] - 1s 7ms/step - loss: 9.5953 - mae: 2.3538 - val_loss: 10.9961 - val_mae: 2.5395
Epoch 10/120
192/192 [==============================] - 1s 7ms/step - loss: 9.3897 - mae: 2.3377 - val_loss: 11.8386 - val_mae: 2.5869
Epoch 11/120
192/192 [==============================] - 1s 7ms/step - loss: 9.4402 - mae: 2.3325 - val_loss: 11.0317 - val_mae: 2.5264
Epoch 12/120
192/192 [==============================] - 1s 7ms/step - loss: 9.2484 - mae: 2.3213 - val_loss: 10.4708 - val_mae: 2.5604
Epoch 13/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1109 - mae: 2.2994 - val_loss: 10.9694 - val_mae: 2.6882
Epoch 14/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1395 - mae: 2.3151 - val_loss: 10.3788 - val_mae: 2.5050
Epoch 15/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1230 - mae: 2.3009 - val_loss: 10.8069 - val_mae: 2.5261
Epoch 16/120
192/192 [==============================] - 1s 7ms/step - loss: 9.0809 - mae: 2.3056 - val_loss: 10.5908 - val_mae: 2.5230
Epoch 17/120
192/192 [==============================] - 1s 7ms/step - loss: 9.0613 - mae: 2.3054 - val_loss: 11.0530 - val_mae: 2.6275
Epoch 18/120
192/192 [==============================] - 1s 7ms/step - loss: 9.0125 - mae: 2.2939 - val_loss: 10.2486 - val_mae: 2.4981
Epoch 19/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8898 - mae: 2.2888 - val_loss: 11.2531 - val_mae: 2.5349
Epoch 20/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9356 - mae: 2.2795 - val_loss: 10.3039 - val_mae: 2.5056
Epoch 21/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9094 - mae: 2.2819 - val_loss: 10.2063 - val_mae: 2.5100
Epoch 22/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8913 - mae: 2.2909 - val_loss: 10.7974 - val_mae: 2.5800
Epoch 23/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8113 - mae: 2.2690 - val_loss: 10.3655 - val_mae: 2.5298
Epoch 24/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8788 - mae: 2.2752 - val_loss: 10.5309 - val_mae: 2.5051
Epoch 25/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8747 - mae: 2.2808 - val_loss: 10.1799 - val_mae: 2.4990
Epoch 26/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8368 - mae: 2.2785 - val_loss: 11.6466 - val_mae: 2.7805
Epoch 27/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8186 - mae: 2.2681 - val_loss: 10.2308 - val_mae: 2.5263
Epoch 28/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8522 - mae: 2.2638 - val_loss: 10.5735 - val_mae: 2.5762
Epoch 29/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8310 - mae: 2.2622 - val_loss: 10.3667 - val_mae: 2.5319
Epoch 30/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9080 - mae: 2.2818 - val_loss: 10.5654 - val_mae: 2.4989
Epoch 31/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7896 - mae: 2.2682 - val_loss: 10.3469 - val_mae: 2.5522
Epoch 32/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7851 - mae: 2.2633 - val_loss: 10.3554 - val_mae: 2.5100
Epoch 33/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8562 - mae: 2.2797 - val_loss: 10.4124 - val_mae: 2.5159
Epoch 34/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7021 - mae: 2.2544 - val_loss: 10.5840 - val_mae: 2.5216
Epoch 35/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8174 - mae: 2.2531 - val_loss: 10.3610 - val_mae: 2.5403
Epoch 36/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6411 - mae: 2.2534 - val_loss: 10.4226 - val_mae: 2.5444
Epoch 37/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6961 - mae: 2.2573 - val_loss: 10.8879 - val_mae: 2.5523
Epoch 38/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6452 - mae: 2.2500 - val_loss: 10.8517 - val_mae: 2.5429
Epoch 39/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6950 - mae: 2.2619 - val_loss: 10.4104 - val_mae: 2.5141
Epoch 40/120
192/192 [==============================] - 1s 7ms/step - loss: 9.0753 - mae: 2.3054 - val_loss: 10.2178 - val_mae: 2.5408
Epoch 1/120
192/192 [==============================] - 4s 12ms/step - loss: 24.7585 - mae: 3.7222 - val_loss: 17.0984 - val_mae: 3.2242
Epoch 2/120
192/192 [==============================] - 1s 8ms/step - loss: 14.0254 - mae: 2.8208 - val_loss: 14.3192 - val_mae: 2.9115
Epoch 3/120
192/192 [==============================] - 1s 7ms/step - loss: 10.8411 - mae: 2.4672 - val_loss: 12.0781 - val_mae: 2.6283
Epoch 4/120
192/192 [==============================] - 1s 7ms/step - loss: 9.9414 - mae: 2.3813 - val_loss: 10.6852 - val_mae: 2.5772
Epoch 5/120
192/192 [==============================] - 1s 7ms/step - loss: 9.4442 - mae: 2.3368 - val_loss: 10.4692 - val_mae: 2.5090
Epoch 6/120
192/192 [==============================] - 1s 7ms/step - loss: 9.5847 - mae: 2.3737 - val_loss: 11.4033 - val_mae: 2.5493
Epoch 7/120
192/192 [==============================] - 1s 7ms/step - loss: 9.3361 - mae: 2.3189 - val_loss: 10.6209 - val_mae: 2.5477
Epoch 8/120
192/192 [==============================] - 1s 7ms/step - loss: 9.4816 - mae: 2.3585 - val_loss: 13.3284 - val_mae: 3.0217
Epoch 9/120
192/192 [==============================] - 1s 7ms/step - loss: 9.6069 - mae: 2.3666 - val_loss: 10.1542 - val_mae: 2.5316
Epoch 10/120
192/192 [==============================] - 1s 7ms/step - loss: 9.4403 - mae: 2.3612 - val_loss: 10.4976 - val_mae: 2.5839
Epoch 11/120
192/192 [==============================] - 1s 7ms/step - loss: 9.0793 - mae: 2.3000 - val_loss: 10.3623 - val_mae: 2.4979
Epoch 12/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9091 - mae: 2.2913 - val_loss: 11.5874 - val_mae: 2.5761
Epoch 13/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1047 - mae: 2.2950 - val_loss: 10.7759 - val_mae: 2.6435
Epoch 14/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9706 - mae: 2.2902 - val_loss: 10.1644 - val_mae: 2.5087
Epoch 15/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9856 - mae: 2.2897 - val_loss: 10.9460 - val_mae: 2.5127
Epoch 16/120
192/192 [==============================] - 1s 7ms/step - loss: 9.2347 - mae: 2.3153 - val_loss: 10.0651 - val_mae: 2.5355
Epoch 17/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1098 - mae: 2.3124 - val_loss: 10.7868 - val_mae: 2.5775
Epoch 18/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1481 - mae: 2.3205 - val_loss: 10.5654 - val_mae: 2.5220
Epoch 19/120
192/192 [==============================] - 1s 7ms/step - loss: 9.3789 - mae: 2.3266 - val_loss: 10.1297 - val_mae: 2.5289
Epoch 20/120
192/192 [==============================] - 2s 8ms/step - loss: 8.9015 - mae: 2.2822 - val_loss: 10.7598 - val_mae: 2.5195
Epoch 21/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9735 - mae: 2.2882 - val_loss: 11.3732 - val_mae: 2.6988
Epoch 22/120
192/192 [==============================] - 1s 7ms/step - loss: 9.0470 - mae: 2.2987 - val_loss: 10.4677 - val_mae: 2.5997
Epoch 23/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9896 - mae: 2.3032 - val_loss: 10.3675 - val_mae: 2.5891
Epoch 24/120
192/192 [==============================] - 1s 8ms/step - loss: 8.9957 - mae: 2.2947 - val_loss: 10.8312 - val_mae: 2.6299
Epoch 25/120
192/192 [==============================] - 1s 8ms/step - loss: 8.9865 - mae: 2.2837 - val_loss: 10.1110 - val_mae: 2.5210
Epoch 26/120
192/192 [==============================] - 2s 8ms/step - loss: 8.8993 - mae: 2.2889 - val_loss: 10.6349 - val_mae: 2.5142
Epoch 27/120
192/192 [==============================] - 2s 8ms/step - loss: 9.0282 - mae: 2.2997 - val_loss: 10.3587 - val_mae: 2.5798
Epoch 28/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8410 - mae: 2.2541 - val_loss: 10.1178 - val_mae: 2.5465
Epoch 29/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7939 - mae: 2.2693 - val_loss: 10.9490 - val_mae: 2.5725
Epoch 30/120
192/192 [==============================] - 1s 8ms/step - loss: 8.8616 - mae: 2.2564 - val_loss: 10.4963 - val_mae: 2.5420
Epoch 31/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8757 - mae: 2.2874 - val_loss: 10.4275 - val_mae: 2.5085
Epoch 1/120
192/192 [==============================] - 4s 10ms/step - loss: 41.4617 - mae: 4.9264 - val_loss: 23.5260 - val_mae: 3.6769
Epoch 2/120
192/192 [==============================] - 1s 7ms/step - loss: 16.2480 - mae: 2.9836 - val_loss: 22.3248 - val_mae: 3.5837
Epoch 3/120
192/192 [==============================] - 1s 7ms/step - loss: 15.2116 - mae: 2.9301 - val_loss: 17.5402 - val_mae: 3.2082
Epoch 4/120
192/192 [==============================] - 1s 7ms/step - loss: 12.8321 - mae: 2.6541 - val_loss: 14.3338 - val_mae: 2.9038
Epoch 5/120
192/192 [==============================] - 1s 7ms/step - loss: 11.1049 - mae: 2.4765 - val_loss: 12.2444 - val_mae: 2.6925
Epoch 6/120
192/192 [==============================] - 1s 7ms/step - loss: 10.2678 - mae: 2.3931 - val_loss: 11.4367 - val_mae: 2.6139
Epoch 7/120
192/192 [==============================] - 1s 7ms/step - loss: 9.9577 - mae: 2.3632 - val_loss: 11.3742 - val_mae: 2.6005
Epoch 8/120
192/192 [==============================] - 1s 7ms/step - loss: 9.6852 - mae: 2.3599 - val_loss: 10.8338 - val_mae: 2.5944
Epoch 9/120
192/192 [==============================] - 1s 7ms/step - loss: 9.6662 - mae: 2.3611 - val_loss: 10.7091 - val_mae: 2.5627
Epoch 10/120
192/192 [==============================] - 1s 7ms/step - loss: 9.3143 - mae: 2.3237 - val_loss: 10.7036 - val_mae: 2.5269
Epoch 11/120
192/192 [==============================] - 1s 7ms/step - loss: 9.2479 - mae: 2.3181 - val_loss: 10.5239 - val_mae: 2.4980
Epoch 12/120
192/192 [==============================] - 1s 7ms/step - loss: 9.3392 - mae: 2.3185 - val_loss: 11.2362 - val_mae: 2.5298
Epoch 13/120
192/192 [==============================] - 1s 7ms/step - loss: 9.2338 - mae: 2.3215 - val_loss: 10.8502 - val_mae: 2.5069
Epoch 14/120
192/192 [==============================] - 1s 7ms/step - loss: 9.0541 - mae: 2.2890 - val_loss: 10.5260 - val_mae: 2.5018
Epoch 15/120
192/192 [==============================] - 1s 7ms/step - loss: 9.0510 - mae: 2.3011 - val_loss: 10.4584 - val_mae: 2.5257
Epoch 16/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9703 - mae: 2.2929 - val_loss: 10.3849 - val_mae: 2.5249
Epoch 17/120
192/192 [==============================] - 1s 7ms/step - loss: 9.0513 - mae: 2.3028 - val_loss: 10.6502 - val_mae: 2.6293
Epoch 18/120
192/192 [==============================] - 1s 7ms/step - loss: 9.0414 - mae: 2.3102 - val_loss: 10.5210 - val_mae: 2.5987
Epoch 19/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8789 - mae: 2.2830 - val_loss: 10.5821 - val_mae: 2.6070
Epoch 20/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9747 - mae: 2.2994 - val_loss: 10.6631 - val_mae: 2.6214
Epoch 21/120
192/192 [==============================] - 1s 7ms/step - loss: 8.9581 - mae: 2.2961 - val_loss: 10.2647 - val_mae: 2.5297
Epoch 22/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7994 - mae: 2.2621 - val_loss: 10.5217 - val_mae: 2.4998
Epoch 23/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7123 - mae: 2.2558 - val_loss: 10.1446 - val_mae: 2.5326
Epoch 24/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8413 - mae: 2.2750 - val_loss: 10.6265 - val_mae: 2.5106
Epoch 25/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8625 - mae: 2.2681 - val_loss: 10.5611 - val_mae: 2.4989
Epoch 26/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7677 - mae: 2.2695 - val_loss: 11.0589 - val_mae: 2.5302
Epoch 27/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8911 - mae: 2.2850 - val_loss: 10.2406 - val_mae: 2.5067
Epoch 28/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7628 - mae: 2.2630 - val_loss: 10.5180 - val_mae: 2.5077
Epoch 29/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8724 - mae: 2.2711 - val_loss: 10.2166 - val_mae: 2.5469
Epoch 30/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7651 - mae: 2.2608 - val_loss: 10.4112 - val_mae: 2.5826
Epoch 31/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7745 - mae: 2.2596 - val_loss: 10.4926 - val_mae: 2.5097
Epoch 32/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7114 - mae: 2.2617 - val_loss: 10.9518 - val_mae: 2.5120
Epoch 33/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7036 - mae: 2.2610 - val_loss: 10.6395 - val_mae: 2.5158
Epoch 34/120
192/192 [==============================] - 1s 7ms/step - loss: 8.7776 - mae: 2.2643 - val_loss: 10.2912 - val_mae: 2.5213
Epoch 35/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6420 - mae: 2.2511 - val_loss: 10.5066 - val_mae: 2.5527
Epoch 36/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6985 - mae: 2.2656 - val_loss: 11.0547 - val_mae: 2.5460
Epoch 37/120
192/192 [==============================] - 1s 7ms/step - loss: 8.6779 - mae: 2.2605 - val_loss: 10.3957 - val_mae: 2.5753
Epoch 38/120
192/192 [==============================] - 1s 7ms/step - loss: 8.8317 - mae: 2.2742 - val_loss: 10.7890 - val_mae: 2.5314
Epoch 1/120
192/192 [==============================] - 4s 10ms/step - loss: 39.4926 - mae: 4.8993 - val_loss: 19.7211 - val_mae: 3.3496
Epoch 2/120
192/192 [==============================] - 1s 7ms/step - loss: 13.8739 - mae: 2.7437 - val_loss: 14.6356 - val_mae: 2.9378
Epoch 3/120
192/192 [==============================] - 1s 7ms/step - loss: 11.9679 - mae: 2.5577 - val_loss: 13.9563 - val_mae: 2.8407
Epoch 4/120
192/192 [==============================] - 1s 7ms/step - loss: 9.4641 - mae: 2.2050 - val_loss: 9.7903 - val_mae: 2.3625
Epoch 5/120
192/192 [==============================] - 1s 7ms/step - loss: 6.7650 - mae: 1.8484 - val_loss: 7.0412 - val_mae: 1.9597
Epoch 6/120
192/192 [==============================] - 1s 7ms/step - loss: 4.9739 - mae: 1.5767 - val_loss: 5.2901 - val_mae: 1.6581
Epoch 7/120
192/192 [==============================] - 1s 7ms/step - loss: 4.0612 - mae: 1.4431 - val_loss: 4.5233 - val_mae: 1.5462
Epoch 8/120
192/192 [==============================] - 1s 7ms/step - loss: 3.7051 - mae: 1.3969 - val_loss: 4.1525 - val_mae: 1.4908
Epoch 9/120
192/192 [==============================] - 1s 7ms/step - loss: 3.5079 - mae: 1.3816 - val_loss: 4.2445 - val_mae: 1.5102
Epoch 10/120
192/192 [==============================] - 1s 7ms/step - loss: 3.3464 - mae: 1.3565 - val_loss: 3.8923 - val_mae: 1.4626
Epoch 11/120
192/192 [==============================] - 1s 7ms/step - loss: 3.1672 - mae: 1.3266 - val_loss: 3.6211 - val_mae: 1.4330
Epoch 12/120
192/192 [==============================] - 1s 7ms/step - loss: 3.1925 - mae: 1.3324 - val_loss: 3.4924 - val_mae: 1.4181
Epoch 13/120
192/192 [==============================] - 1s 7ms/step - loss: 3.0388 - mae: 1.3032 - val_loss: 3.4402 - val_mae: 1.4398
Epoch 14/120
192/192 [==============================] - 1s 7ms/step - loss: 2.9945 - mae: 1.3021 - val_loss: 3.4589 - val_mae: 1.4364
Epoch 15/120
192/192 [==============================] - 1s 7ms/step - loss: 2.9940 - mae: 1.3025 - val_loss: 3.5710 - val_mae: 1.4549
Epoch 16/120
192/192 [==============================] - 1s 7ms/step - loss: 2.9013 - mae: 1.2836 - val_loss: 3.4337 - val_mae: 1.4201
Epoch 17/120
192/192 [==============================] - 1s 7ms/step - loss: 2.8971 - mae: 1.2970 - val_loss: 3.3894 - val_mae: 1.4281
Epoch 18/120
192/192 [==============================] - 1s 7ms/step - loss: 2.8583 - mae: 1.2715 - val_loss: 3.3520 - val_mae: 1.4132
Epoch 19/120
192/192 [==============================] - 1s 7ms/step - loss: 2.8909 - mae: 1.2880 - val_loss: 4.8075 - val_mae: 1.6866
Epoch 20/120
192/192 [==============================] - 1s 7ms/step - loss: 2.8868 - mae: 1.2911 - val_loss: 3.3617 - val_mae: 1.4310
Epoch 21/120
192/192 [==============================] - 1s 7ms/step - loss: 2.8086 - mae: 1.2633 - val_loss: 3.2454 - val_mae: 1.4161
Epoch 22/120
192/192 [==============================] - 1s 7ms/step - loss: 2.7959 - mae: 1.2783 - val_loss: 3.4058 - val_mae: 1.4260
Epoch 23/120
192/192 [==============================] - 1s 7ms/step - loss: 2.7566 - mae: 1.2575 - val_loss: 3.7470 - val_mae: 1.4983
Epoch 24/120
192/192 [==============================] - 1s 7ms/step - loss: 2.7711 - mae: 1.2628 - val_loss: 3.6091 - val_mae: 1.4604
Epoch 25/120
192/192 [==============================] - 1s 7ms/step - loss: 2.7606 - mae: 1.2558 - val_loss: 3.7649 - val_mae: 1.5023
Epoch 26/120
192/192 [==============================] - 1s 7ms/step - loss: 2.7537 - mae: 1.2558 - val_loss: 3.2721 - val_mae: 1.4124
Epoch 27/120
192/192 [==============================] - 1s 7ms/step - loss: 2.7195 - mae: 1.2515 - val_loss: 3.3921 - val_mae: 1.4382
Epoch 28/120
192/192 [==============================] - 1s 7ms/step - loss: 2.9249 - mae: 1.3065 - val_loss: 3.6760 - val_mae: 1.4725
Epoch 29/120
192/192 [==============================] - 1s 7ms/step - loss: 2.7533 - mae: 1.2567 - val_loss: 3.4319 - val_mae: 1.4365
Epoch 30/120
192/192 [==============================] - 1s 7ms/step - loss: 2.6778 - mae: 1.2390 - val_loss: 3.4807 - val_mae: 1.4499
Epoch 31/120
192/192 [==============================] - 1s 7ms/step - loss: 2.6866 - mae: 1.2387 - val_loss: 3.2965 - val_mae: 1.4224
Epoch 32/120
192/192 [==============================] - 1s 7ms/step - loss: 2.6832 - mae: 1.2333 - val_loss: 3.5731 - val_mae: 1.4789
Epoch 33/120
192/192 [==============================] - 1s 7ms/step - loss: 2.6941 - mae: 1.2333 - val_loss: 3.7202 - val_mae: 1.5685
Epoch 34/120
192/192 [==============================] - 1s 7ms/step - loss: 2.6648 - mae: 1.2338 - val_loss: 3.9618 - val_mae: 1.6055
Epoch 35/120
192/192 [==============================] - 1s 7ms/step - loss: 2.7017 - mae: 1.2410 - val_loss: 3.6206 - val_mae: 1.4815
Epoch 36/120
192/192 [==============================] - 1s 7ms/step - loss: 2.6684 - mae: 1.2305 - val_loss: 3.4654 - val_mae: 1.4564
Epoch 1/120
192/192 [==============================] - 5s 10ms/step - loss: 39.4224 - mae: 4.8064 - val_loss: 19.6580 - val_mae: 3.3853
Epoch 2/120
192/192 [==============================] - 1s 7ms/step - loss: 12.3234 - mae: 2.5674 - val_loss: 12.5863 - val_mae: 2.7170
Epoch 3/120
192/192 [==============================] - 1s 7ms/step - loss: 10.5741 - mae: 2.4021 - val_loss: 11.1359 - val_mae: 2.5887
Epoch 4/120
192/192 [==============================] - 1s 7ms/step - loss: 8.3291 - mae: 2.0557 - val_loss: 8.3974 - val_mae: 2.1695
Epoch 5/120
192/192 [==============================] - 1s 7ms/step - loss: 5.6065 - mae: 1.6445 - val_loss: 5.4757 - val_mae: 1.7031
Epoch 6/120
192/192 [==============================] - 1s 7ms/step - loss: 3.6612 - mae: 1.2864 - val_loss: 3.7976 - val_mae: 1.4054
Epoch 7/120
192/192 [==============================] - 1s 7ms/step - loss: 2.6078 - mae: 1.0881 - val_loss: 2.5481 - val_mae: 1.0690
Epoch 8/120
192/192 [==============================] - 1s 7ms/step - loss: 1.8082 - mae: 0.8588 - val_loss: 1.9373 - val_mae: 0.9437
Epoch 9/120
192/192 [==============================] - 1s 7ms/step - loss: 1.4435 - mae: 0.7872 - val_loss: 1.4971 - val_mae: 0.8335
Epoch 10/120
192/192 [==============================] - 1s 7ms/step - loss: 1.2777 - mae: 0.7711 - val_loss: 1.3012 - val_mae: 0.7793
Epoch 11/120
192/192 [==============================] - 1s 7ms/step - loss: 1.0931 - mae: 0.7240 - val_loss: 1.2708 - val_mae: 0.8020
Epoch 12/120
192/192 [==============================] - 1s 7ms/step - loss: 1.0304 - mae: 0.7181 - val_loss: 1.5270 - val_mae: 0.9335
Epoch 13/120
192/192 [==============================] - 1s 7ms/step - loss: 0.9624 - mae: 0.7024 - val_loss: 1.2168 - val_mae: 0.8056
Epoch 14/120
192/192 [==============================] - 1s 7ms/step - loss: 0.8789 - mae: 0.6813 - val_loss: 1.0313 - val_mae: 0.7447
Epoch 15/120
192/192 [==============================] - 1s 7ms/step - loss: 0.8653 - mae: 0.6787 - val_loss: 1.2567 - val_mae: 0.8519
Epoch 16/120
192/192 [==============================] - 1s 7ms/step - loss: 0.8392 - mae: 0.6842 - val_loss: 1.0057 - val_mae: 0.7476
Epoch 17/120
192/192 [==============================] - 1s 8ms/step - loss: 0.7856 - mae: 0.6555 - val_loss: 0.8933 - val_mae: 0.7090
Epoch 18/120
192/192 [==============================] - 1s 7ms/step - loss: 0.8226 - mae: 0.6793 - val_loss: 1.1858 - val_mae: 0.8520
Epoch 19/120
192/192 [==============================] - 1s 7ms/step - loss: 0.8000 - mae: 0.6709 - val_loss: 0.8897 - val_mae: 0.7086
Epoch 20/120
192/192 [==============================] - 1s 7ms/step - loss: 0.7601 - mae: 0.6556 - val_loss: 0.9238 - val_mae: 0.7278
Epoch 21/120
192/192 [==============================] - 1s 7ms/step - loss: 0.7121 - mae: 0.6334 - val_loss: 0.8002 - val_mae: 0.6815
Epoch 22/120
192/192 [==============================] - 1s 7ms/step - loss: 0.7482 - mae: 0.6511 - val_loss: 0.7855 - val_mae: 0.6796
Epoch 23/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6866 - mae: 0.6214 - val_loss: 0.8926 - val_mae: 0.7307
Epoch 24/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6938 - mae: 0.6319 - val_loss: 0.7860 - val_mae: 0.6880
Epoch 25/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6870 - mae: 0.6309 - val_loss: 0.7870 - val_mae: 0.6898
Epoch 26/120
192/192 [==============================] - 1s 7ms/step - loss: 0.7014 - mae: 0.6333 - val_loss: 0.7768 - val_mae: 0.6848
Epoch 27/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6820 - mae: 0.6264 - val_loss: 0.7275 - val_mae: 0.6615
Epoch 28/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6703 - mae: 0.6211 - val_loss: 0.7609 - val_mae: 0.6836
Epoch 29/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6727 - mae: 0.6257 - val_loss: 0.7654 - val_mae: 0.6878
Epoch 30/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6414 - mae: 0.6112 - val_loss: 0.7780 - val_mae: 0.6899
Epoch 31/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6624 - mae: 0.6236 - val_loss: 0.8948 - val_mae: 0.7487
Epoch 32/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6641 - mae: 0.6233 - val_loss: 0.7684 - val_mae: 0.6781
Epoch 33/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6624 - mae: 0.6108 - val_loss: 0.7971 - val_mae: 0.6981
Epoch 34/120
192/192 [==============================] - 1s 7ms/step - loss: 0.7100 - mae: 0.6446 - val_loss: 0.7576 - val_mae: 0.6821
Epoch 35/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6723 - mae: 0.6257 - val_loss: 0.7747 - val_mae: 0.6914
Epoch 36/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6480 - mae: 0.6134 - val_loss: 0.8384 - val_mae: 0.7173
Epoch 37/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6407 - mae: 0.6141 - val_loss: 0.8551 - val_mae: 0.7288
Epoch 38/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6418 - mae: 0.6114 - val_loss: 0.7598 - val_mae: 0.6779
Epoch 39/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6435 - mae: 0.6165 - val_loss: 0.8107 - val_mae: 0.7040
Epoch 40/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6636 - mae: 0.6209 - val_loss: 0.8310 - val_mae: 0.7116
Epoch 41/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6406 - mae: 0.6223 - val_loss: 0.7492 - val_mae: 0.6979
Epoch 42/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6077 - mae: 0.5964 - val_loss: 0.8870 - val_mae: 0.7498
Epoch 1/120
192/192 [==============================] - 4s 10ms/step - loss: 38.2752 - mae: 4.8879 - val_loss: 16.2174 - val_mae: 3.0718
Epoch 2/120
192/192 [==============================] - 1s 7ms/step - loss: 10.6057 - mae: 2.3890 - val_loss: 10.5002 - val_mae: 2.4994
Epoch 3/120
192/192 [==============================] - 1s 7ms/step - loss: 9.1932 - mae: 2.2157 - val_loss: 9.7208 - val_mae: 2.3283
Epoch 4/120
192/192 [==============================] - 1s 7ms/step - loss: 6.5769 - mae: 1.8196 - val_loss: 6.7777 - val_mae: 1.9086
Epoch 5/120
192/192 [==============================] - 1s 7ms/step - loss: 4.2538 - mae: 1.3815 - val_loss: 4.2128 - val_mae: 1.4472
Epoch 6/120
192/192 [==============================] - 1s 7ms/step - loss: 2.8663 - mae: 1.1054 - val_loss: 2.6699 - val_mae: 1.1179
Epoch 7/120
192/192 [==============================] - 1s 7ms/step - loss: 1.9954 - mae: 0.8987 - val_loss: 1.8841 - val_mae: 0.9050
Epoch 8/120
192/192 [==============================] - 1s 8ms/step - loss: 1.3687 - mae: 0.7270 - val_loss: 1.2995 - val_mae: 0.7441
Epoch 9/120
192/192 [==============================] - 1s 7ms/step - loss: 1.0254 - mae: 0.6179 - val_loss: 0.9583 - val_mae: 0.6190
Epoch 10/120
192/192 [==============================] - 1s 7ms/step - loss: 0.7700 - mae: 0.5272 - val_loss: 0.7142 - val_mae: 0.5183
Epoch 11/120
192/192 [==============================] - 1s 7ms/step - loss: 0.6090 - mae: 0.4714 - val_loss: 0.7984 - val_mae: 0.6559
Epoch 12/120
192/192 [==============================] - 1s 7ms/step - loss: 0.5098 - mae: 0.4507 - val_loss: 0.4680 - val_mae: 0.4126
Epoch 13/120
192/192 [==============================] - 1s 7ms/step - loss: 0.4295 - mae: 0.4167 - val_loss: 0.3992 - val_mae: 0.3875
Epoch 14/120
192/192 [==============================] - 1s 7ms/step - loss: 0.3640 - mae: 0.3941 - val_loss: 0.3517 - val_mae: 0.3681
Epoch 15/120
192/192 [==============================] - 1s 7ms/step - loss: 0.3006 - mae: 0.3506 - val_loss: 0.3213 - val_mae: 0.3652
Epoch 16/120
192/192 [==============================] - 1s 7ms/step - loss: 0.2823 - mae: 0.3441 - val_loss: 0.4463 - val_mae: 0.4954
Epoch 17/120
192/192 [==============================] - 1s 7ms/step - loss: 0.2503 - mae: 0.3271 - val_loss: 0.2888 - val_mae: 0.3610
Epoch 18/120
192/192 [==============================] - 1s 7ms/step - loss: 0.2460 - mae: 0.3420 - val_loss: 0.2316 - val_mae: 0.3245
Epoch 19/120
192/192 [==============================] - 1s 7ms/step - loss: 0.2348 - mae: 0.3437 - val_loss: 0.2297 - val_mae: 0.3359
Epoch 20/120
192/192 [==============================] - 1s 7ms/step - loss: 0.2248 - mae: 0.3352 - val_loss: 0.2150 - val_mae: 0.3220
Epoch 21/120
192/192 [==============================] - 1s 7ms/step - loss: 0.2025 - mae: 0.3197 - val_loss: 0.1866 - val_mae: 0.3044
Epoch 22/120
192/192 [==============================] - 1s 7ms/step - loss: 0.2093 - mae: 0.3299 - val_loss: 0.3286 - val_mae: 0.4458
Epoch 23/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1928 - mae: 0.3210 - val_loss: 0.2348 - val_mae: 0.3652
Epoch 24/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1717 - mae: 0.3050 - val_loss: 0.1890 - val_mae: 0.3175
Epoch 25/120
192/192 [==============================] - 1s 7ms/step - loss: 0.2063 - mae: 0.3337 - val_loss: 0.2087 - val_mae: 0.3524
Epoch 26/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1844 - mae: 0.3250 - val_loss: 0.1678 - val_mae: 0.3051
Epoch 27/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1524 - mae: 0.2861 - val_loss: 0.1497 - val_mae: 0.2893
Epoch 28/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1559 - mae: 0.2885 - val_loss: 0.2336 - val_mae: 0.3679
Epoch 29/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1703 - mae: 0.3116 - val_loss: 0.1997 - val_mae: 0.3389
Epoch 30/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1662 - mae: 0.3099 - val_loss: 0.1498 - val_mae: 0.2953
Epoch 31/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1478 - mae: 0.2899 - val_loss: 0.1606 - val_mae: 0.3091
Epoch 32/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1203 - mae: 0.2625 - val_loss: 0.1435 - val_mae: 0.2954
Epoch 33/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1546 - mae: 0.2968 - val_loss: 0.1704 - val_mae: 0.3238
Epoch 34/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1798 - mae: 0.3244 - val_loss: 0.2513 - val_mae: 0.3853
Epoch 35/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1846 - mae: 0.3312 - val_loss: 0.1511 - val_mae: 0.2972
Epoch 36/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1362 - mae: 0.2814 - val_loss: 0.1421 - val_mae: 0.2929
Epoch 37/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1782 - mae: 0.3190 - val_loss: 0.1333 - val_mae: 0.2827
Epoch 38/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1467 - mae: 0.2883 - val_loss: 0.1447 - val_mae: 0.2966
Epoch 39/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1357 - mae: 0.2823 - val_loss: 0.1577 - val_mae: 0.3139
Epoch 40/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1282 - mae: 0.2736 - val_loss: 0.1410 - val_mae: 0.2918
Epoch 41/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1203 - mae: 0.2661 - val_loss: 0.1400 - val_mae: 0.3016
Epoch 42/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1234 - mae: 0.2737 - val_loss: 0.1729 - val_mae: 0.3305
Epoch 43/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1251 - mae: 0.2724 - val_loss: 0.2017 - val_mae: 0.3576
Epoch 44/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1258 - mae: 0.2704 - val_loss: 0.1429 - val_mae: 0.2976
Epoch 45/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1242 - mae: 0.2703 - val_loss: 0.2017 - val_mae: 0.3596
Epoch 46/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1200 - mae: 0.2660 - val_loss: 0.1514 - val_mae: 0.2962
Epoch 47/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1265 - mae: 0.2737 - val_loss: 0.1478 - val_mae: 0.3038
Epoch 48/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1288 - mae: 0.2800 - val_loss: 0.1515 - val_mae: 0.3052
Epoch 49/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1242 - mae: 0.2695 - val_loss: 0.1232 - val_mae: 0.2739
Epoch 50/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1187 - mae: 0.2653 - val_loss: 0.1657 - val_mae: 0.3206
Epoch 51/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1247 - mae: 0.2704 - val_loss: 0.1397 - val_mae: 0.2949
Epoch 52/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1149 - mae: 0.2635 - val_loss: 0.1261 - val_mae: 0.2795
Epoch 53/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1245 - mae: 0.2698 - val_loss: 0.1456 - val_mae: 0.3060
Epoch 54/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1442 - mae: 0.2932 - val_loss: 0.1340 - val_mae: 0.2886
Epoch 55/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1177 - mae: 0.2654 - val_loss: 0.1353 - val_mae: 0.2842
Epoch 56/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1295 - mae: 0.2739 - val_loss: 0.2217 - val_mae: 0.3752
Epoch 57/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1332 - mae: 0.2768 - val_loss: 0.1362 - val_mae: 0.2919
Epoch 58/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1341 - mae: 0.2782 - val_loss: 0.1568 - val_mae: 0.3080
Epoch 59/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1192 - mae: 0.2655 - val_loss: 0.1253 - val_mae: 0.2795
Epoch 60/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1089 - mae: 0.2525 - val_loss: 0.1649 - val_mae: 0.3235
Epoch 61/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1225 - mae: 0.2703 - val_loss: 0.1392 - val_mae: 0.2904
Epoch 62/120
192/192 [==============================] - 1s 8ms/step - loss: 0.1380 - mae: 0.2832 - val_loss: 0.1357 - val_mae: 0.2847
Epoch 63/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1100 - mae: 0.2554 - val_loss: 0.1396 - val_mae: 0.2965
Epoch 64/120
192/192 [==============================] - 1s 7ms/step - loss: 0.1142 - mae: 0.2578 - val_loss: 0.1373 - val_mae: 0.2931
Epoch 1/120
193/193 [==============================] - 4s 9ms/step - loss: 31.0828 - mae: 4.2342 - val_loss: 17.2363 - val_mae: 3.1235
Epoch 2/120
193/193 [==============================] - 1s 7ms/step - loss: 10.9653 - mae: 2.4192 - val_loss: 12.2583 - val_mae: 2.7317
Epoch 3/120
193/193 [==============================] - 1s 6ms/step - loss: 8.1825 - mae: 2.0489 - val_loss: 9.1364 - val_mae: 2.3107
Epoch 4/120
193/193 [==============================] - 1s 6ms/step - loss: 5.5997 - mae: 1.6585 - val_loss: 6.3323 - val_mae: 1.8331
Epoch 5/120
193/193 [==============================] - 1s 6ms/step - loss: 3.7489 - mae: 1.3244 - val_loss: 4.1719 - val_mae: 1.4532
Epoch 6/120
193/193 [==============================] - 1s 7ms/step - loss: 2.5593 - mae: 1.0759 - val_loss: 2.9887 - val_mae: 1.2245
Epoch 7/120
193/193 [==============================] - 1s 6ms/step - loss: 1.8577 - mae: 0.9038 - val_loss: 1.9544 - val_mae: 0.9556
Epoch 8/120
193/193 [==============================] - 1s 6ms/step - loss: 1.4230 - mae: 0.7881 - val_loss: 1.5645 - val_mae: 0.8781
Epoch 9/120
193/193 [==============================] - 1s 7ms/step - loss: 1.1828 - mae: 0.7406 - val_loss: 1.3821 - val_mae: 0.8361
Epoch 10/120
193/193 [==============================] - 1s 6ms/step - loss: 1.0887 - mae: 0.7260 - val_loss: 1.1943 - val_mae: 0.7870
Epoch 11/120
193/193 [==============================] - 1s 6ms/step - loss: 0.9731 - mae: 0.7000 - val_loss: 1.2316 - val_mae: 0.8292
Epoch 12/120
193/193 [==============================] - 1s 7ms/step - loss: 0.9195 - mae: 0.6900 - val_loss: 1.0415 - val_mae: 0.7404
Epoch 13/120
193/193 [==============================] - 1s 6ms/step - loss: 0.8566 - mae: 0.6756 - val_loss: 0.9716 - val_mae: 0.7267
Epoch 14/120
193/193 [==============================] - 1s 6ms/step - loss: 0.8633 - mae: 0.6859 - val_loss: 1.0075 - val_mae: 0.7558
Epoch 15/120
193/193 [==============================] - 1s 7ms/step - loss: 0.7977 - mae: 0.6583 - val_loss: 0.9521 - val_mae: 0.7433
Epoch 16/120
193/193 [==============================] - 1s 7ms/step - loss: 0.7526 - mae: 0.6492 - val_loss: 0.9147 - val_mae: 0.7264
Epoch 17/120
193/193 [==============================] - 1s 7ms/step - loss: 0.7550 - mae: 0.6455 - val_loss: 0.8997 - val_mae: 0.7181
Epoch 18/120
193/193 [==============================] - 1s 7ms/step - loss: 0.7507 - mae: 0.6478 - val_loss: 0.8577 - val_mae: 0.7075
Epoch 19/120
193/193 [==============================] - 1s 7ms/step - loss: 0.7340 - mae: 0.6467 - val_loss: 0.8284 - val_mae: 0.7019
Epoch 20/120
193/193 [==============================] - 1s 7ms/step - loss: 0.7651 - mae: 0.6643 - val_loss: 0.8373 - val_mae: 0.7011
Epoch 21/120
193/193 [==============================] - 1s 7ms/step - loss: 0.7083 - mae: 0.6352 - val_loss: 0.8061 - val_mae: 0.6962
Epoch 22/120
193/193 [==============================] - 1s 6ms/step - loss: 0.7062 - mae: 0.6352 - val_loss: 0.7708 - val_mae: 0.6849
Epoch 23/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6974 - mae: 0.6314 - val_loss: 0.7768 - val_mae: 0.6819
Epoch 24/120
193/193 [==============================] - 1s 6ms/step - loss: 0.6920 - mae: 0.6309 - val_loss: 0.8696 - val_mae: 0.7357
Epoch 25/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6549 - mae: 0.6126 - val_loss: 0.8710 - val_mae: 0.7265
Epoch 26/120
193/193 [==============================] - 1s 6ms/step - loss: 0.6416 - mae: 0.6037 - val_loss: 0.7698 - val_mae: 0.6781
Epoch 27/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6868 - mae: 0.6329 - val_loss: 1.0706 - val_mae: 0.8116
Epoch 28/120
193/193 [==============================] - 1s 6ms/step - loss: 0.6424 - mae: 0.6114 - val_loss: 0.7697 - val_mae: 0.6826
Epoch 29/120
193/193 [==============================] - 1s 6ms/step - loss: 0.6512 - mae: 0.6148 - val_loss: 0.8134 - val_mae: 0.7076
Epoch 30/120
193/193 [==============================] - 1s 6ms/step - loss: 0.6620 - mae: 0.6177 - val_loss: 0.7723 - val_mae: 0.6928
Epoch 31/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6413 - mae: 0.6081 - val_loss: 0.8981 - val_mae: 0.7511
Epoch 32/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6394 - mae: 0.6075 - val_loss: 0.8305 - val_mae: 0.7256
Epoch 33/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6250 - mae: 0.6026 - val_loss: 0.7522 - val_mae: 0.6788
Epoch 34/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6153 - mae: 0.5992 - val_loss: 0.8629 - val_mae: 0.7202
Epoch 35/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6108 - mae: 0.5982 - val_loss: 0.8312 - val_mae: 0.7111
Epoch 36/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6080 - mae: 0.5940 - val_loss: 0.7163 - val_mae: 0.6650
Epoch 37/120
193/193 [==============================] - 1s 6ms/step - loss: 0.6376 - mae: 0.6060 - val_loss: 0.7307 - val_mae: 0.6713
Epoch 38/120
193/193 [==============================] - 1s 6ms/step - loss: 0.6108 - mae: 0.5926 - val_loss: 0.7652 - val_mae: 0.6869
Epoch 39/120
193/193 [==============================] - 1s 6ms/step - loss: 0.6483 - mae: 0.6153 - val_loss: 0.7341 - val_mae: 0.6705
Epoch 40/120
193/193 [==============================] - 1s 6ms/step - loss: 0.6227 - mae: 0.6030 - val_loss: 0.7249 - val_mae: 0.6677
Epoch 41/120
193/193 [==============================] - 1s 6ms/step - loss: 0.6174 - mae: 0.6020 - val_loss: 0.7346 - val_mae: 0.6754
Epoch 42/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6217 - mae: 0.6050 - val_loss: 0.8559 - val_mae: 0.7236
Epoch 43/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6068 - mae: 0.5987 - val_loss: 0.7415 - val_mae: 0.6746
Epoch 44/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6419 - mae: 0.6137 - val_loss: 0.7069 - val_mae: 0.6623
Epoch 45/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6200 - mae: 0.5970 - val_loss: 0.7453 - val_mae: 0.6799
Epoch 46/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6315 - mae: 0.6042 - val_loss: 0.7759 - val_mae: 0.6833
Epoch 47/120
193/193 [==============================] - 1s 7ms/step - loss: 0.5997 - mae: 0.5886 - val_loss: 0.8403 - val_mae: 0.7095
Epoch 48/120
193/193 [==============================] - 1s 7ms/step - loss: 0.5913 - mae: 0.5859 - val_loss: 0.7737 - val_mae: 0.6869
Epoch 49/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6229 - mae: 0.5983 - val_loss: 0.9838 - val_mae: 0.7840
Epoch 50/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6458 - mae: 0.6139 - val_loss: 0.9353 - val_mae: 0.7625
Epoch 51/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6159 - mae: 0.5964 - val_loss: 0.8019 - val_mae: 0.6900
Epoch 52/120
193/193 [==============================] - 1s 6ms/step - loss: 0.5812 - mae: 0.5802 - val_loss: 0.7842 - val_mae: 0.6910
Epoch 53/120
193/193 [==============================] - 1s 7ms/step - loss: 0.6025 - mae: 0.5894 - val_loss: 0.8452 - val_mae: 0.7141
Epoch 54/120
193/193 [==============================] - 1s 6ms/step - loss: 0.5992 - mae: 0.5914 - val_loss: 0.7953 - val_mae: 0.6916
Epoch 55/120
193/193 [==============================] - 1s 6ms/step - loss: 0.5952 - mae: 0.5843 - val_loss: 0.7257 - val_mae: 0.6663
Epoch 56/120
193/193 [==============================] - 1s 7ms/step - loss: 0.5988 - mae: 0.5918 - val_loss: 0.8095 - val_mae: 0.7022
Epoch 57/120
193/193 [==============================] - 1s 7ms/step - loss: 0.5846 - mae: 0.5864 - val_loss: 0.7900 - val_mae: 0.7098
Epoch 58/120
193/193 [==============================] - 1s 7ms/step - loss: 0.5969 - mae: 0.5854 - val_loss: 0.8197 - val_mae: 0.6997
Epoch 59/120
193/193 [==============================] - 1s 7ms/step - loss: 0.5811 - mae: 0.5783 - val_loss: 0.7916 - val_mae: 0.6958
[training log condensed: epochs 1-58/120, 192 steps/epoch; val_loss fell from 16.6348 to a best of 0.7111 (val_mae 0.6715, epoch 43); run ended after epoch 58 with val_loss 0.7396]
[training log condensed: epochs 1-57/120, 189 steps/epoch; val_loss fell from 17.9556 to a best of 0.7035 (val_mae 0.6518, epoch 42); run ended after epoch 57 with val_loss 0.8049]
[training log condensed: epochs 1-81/120, 189 steps/epoch; val_loss fell from 19.9163 to a best of 0.7087 (val_mae 0.6568, epoch 66); run ended after epoch 81 with val_loss 0.7392]
[training log condensed: epochs 1-64/120, 189 steps/epoch; val_loss fell from 19.2633 to a best of 0.7019 (val_mae 0.6598, epoch 49); run ended after epoch 64 with val_loss 0.7131]
[training log condensed: epochs 1-55/120, 189 steps/epoch; val_loss fell from 27.0084 to a best of 0.7558 (val_mae 0.6829, epoch 40); run ended after epoch 55 with val_loss 0.7863]
[training log condensed: epochs 1-83/120, 189 steps/epoch; val_loss fell from 64.2365 to a best of 0.7469 (val_mae 0.6751, epoch 68); run ended after epoch 83 with val_loss 0.7502]
[training log condensed: epochs 1-66/120, 189 steps/epoch; val_loss fell from 15.8499 to a best of 0.7138 (val_mae 0.6614, epoch 51); run ended after epoch 66 with val_loss 0.7208]
Epoch 1/120
189/189 [==============================] - 3s 9ms/step - loss: 27.5450 - mae: 3.7531 - val_loss: 9.6313 - val_mae: 2.3578
Epoch 2/120
189/189 [==============================] - 1s 7ms/step - loss: 6.3607 - mae: 1.8320 - val_loss: 6.4137 - val_mae: 1.9009
Epoch 3/120
189/189 [==============================] - 1s 7ms/step - loss: 4.1454 - mae: 1.4639 - val_loss: 4.1726 - val_mae: 1.5380
Epoch 4/120
189/189 [==============================] - 1s 7ms/step - loss: 2.9892 - mae: 1.2508 - val_loss: 3.3383 - val_mae: 1.3525
Epoch 5/120
189/189 [==============================] - 1s 7ms/step - loss: 2.2238 - mae: 1.0846 - val_loss: 2.4990 - val_mae: 1.1603
Epoch 6/120
189/189 [==============================] - 1s 7ms/step - loss: 1.7992 - mae: 0.9650 - val_loss: 1.9785 - val_mae: 1.0297
Epoch 7/120
189/189 [==============================] - 1s 7ms/step - loss: 1.4256 - mae: 0.8717 - val_loss: 1.5725 - val_mae: 0.9290
Epoch 8/120
189/189 [==============================] - 1s 7ms/step - loss: 1.1736 - mae: 0.7932 - val_loss: 1.3176 - val_mae: 0.8491
Epoch 9/120
189/189 [==============================] - 1s 7ms/step - loss: 1.0096 - mae: 0.7367 - val_loss: 1.1265 - val_mae: 0.7901
Epoch 10/120
189/189 [==============================] - 1s 7ms/step - loss: 0.8759 - mae: 0.6916 - val_loss: 1.0406 - val_mae: 0.7731
Epoch 11/120
189/189 [==============================] - 1s 7ms/step - loss: 0.8424 - mae: 0.6767 - val_loss: 1.0057 - val_mae: 0.7641
Epoch 12/120
189/189 [==============================] - 1s 7ms/step - loss: 0.7608 - mae: 0.6500 - val_loss: 0.9301 - val_mae: 0.7372
Epoch 13/120
189/189 [==============================] - 1s 7ms/step - loss: 0.7492 - mae: 0.6431 - val_loss: 0.8927 - val_mae: 0.7285
Epoch 14/120
189/189 [==============================] - 1s 7ms/step - loss: 0.7258 - mae: 0.6375 - val_loss: 0.8700 - val_mae: 0.7162
Epoch 15/120
189/189 [==============================] - 1s 7ms/step - loss: 0.7433 - mae: 0.6452 - val_loss: 0.8341 - val_mae: 0.7058
Epoch 16/120
189/189 [==============================] - 1s 7ms/step - loss: 0.7182 - mae: 0.6346 - val_loss: 0.8264 - val_mae: 0.7042
Epoch 17/120
189/189 [==============================] - 1s 7ms/step - loss: 0.7007 - mae: 0.6304 - val_loss: 0.8164 - val_mae: 0.7024
Epoch 18/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6952 - mae: 0.6270 - val_loss: 0.8438 - val_mae: 0.7210
Epoch 19/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6693 - mae: 0.6154 - val_loss: 0.7708 - val_mae: 0.6772
Epoch 20/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6700 - mae: 0.6205 - val_loss: 0.7995 - val_mae: 0.7003
Epoch 21/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6964 - mae: 0.6306 - val_loss: 0.7740 - val_mae: 0.6928
Epoch 22/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6712 - mae: 0.6214 - val_loss: 0.8077 - val_mae: 0.6998
Epoch 23/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6562 - mae: 0.6124 - val_loss: 0.7688 - val_mae: 0.6900
Epoch 24/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6486 - mae: 0.6146 - val_loss: 0.7504 - val_mae: 0.6759
Epoch 25/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6998 - mae: 0.6307 - val_loss: 1.0010 - val_mae: 0.7975
Epoch 26/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6695 - mae: 0.6175 - val_loss: 0.8050 - val_mae: 0.7024
Epoch 27/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6606 - mae: 0.6144 - val_loss: 0.7453 - val_mae: 0.6731
Epoch 28/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6259 - mae: 0.6013 - val_loss: 0.7771 - val_mae: 0.6870
Epoch 29/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6324 - mae: 0.6086 - val_loss: 0.7900 - val_mae: 0.6958
Epoch 30/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6159 - mae: 0.6019 - val_loss: 0.7273 - val_mae: 0.6652
Epoch 31/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6404 - mae: 0.6068 - val_loss: 0.7454 - val_mae: 0.6706
Epoch 32/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6211 - mae: 0.5980 - val_loss: 0.7086 - val_mae: 0.6575
Epoch 33/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6084 - mae: 0.5924 - val_loss: 0.7584 - val_mae: 0.6784
Epoch 34/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6608 - mae: 0.6217 - val_loss: 0.7491 - val_mae: 0.6788
Epoch 35/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6408 - mae: 0.6086 - val_loss: 0.7296 - val_mae: 0.6635
Epoch 36/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6364 - mae: 0.6119 - val_loss: 0.7131 - val_mae: 0.6598
Epoch 37/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6242 - mae: 0.6015 - val_loss: 0.7667 - val_mae: 0.6878
Epoch 38/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6303 - mae: 0.6081 - val_loss: 0.9231 - val_mae: 0.7585
Epoch 39/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6371 - mae: 0.6092 - val_loss: 0.7212 - val_mae: 0.6588
Epoch 40/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6189 - mae: 0.6016 - val_loss: 0.7360 - val_mae: 0.6675
Epoch 41/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6226 - mae: 0.6042 - val_loss: 0.7106 - val_mae: 0.6553
Epoch 42/120
189/189 [==============================] - 2s 8ms/step - loss: 0.6363 - mae: 0.6086 - val_loss: 0.7078 - val_mae: 0.6576
Epoch 43/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6029 - mae: 0.5910 - val_loss: 0.8167 - val_mae: 0.7040
Epoch 44/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6139 - mae: 0.5971 - val_loss: 0.8124 - val_mae: 0.7073
Epoch 45/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6277 - mae: 0.6007 - val_loss: 0.7767 - val_mae: 0.6878
Epoch 46/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6108 - mae: 0.5949 - val_loss: 0.7765 - val_mae: 0.6918
Epoch 47/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6475 - mae: 0.6135 - val_loss: 0.7405 - val_mae: 0.6695
Epoch 48/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6255 - mae: 0.6017 - val_loss: 0.7479 - val_mae: 0.6759
Epoch 49/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6139 - mae: 0.5968 - val_loss: 0.7030 - val_mae: 0.6530
Epoch 50/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6177 - mae: 0.6006 - val_loss: 0.7252 - val_mae: 0.6640
Epoch 51/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6051 - mae: 0.5930 - val_loss: 0.7141 - val_mae: 0.6600
Epoch 52/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6262 - mae: 0.6079 - val_loss: 0.7604 - val_mae: 0.6787
Epoch 53/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6239 - mae: 0.6020 - val_loss: 0.8416 - val_mae: 0.7200
Epoch 54/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6093 - mae: 0.5947 - val_loss: 0.7552 - val_mae: 0.6793
Epoch 55/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6287 - mae: 0.6079 - val_loss: 0.8617 - val_mae: 0.7380
Epoch 56/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6240 - mae: 0.6011 - val_loss: 0.7092 - val_mae: 0.6499
Epoch 57/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6064 - mae: 0.6002 - val_loss: 0.7715 - val_mae: 0.6785
Epoch 58/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6307 - mae: 0.6125 - val_loss: 0.7316 - val_mae: 0.6660
Epoch 59/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6182 - mae: 0.5980 - val_loss: 0.8154 - val_mae: 0.6999
Epoch 60/120
189/189 [==============================] - 1s 7ms/step - loss: 0.5935 - mae: 0.5897 - val_loss: 0.7440 - val_mae: 0.6666
Epoch 61/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6129 - mae: 0.6003 - val_loss: 0.7110 - val_mae: 0.6617
Epoch 62/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6072 - mae: 0.5949 - val_loss: 0.7016 - val_mae: 0.6516
Epoch 63/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6103 - mae: 0.5997 - val_loss: 0.7586 - val_mae: 0.6769
Epoch 64/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6086 - mae: 0.5999 - val_loss: 0.7323 - val_mae: 0.6637
Epoch 65/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6149 - mae: 0.6004 - val_loss: 0.7227 - val_mae: 0.6610
Epoch 66/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6373 - mae: 0.6093 - val_loss: 0.7388 - val_mae: 0.6743
Epoch 67/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6320 - mae: 0.6116 - val_loss: 0.6995 - val_mae: 0.6504
Epoch 68/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6003 - mae: 0.5947 - val_loss: 0.7149 - val_mae: 0.6558
Epoch 69/120
189/189 [==============================] - 1s 7ms/step - loss: 0.5970 - mae: 0.5896 - val_loss: 0.7000 - val_mae: 0.6494
Epoch 70/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6063 - mae: 0.5917 - val_loss: 0.7107 - val_mae: 0.6563
Epoch 71/120
189/189 [==============================] - 1s 7ms/step - loss: 0.5956 - mae: 0.5927 - val_loss: 0.7099 - val_mae: 0.6543
Epoch 72/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6032 - mae: 0.5950 - val_loss: 0.8491 - val_mae: 0.7196
Epoch 73/120
189/189 [==============================] - 1s 7ms/step - loss: 0.5951 - mae: 0.5916 - val_loss: 0.6824 - val_mae: 0.6465
Epoch 74/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6767 - mae: 0.6267 - val_loss: 0.8037 - val_mae: 0.6976
Epoch 75/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6322 - mae: 0.6079 - val_loss: 0.7111 - val_mae: 0.6557
Epoch 76/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6064 - mae: 0.5986 - val_loss: 0.7040 - val_mae: 0.6513
Epoch 77/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6003 - mae: 0.5911 - val_loss: 0.8245 - val_mae: 0.7072
Epoch 78/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6030 - mae: 0.5959 - val_loss: 0.8950 - val_mae: 0.7620
Epoch 79/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6026 - mae: 0.5915 - val_loss: 0.7022 - val_mae: 0.6525
Epoch 80/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6001 - mae: 0.5976 - val_loss: 0.8361 - val_mae: 0.7123
Epoch 81/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6048 - mae: 0.5971 - val_loss: 0.7112 - val_mae: 0.6514
Epoch 82/120
189/189 [==============================] - 1s 7ms/step - loss: 0.5916 - mae: 0.5868 - val_loss: 0.8201 - val_mae: 0.7104
Epoch 83/120
189/189 [==============================] - 1s 7ms/step - loss: 0.5961 - mae: 0.5914 - val_loss: 0.7278 - val_mae: 0.6627
Epoch 84/120
189/189 [==============================] - 1s 8ms/step - loss: 0.5948 - mae: 0.5903 - val_loss: 0.8148 - val_mae: 0.7067
Epoch 85/120
189/189 [==============================] - 1s 8ms/step - loss: 0.5875 - mae: 0.5849 - val_loss: 0.6971 - val_mae: 0.6470
Epoch 86/120
189/189 [==============================] - 1s 7ms/step - loss: 0.5871 - mae: 0.5867 - val_loss: 0.7656 - val_mae: 0.6924
Epoch 87/120
189/189 [==============================] - 1s 8ms/step - loss: 0.5963 - mae: 0.5907 - val_loss: 0.7785 - val_mae: 0.6858
Epoch 88/120
189/189 [==============================] - 1s 7ms/step - loss: 0.5932 - mae: 0.5918 - val_loss: 0.7697 - val_mae: 0.6822
Epoch 1/120
189/189 [==============================] - 3s 9ms/step - loss: 33.9651 - mae: 4.2580 - val_loss: 16.9745 - val_mae: 3.2227
Epoch 2/120
189/189 [==============================] - 1s 7ms/step - loss: 12.9371 - mae: 2.7065 - val_loss: 14.4041 - val_mae: 2.9067
Epoch 3/120
189/189 [==============================] - 1s 8ms/step - loss: 11.2938 - mae: 2.5312 - val_loss: 12.8566 - val_mae: 2.7407
Epoch 4/120
189/189 [==============================] - 1s 7ms/step - loss: 10.2553 - mae: 2.4172 - val_loss: 11.9095 - val_mae: 2.6340
Epoch 5/120
189/189 [==============================] - 1s 7ms/step - loss: 9.8175 - mae: 2.3914 - val_loss: 10.8945 - val_mae: 2.5618
Epoch 6/120
189/189 [==============================] - 1s 7ms/step - loss: 9.4369 - mae: 2.3337 - val_loss: 10.6322 - val_mae: 2.5477
Epoch 7/120
189/189 [==============================] - 1s 7ms/step - loss: 9.3634 - mae: 2.3348 - val_loss: 10.6807 - val_mae: 2.5054
Epoch 8/120
189/189 [==============================] - 1s 7ms/step - loss: 9.2245 - mae: 2.3088 - val_loss: 10.4154 - val_mae: 2.5012
Epoch 9/120
189/189 [==============================] - 1s 7ms/step - loss: 9.1463 - mae: 2.3016 - val_loss: 11.2599 - val_mae: 2.5247
Epoch 10/120
189/189 [==============================] - 1s 8ms/step - loss: 9.1213 - mae: 2.3117 - val_loss: 10.3469 - val_mae: 2.5185
Epoch 11/120
189/189 [==============================] - 1s 7ms/step - loss: 9.2780 - mae: 2.3415 - val_loss: 10.7591 - val_mae: 2.4982
Epoch 12/120
189/189 [==============================] - 1s 8ms/step - loss: 9.2096 - mae: 2.3185 - val_loss: 10.8900 - val_mae: 2.5008
Epoch 13/120
189/189 [==============================] - 1s 7ms/step - loss: 9.1271 - mae: 2.3190 - val_loss: 10.5281 - val_mae: 2.4882
Epoch 14/120
189/189 [==============================] - 1s 8ms/step - loss: 9.1241 - mae: 2.3226 - val_loss: 10.6643 - val_mae: 2.4979
Epoch 15/120
189/189 [==============================] - 1s 7ms/step - loss: 9.1707 - mae: 2.3150 - val_loss: 10.3638 - val_mae: 2.5095
Epoch 16/120
189/189 [==============================] - 1s 8ms/step - loss: 9.0890 - mae: 2.3160 - val_loss: 10.2628 - val_mae: 2.5106
Epoch 17/120
189/189 [==============================] - 1s 7ms/step - loss: 9.0057 - mae: 2.3081 - val_loss: 10.4261 - val_mae: 2.5698
Epoch 18/120
189/189 [==============================] - 1s 8ms/step - loss: 9.0866 - mae: 2.3091 - val_loss: 10.3138 - val_mae: 2.5307
Epoch 19/120
189/189 [==============================] - 1s 7ms/step - loss: 9.1337 - mae: 2.3238 - val_loss: 11.1429 - val_mae: 2.5211
Epoch 20/120
189/189 [==============================] - 1s 7ms/step - loss: 9.0392 - mae: 2.3025 - val_loss: 10.3060 - val_mae: 2.5097
Epoch 21/120
189/189 [==============================] - 1s 7ms/step - loss: 9.0582 - mae: 2.3126 - val_loss: 10.4586 - val_mae: 2.5394
Epoch 22/120
189/189 [==============================] - 1s 7ms/step - loss: 8.9371 - mae: 2.2983 - val_loss: 10.6967 - val_mae: 2.5449
Epoch 23/120
189/189 [==============================] - 1s 8ms/step - loss: 8.9941 - mae: 2.3082 - val_loss: 10.4153 - val_mae: 2.5070
Epoch 24/120
189/189 [==============================] - 1s 7ms/step - loss: 9.0550 - mae: 2.3124 - val_loss: 10.3318 - val_mae: 2.5093
Epoch 25/120
189/189 [==============================] - 1s 7ms/step - loss: 8.9632 - mae: 2.2921 - val_loss: 10.6731 - val_mae: 2.6136
Epoch 26/120
189/189 [==============================] - 1s 8ms/step - loss: 8.9853 - mae: 2.3036 - val_loss: 10.6101 - val_mae: 2.6122
Epoch 27/120
189/189 [==============================] - 1s 8ms/step - loss: 9.0183 - mae: 2.2992 - val_loss: 10.5447 - val_mae: 2.5083
Epoch 28/120
189/189 [==============================] - 1s 7ms/step - loss: 8.9145 - mae: 2.2853 - val_loss: 10.4754 - val_mae: 2.4955
Epoch 29/120
189/189 [==============================] - 1s 8ms/step - loss: 9.0905 - mae: 2.3136 - val_loss: 11.0337 - val_mae: 2.5075
Epoch 30/120
189/189 [==============================] - 1s 7ms/step - loss: 8.8960 - mae: 2.2843 - val_loss: 10.4398 - val_mae: 2.5317
Epoch 31/120
189/189 [==============================] - 1s 7ms/step - loss: 8.9973 - mae: 2.3137 - val_loss: 10.8342 - val_mae: 2.4977
Epoch 1/120
189/189 [==============================] - 3s 9ms/step - loss: 27.6469 - mae: 3.7721 - val_loss: 11.9337 - val_mae: 2.6963
Epoch 2/120
189/189 [==============================] - 1s 7ms/step - loss: 8.6783 - mae: 2.1629 - val_loss: 8.8065 - val_mae: 2.2549
Epoch 3/120
189/189 [==============================] - 1s 7ms/step - loss: 6.2620 - mae: 1.8547 - val_loss: 6.7910 - val_mae: 1.9715
Epoch 4/120
189/189 [==============================] - 1s 7ms/step - loss: 4.8690 - mae: 1.6405 - val_loss: 5.2693 - val_mae: 1.7295
Epoch 5/120
189/189 [==============================] - 1s 7ms/step - loss: 3.9947 - mae: 1.4943 - val_loss: 4.4022 - val_mae: 1.5844
Epoch 6/120
189/189 [==============================] - 1s 7ms/step - loss: 3.4576 - mae: 1.3855 - val_loss: 3.8800 - val_mae: 1.4716
Epoch 7/120
189/189 [==============================] - 1s 7ms/step - loss: 3.1854 - mae: 1.3452 - val_loss: 3.9502 - val_mae: 1.4801
Epoch 8/120
189/189 [==============================] - 1s 7ms/step - loss: 3.0675 - mae: 1.3288 - val_loss: 3.5766 - val_mae: 1.4295
Epoch 9/120
189/189 [==============================] - 1s 7ms/step - loss: 3.0437 - mae: 1.3278 - val_loss: 3.3734 - val_mae: 1.4247
Epoch 10/120
189/189 [==============================] - 1s 7ms/step - loss: 3.0986 - mae: 1.3435 - val_loss: 3.4318 - val_mae: 1.4505
Epoch 11/120
189/189 [==============================] - 2s 8ms/step - loss: 3.0011 - mae: 1.3220 - val_loss: 3.3882 - val_mae: 1.4169
Epoch 12/120
189/189 [==============================] - 1s 7ms/step - loss: 2.9368 - mae: 1.3020 - val_loss: 3.4790 - val_mae: 1.4315
Epoch 13/120
189/189 [==============================] - 1s 7ms/step - loss: 2.8391 - mae: 1.2875 - val_loss: 3.5261 - val_mae: 1.4619
Epoch 14/120
189/189 [==============================] - 1s 7ms/step - loss: 2.9151 - mae: 1.3039 - val_loss: 3.3631 - val_mae: 1.4097
Epoch 15/120
189/189 [==============================] - 1s 7ms/step - loss: 2.8766 - mae: 1.2911 - val_loss: 3.3491 - val_mae: 1.4096
Epoch 16/120
189/189 [==============================] - 1s 7ms/step - loss: 2.9230 - mae: 1.3124 - val_loss: 3.3134 - val_mae: 1.4090
Epoch 17/120
189/189 [==============================] - 1s 7ms/step - loss: 2.9043 - mae: 1.3096 - val_loss: 3.3760 - val_mae: 1.4414
Epoch 18/120
189/189 [==============================] - 1s 7ms/step - loss: 2.8965 - mae: 1.2883 - val_loss: 3.4638 - val_mae: 1.4694
Epoch 19/120
189/189 [==============================] - 1s 7ms/step - loss: 2.8550 - mae: 1.2844 - val_loss: 3.2612 - val_mae: 1.4062
Epoch 20/120
189/189 [==============================] - 1s 7ms/step - loss: 2.8240 - mae: 1.2741 - val_loss: 3.4415 - val_mae: 1.4789
Epoch 21/120
189/189 [==============================] - 1s 8ms/step - loss: 2.8642 - mae: 1.2828 - val_loss: 3.3267 - val_mae: 1.4087
Epoch 22/120
189/189 [==============================] - 1s 7ms/step - loss: 2.8075 - mae: 1.2790 - val_loss: 3.4459 - val_mae: 1.4271
Epoch 23/120
189/189 [==============================] - 1s 8ms/step - loss: 2.7687 - mae: 1.2752 - val_loss: 3.2168 - val_mae: 1.4010
Epoch 24/120
189/189 [==============================] - 1s 7ms/step - loss: 2.8344 - mae: 1.2780 - val_loss: 3.3493 - val_mae: 1.4115
Epoch 25/120
189/189 [==============================] - 1s 8ms/step - loss: 2.7741 - mae: 1.2702 - val_loss: 3.4237 - val_mae: 1.4708
Epoch 26/120
189/189 [==============================] - 1s 8ms/step - loss: 2.8779 - mae: 1.2909 - val_loss: 3.4713 - val_mae: 1.4515
Epoch 27/120
189/189 [==============================] - 1s 7ms/step - loss: 2.8472 - mae: 1.2907 - val_loss: 3.3931 - val_mae: 1.4513
Epoch 28/120
189/189 [==============================] - 1s 7ms/step - loss: 2.8275 - mae: 1.2765 - val_loss: 3.2394 - val_mae: 1.4157
Epoch 29/120
189/189 [==============================] - 1s 8ms/step - loss: 2.7854 - mae: 1.2705 - val_loss: 3.2944 - val_mae: 1.4112
Epoch 30/120
189/189 [==============================] - 1s 7ms/step - loss: 2.7871 - mae: 1.2695 - val_loss: 3.3025 - val_mae: 1.4185
Epoch 31/120
189/189 [==============================] - 1s 8ms/step - loss: 2.7618 - mae: 1.2608 - val_loss: 3.4450 - val_mae: 1.4362
Epoch 32/120
189/189 [==============================] - 1s 7ms/step - loss: 2.7477 - mae: 1.2592 - val_loss: 3.2629 - val_mae: 1.4033
Epoch 33/120
189/189 [==============================] - 1s 8ms/step - loss: 2.7653 - mae: 1.2610 - val_loss: 3.3098 - val_mae: 1.4135
Epoch 34/120
189/189 [==============================] - 1s 7ms/step - loss: 2.7601 - mae: 1.2645 - val_loss: 3.4005 - val_mae: 1.4430
Epoch 35/120
189/189 [==============================] - 1s 8ms/step - loss: 2.7635 - mae: 1.2627 - val_loss: 3.3106 - val_mae: 1.4108
Epoch 36/120
189/189 [==============================] - 1s 8ms/step - loss: 2.7331 - mae: 1.2499 - val_loss: 3.2502 - val_mae: 1.4269
Epoch 37/120
189/189 [==============================] - 1s 8ms/step - loss: 2.7603 - mae: 1.2648 - val_loss: 3.2716 - val_mae: 1.4152
Epoch 38/120
189/189 [==============================] - 1s 8ms/step - loss: 2.7306 - mae: 1.2484 - val_loss: 3.3098 - val_mae: 1.4186
Epoch 1/120
189/189 [==============================] - 3s 10ms/step - loss: 24.6330 - mae: 3.5781 - val_loss: 9.3742 - val_mae: 2.3531
Epoch 2/120
189/189 [==============================] - 1s 7ms/step - loss: 6.1415 - mae: 1.8105 - val_loss: 5.8992 - val_mae: 1.8564
Epoch 3/120
189/189 [==============================] - 1s 7ms/step - loss: 4.1412 - mae: 1.4738 - val_loss: 4.3799 - val_mae: 1.5727
Epoch 4/120
189/189 [==============================] - 1s 7ms/step - loss: 2.9542 - mae: 1.2501 - val_loss: 3.4121 - val_mae: 1.3740
Epoch 5/120
189/189 [==============================] - 2s 8ms/step - loss: 2.1971 - mae: 1.0710 - val_loss: 2.3266 - val_mae: 1.1254
Epoch 6/120
189/189 [==============================] - 1s 8ms/step - loss: 1.6778 - mae: 0.9362 - val_loss: 1.9994 - val_mae: 1.0397
Epoch 7/120
189/189 [==============================] - 1s 8ms/step - loss: 1.3940 - mae: 0.8693 - val_loss: 1.5723 - val_mae: 0.9193
Epoch 8/120
189/189 [==============================] - 1s 7ms/step - loss: 1.1315 - mae: 0.7750 - val_loss: 1.5600 - val_mae: 0.9456
Epoch 9/120
189/189 [==============================] - 1s 8ms/step - loss: 0.9585 - mae: 0.7188 - val_loss: 1.1277 - val_mae: 0.7971
Epoch 10/120
189/189 [==============================] - 1s 8ms/step - loss: 0.8645 - mae: 0.6863 - val_loss: 0.9998 - val_mae: 0.7586
Epoch 11/120
189/189 [==============================] - 1s 7ms/step - loss: 0.8076 - mae: 0.6627 - val_loss: 0.9716 - val_mae: 0.7557
Epoch 12/120
189/189 [==============================] - 1s 7ms/step - loss: 0.7557 - mae: 0.6423 - val_loss: 0.9473 - val_mae: 0.7475
Epoch 13/120
189/189 [==============================] - 1s 7ms/step - loss: 0.7209 - mae: 0.6289 - val_loss: 0.9360 - val_mae: 0.7400
Epoch 14/120
189/189 [==============================] - 1s 7ms/step - loss: 0.7346 - mae: 0.6429 - val_loss: 1.0481 - val_mae: 0.7982
Epoch 15/120
189/189 [==============================] - 2s 8ms/step - loss: 0.7153 - mae: 0.6362 - val_loss: 0.8082 - val_mae: 0.6973
Epoch 16/120
189/189 [==============================] - 1s 7ms/step - loss: 0.7077 - mae: 0.6335 - val_loss: 0.8484 - val_mae: 0.7056
Epoch 17/120
189/189 [==============================] - 1s 7ms/step - loss: 0.7017 - mae: 0.6292 - val_loss: 0.8223 - val_mae: 0.7023
Epoch 18/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6627 - mae: 0.6139 - val_loss: 0.8188 - val_mae: 0.7057
Epoch 19/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6761 - mae: 0.6195 - val_loss: 0.7719 - val_mae: 0.6772
Epoch 20/120
189/189 [==============================] - 1s 7ms/step - loss: 0.7002 - mae: 0.6306 - val_loss: 0.7953 - val_mae: 0.6899
Epoch 21/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6828 - mae: 0.6227 - val_loss: 0.8045 - val_mae: 0.6973
Epoch 22/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6783 - mae: 0.6233 - val_loss: 0.7934 - val_mae: 0.6921
Epoch 23/120
189/189 [==============================] - 1s 7ms/step - loss: 0.7009 - mae: 0.6363 - val_loss: 0.8330 - val_mae: 0.7114
Epoch 24/120
189/189 [==============================] - 1s 7ms/step - loss: 0.7124 - mae: 0.6393 - val_loss: 0.8413 - val_mae: 0.7143
Epoch 25/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6833 - mae: 0.6261 - val_loss: 0.8249 - val_mae: 0.7041
Epoch 26/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6784 - mae: 0.6227 - val_loss: 0.8106 - val_mae: 0.7018
Epoch 27/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6503 - mae: 0.6059 - val_loss: 0.7563 - val_mae: 0.6808
Epoch 28/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6905 - mae: 0.6273 - val_loss: 0.7534 - val_mae: 0.6776
Epoch 29/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6692 - mae: 0.6227 - val_loss: 0.8005 - val_mae: 0.7044
Epoch 30/120
189/189 [==============================] - 1s 7ms/step - loss: 0.8131 - mae: 0.6876 - val_loss: 0.9004 - val_mae: 0.7525
Epoch 31/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6743 - mae: 0.6244 - val_loss: 0.7436 - val_mae: 0.6759
Epoch 32/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6366 - mae: 0.6079 - val_loss: 0.8233 - val_mae: 0.7235
Epoch 33/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6404 - mae: 0.6103 - val_loss: 0.8357 - val_mae: 0.7161
Epoch 34/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6327 - mae: 0.6081 - val_loss: 0.8103 - val_mae: 0.7026
Epoch 35/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6505 - mae: 0.6122 - val_loss: 0.8216 - val_mae: 0.7106
Epoch 36/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6246 - mae: 0.6080 - val_loss: 0.7222 - val_mae: 0.6620
Epoch 37/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6598 - mae: 0.6203 - val_loss: 0.7251 - val_mae: 0.6688
Epoch 38/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6175 - mae: 0.5983 - val_loss: 0.7082 - val_mae: 0.6595
Epoch 39/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6236 - mae: 0.6043 - val_loss: 0.7209 - val_mae: 0.6610
Epoch 40/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6218 - mae: 0.6008 - val_loss: 0.7508 - val_mae: 0.6778
Epoch 41/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6415 - mae: 0.6113 - val_loss: 0.7591 - val_mae: 0.6793
Epoch 42/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6163 - mae: 0.5983 - val_loss: 0.7000 - val_mae: 0.6504
Epoch 43/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6140 - mae: 0.5967 - val_loss: 0.7244 - val_mae: 0.6661
Epoch 44/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6190 - mae: 0.6000 - val_loss: 0.7466 - val_mae: 0.6777
Epoch 45/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6120 - mae: 0.5943 - val_loss: 0.7643 - val_mae: 0.6902
Epoch 46/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6415 - mae: 0.6151 - val_loss: 0.7468 - val_mae: 0.6711
Epoch 47/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6130 - mae: 0.5903 - val_loss: 0.7531 - val_mae: 0.6771
Epoch 48/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6382 - mae: 0.6062 - val_loss: 0.7442 - val_mae: 0.6743
Epoch 49/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6367 - mae: 0.6104 - val_loss: 0.8159 - val_mae: 0.7058
Epoch 50/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6003 - mae: 0.5952 - val_loss: 0.7227 - val_mae: 0.6670
Epoch 51/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6282 - mae: 0.6080 - val_loss: 0.7273 - val_mae: 0.6611
Epoch 52/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6176 - mae: 0.6018 - val_loss: 0.7724 - val_mae: 0.6907
Epoch 53/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6133 - mae: 0.5947 - val_loss: 0.7174 - val_mae: 0.6622
Epoch 54/120
189/189 [==============================] - 1s 8ms/step - loss: 0.6254 - mae: 0.6045 - val_loss: 0.7844 - val_mae: 0.6888
Epoch 55/120
189/189 [==============================] - 2s 8ms/step - loss: 0.6192 - mae: 0.5996 - val_loss: 0.7636 - val_mae: 0.6882
Epoch 56/120
189/189 [==============================] - 2s 8ms/step - loss: 0.6298 - mae: 0.6060 - val_loss: 0.7537 - val_mae: 0.6783
Epoch 57/120
189/189 [==============================] - 2s 8ms/step - loss: 0.6115 - mae: 0.5970 - val_loss: 0.7095 - val_mae: 0.6549
Epoch 1/120
189/189 [==============================] - 3s 10ms/step - loss: 25.9326 - mae: 3.6498 - val_loss: 10.0485 - val_mae: 2.3923
Epoch 2/120
189/189 [==============================] - 1s 7ms/step - loss: 6.4171 - mae: 1.8206 - val_loss: 5.6008 - val_mae: 1.7908
Epoch 3/120
189/189 [==============================] - 1s 7ms/step - loss: 3.6777 - mae: 1.3675 - val_loss: 3.5144 - val_mae: 1.4066
Epoch 4/120
189/189 [==============================] - 1s 7ms/step - loss: 2.5691 - mae: 1.1451 - val_loss: 2.6789 - val_mae: 1.2060
Epoch 5/120
189/189 [==============================] - 1s 7ms/step - loss: 1.8663 - mae: 0.9812 - val_loss: 1.9042 - val_mae: 1.0264
Epoch 6/120
189/189 [==============================] - 1s 8ms/step - loss: 1.4158 - mae: 0.8475 - val_loss: 1.5056 - val_mae: 0.9161
Epoch 7/120
189/189 [==============================] - 1s 7ms/step - loss: 1.1239 - mae: 0.7615 - val_loss: 1.3505 - val_mae: 0.8850
Epoch 8/120
189/189 [==============================] - 1s 7ms/step - loss: 0.9217 - mae: 0.6989 - val_loss: 1.0058 - val_mae: 0.7595
Epoch 9/120
189/189 [==============================] - 1s 7ms/step - loss: 0.7731 - mae: 0.6335 - val_loss: 0.8483 - val_mae: 0.6751
Epoch 10/120
189/189 [==============================] - 1s 7ms/step - loss: 0.6447 - mae: 0.5780 - val_loss: 0.7200 - val_mae: 0.6381
Epoch 11/120
189/189 [==============================] - 1s 7ms/step - loss: 0.5687 - mae: 0.5448 - val_loss: 0.6013 - val_mae: 0.5752
Epoch 12/120
189/189 [==============================] - 1s 8ms/step - loss: 0.4668 - mae: 0.5008 - val_loss: 0.6732 - val_mae: 0.6200
Epoch 13/120
189/189 [==============================] - 1s 7ms/step - loss: 0.4381 - mae: 0.4836 - val_loss: 0.4538 - val_mae: 0.4994
Epoch 14/120
189/189 [==============================] - 1s 7ms/step - loss: 0.3560 - mae: 0.4399 - val_loss: 0.3939 - val_mae: 0.4666
Epoch 15/120
189/189 [==============================] - 1s 7ms/step - loss: 0.3039 - mae: 0.4040 - val_loss: 0.3560 - val_mae: 0.4481
Epoch 16/120
189/189 [==============================] - 1s 8ms/step - loss: 0.2579 - mae: 0.3722 - val_loss: 0.3528 - val_mae: 0.4510
Epoch 17/120
189/189 [==============================] - 1s 7ms/step - loss: 0.2402 - mae: 0.3555 - val_loss: 0.2658 - val_mae: 0.3904
Epoch 18/120
189/189 [==============================] - 1s 8ms/step - loss: 0.2132 - mae: 0.3413 - val_loss: 0.2498 - val_mae: 0.3830
Epoch 19/120
189/189 [==============================] - 1s 7ms/step - loss: 0.2063 - mae: 0.3378 - val_loss: 0.2962 - val_mae: 0.4159
Epoch 20/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1850 - mae: 0.3167 - val_loss: 0.2341 - val_mae: 0.3693
Epoch 21/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1805 - mae: 0.3177 - val_loss: 0.2144 - val_mae: 0.3529
Epoch 22/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1857 - mae: 0.3162 - val_loss: 0.4981 - val_mae: 0.5510
Epoch 23/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1894 - mae: 0.3220 - val_loss: 0.1919 - val_mae: 0.3361
Epoch 24/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1628 - mae: 0.2995 - val_loss: 0.1946 - val_mae: 0.3382
Epoch 25/120
189/189 [==============================] - 2s 10ms/step - loss: 0.1505 - mae: 0.2907 - val_loss: 0.1646 - val_mae: 0.3149
Epoch 26/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1483 - mae: 0.2903 - val_loss: 0.1849 - val_mae: 0.3351
Epoch 27/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1384 - mae: 0.2802 - val_loss: 0.1542 - val_mae: 0.3025
Epoch 28/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1399 - mae: 0.2839 - val_loss: 0.1903 - val_mae: 0.3444
Epoch 29/120
189/189 [==============================] - 1s 7ms/step - loss: 0.2237 - mae: 0.3491 - val_loss: 0.1760 - val_mae: 0.3187
Epoch 30/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1549 - mae: 0.2980 - val_loss: 0.1725 - val_mae: 0.3195
Epoch 31/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1335 - mae: 0.2746 - val_loss: 0.1441 - val_mae: 0.2923
Epoch 32/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1404 - mae: 0.2774 - val_loss: 0.1629 - val_mae: 0.3108
Epoch 33/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1209 - mae: 0.2614 - val_loss: 0.1515 - val_mae: 0.3025
Epoch 34/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1413 - mae: 0.2857 - val_loss: 0.1321 - val_mae: 0.2805
Epoch 35/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1381 - mae: 0.2833 - val_loss: 0.1319 - val_mae: 0.2809
Epoch 36/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1219 - mae: 0.2642 - val_loss: 0.1764 - val_mae: 0.3302
Epoch 37/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1333 - mae: 0.2759 - val_loss: 0.1513 - val_mae: 0.3017
Epoch 38/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1276 - mae: 0.2718 - val_loss: 0.1473 - val_mae: 0.2982
Epoch 39/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1188 - mae: 0.2616 - val_loss: 0.1463 - val_mae: 0.2992
Epoch 40/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1104 - mae: 0.2539 - val_loss: 0.1217 - val_mae: 0.2706
Epoch 41/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1101 - mae: 0.2531 - val_loss: 0.2228 - val_mae: 0.3797
Epoch 42/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1352 - mae: 0.2838 - val_loss: 0.1700 - val_mae: 0.3260
Epoch 43/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1219 - mae: 0.2656 - val_loss: 0.1838 - val_mae: 0.3423
Epoch 44/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1104 - mae: 0.2570 - val_loss: 0.1256 - val_mae: 0.2760
Epoch 45/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1123 - mae: 0.2595 - val_loss: 0.1269 - val_mae: 0.2758
Epoch 46/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1174 - mae: 0.2539 - val_loss: 0.1324 - val_mae: 0.2823
Epoch 47/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1107 - mae: 0.2523 - val_loss: 0.1143 - val_mae: 0.2672
Epoch 48/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1074 - mae: 0.2515 - val_loss: 0.1278 - val_mae: 0.2791
Epoch 49/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1062 - mae: 0.2489 - val_loss: 0.1175 - val_mae: 0.2677
Epoch 50/120
189/189 [==============================] - 1s 7ms/step - loss: 0.1111 - mae: 0.2535 - val_loss: 0.1185 - val_mae: 0.2694
Epoch 51/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1054 - mae: 0.2492 - val_loss: 0.1419 - val_mae: 0.2990
Epoch 52/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1050 - mae: 0.2480 - val_loss: 0.1408 - val_mae: 0.2933
Epoch 53/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1413 - mae: 0.2838 - val_loss: 0.2014 - val_mae: 0.3499
Epoch 54/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1466 - mae: 0.2924 - val_loss: 0.1571 - val_mae: 0.3047
Epoch 55/120
189/189 [==============================] - 1s 7ms/step - loss: 0.1172 - mae: 0.2630 - val_loss: 0.1225 - val_mae: 0.2741
Epoch 56/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1208 - mae: 0.2662 - val_loss: 0.1681 - val_mae: 0.3249
Epoch 57/120
189/189 [==============================] - 1s 8ms/step - loss: 0.2160 - mae: 0.3172 - val_loss: 0.3956 - val_mae: 0.5003
Epoch 58/120
189/189 [==============================] - 1s 8ms/step - loss: 0.1930 - mae: 0.3262 - val_loss: 0.1531 - val_mae: 0.3008
Epoch 59/120
189/189 [==============================] - 1s 7ms/step - loss: 0.1290 - mae: 0.2710 - val_loss: 0.1452 - val_mae: 0.3029
Epoch 60/120
189/189 [==============================] - 2s 9ms/step - loss: 0.1217 - mae: 0.2650 - val_loss: 0.1255 - val_mae: 0.2748
Epoch 61/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1130 - mae: 0.2553 - val_loss: 0.1238 - val_mae: 0.2726
Epoch 62/120
189/189 [==============================] - 2s 8ms/step - loss: 0.1137 - mae: 0.2550 - val_loss: 0.1246 - val_mae: 0.2730
###Markdown
Analyse the Grid Search Results
###Code
# load the saved grid-search records (two runs, concatenated)
records = pd.read_csv('records_1622140823.csv')
records2 = pd.read_csv('records_1622141750.csv')
gs = pd.concat([records, records2])
# mean validation MAE for each setting of each searched hyperparameter
gs.groupby('length')['val_mae_og'].mean().plot(kind='bar', title='mean'); plt.show()
gs.groupby('layers_num')['val_mae_og'].mean().plot(kind='bar', title='mean'); plt.show()
gs.groupby('layers_type')['val_mae_og'].mean().plot(kind='bar', title='mean'); plt.show()
gs.groupby('units')['val_mae_og'].mean().plot(kind='bar', title='mean'); plt.show()
gs.groupby('g_filt')['val_mae_og'].mean().plot(kind='bar', title='mean'); plt.show()
###Output
_____no_output_____
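###Markdown
Since the bar-chart output is not captured in this text dump, the same per-hyperparameter means can also be printed as plain tables; a short sketch assuming the `gs` frame and column names used in the cell above:
###Code
# print mean validation MAE per setting of each searched hyperparameter
for col in ['length', 'layers_num', 'layers_type', 'units', 'g_filt']:
    print(gs.groupby(col)['val_mae_og'].mean().round(4), '\n')
###Output
_____no_output_____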
###Markdown
COMMENTARY: Not too dissimilar to the temp model, though it does better with longer series fed into it. Recreate the best model and compare it against the test data.
###Code
best_model_params = gs.sort_values('val_mae_og').iloc[0]
best_model_params
best_model = BuildModel(model_name='best_windspeed_model.h5', length=30, layers_num=2,\
layers_type='LSTM', units=40, dropout=0, g_filt=1, epochs=120, batch_size=10,\
patience=15)
best_model.setupData(wind_train)
best_model.fitModel()
# load the best-performing checkpoint
best_model.loadModel()
# predict a week ahead
week_pred = best_model.predAhead(7)
# plot predictions against the held-out test week
best_model.plotPreds(week_pred, wind_test, ylabel='windspeed')
###Output
_____no_output_____
###Markdown
COMMENTARY: The first prediction is a fair bit off; the rest of the series looks okay.
###Code
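# (Sketch, not in the original notebook; added to quantify the commentary above.)
# Assumes `week_pred` holds the 7 predicted values and that `wind_test` starts at
# the same timestamp, so its first 7 entries line up with the predictions.
import numpy as np
preds = np.asarray(week_pred).ravel()[:7]
actuals = np.asarray(wind_test).ravel()[:7]
for day, (p, a) in enumerate(zip(preds, actuals), start=1):
    print(f'day {day}: pred={p:.2f}  actual={a:.2f}  abs error={abs(p - a):.2f}')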
###Output
_____no_output_____ |
notebooks/concepts/Minimal Specviz.ipynb | ###Markdown
A Minimal Specviz+notebook Workflow
This notebook provides a short example of combining the Specviz interactive visualization tool of the `jdaviz` package with a more traditional non-interactive Python workflow. The science case is loading a single 1D spectrum (from the [Sloan Digital Sky Survey](https://www.sdss.org/)) and measuring the flux in a single spectral line (${\rm H}\alpha$). We begin by creating an instance of the `Specviz` helper class, which provides a range of conveniences for the discerning astronomer to easily work with the visualization tool. Ending the cell with the `.app` attribute of that instance will show the viz tool.
###Code
from jdaviz import Specviz
specviz = Specviz()
specviz.app
###Output
_____no_output_____
###Markdown
The above is currently empty. While one could use the "import" option to find a local file on disk, a notebook workflow is more amenable to downloading and loading a spectrum directly in Python code. To do this, we load our spectrum using the `specutils` package. This provides maximum flexibility because `Spectrum1D` objects can either be created from local data files, URLs (as shown below), or manually from user-provided arrays (a minimal array-based sketch follows the next cell). We then use the `Specviz.load_spectrum` method to load the data into the app; this should then immediately show the spectrum in the cell above.
###Code
import specutils
spec_url = 'https://dr14.sdss.org/optical/spectrum/view/data/format=fits/spec=lite?plateid=1323&mjd=52797&fiberid=12'
spec = specutils.Spectrum1D.read(spec_url, cache=True)
specviz.load_spectrum(spec)
###Output
_____no_output_____
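###Markdown
For reference, and as a minimal sketch rather than part of the original workflow, a `Spectrum1D` can also be built directly from user-provided arrays:
###Code
import numpy as np
from astropy import units as u
from specutils import Spectrum1D
# a toy spectrum built from plain arrays (values are arbitrary)
wave = np.linspace(6000, 7000, 500) * u.angstrom
flux = np.random.default_rng(0).normal(100, 5, wave.size) * u.Jy
toy_spec = Spectrum1D(spectral_axis=wave, flux=flux)
toy_spec
###Output
_____no_output_____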
###Markdown
That spectrum looks great! But the line we are looking for is pretty narrow. We could zoom using the pan/zoom tool in the UI, but you can also execute the cell below to zoom the view in on the region around ${\rm H}\alpha$:
###Code
# zoom in on Halpha region
v = specviz.app.get_viewer('spectrum-viewer')
v.state.x_min = 6500
v.state.x_max = 6750
###Output
_____no_output_____
###Markdown
If the spectrum has uncertainties, we can display them as a shaded band around the spectral trace.
###Code
v.show_uncertainties()
###Output
_____no_output_____
###Markdown
If the spectrum has masked data points, we can mark them on the plot.
###Code
v.show_mask()
###Output
_____no_output_____
###Markdown
This erases the uncertainties and masks from the plot.
###Code
v.clean()
###Output
_____no_output_____
###Markdown
Now use the Glupyter range selection tool (expand the menu and choose the second tool), and select the area around the ${\rm H}\alpha$ line. Then you can execute the cell below to get that selection into a format `specutils` understands:
###Code
line_region = specviz.get_spectral_regions()['Subset 1']
line_region
# To reproduce the exact values this notebook was written assuming, uncomment the line below
# (requires `from astropy import units as u`):
# line_region = specutils.SpectralRegion(6557.48830955*u.angstrom, 6584.69919391*u.angstrom)
###Output
_____no_output_____
###Markdown
Now with that region selected, we can build a Gaussian + Constant continuum model to fit the selected line, and then fit it to just the data in the selected region:
###Code
from astropy.modeling import models
from specutils.fitting import fit_lines
from specutils import manipulation
line_model_guess = models.Gaussian1D(mean=(line_region.lower + line_region.upper)/2,
stddev=3,
amplitude=1000) + models.Const1D(200)
#fit that model to the selected region
# after a bug fix, the below should just be a single line:
# fit_lines(spec, line_model_guess, window=line_region)
extracted = manipulation.extract_region(spec, line_region)
extracted.mask[:] = False
fitted_line = fit_lines(extracted, line_model_guess)
fitted_line
###Output
_____no_output_____
###Markdown
Now we plot that model with the spectrum to examine the fit:
###Code
import numpy as np
from matplotlib import pyplot as plt
from astropy import units as u
plt.plot(spec.spectral_axis, spec.flux, lw=3)
model_lamb = np.linspace(v.state.x_min, v.state.x_max, 1000)*u.angstrom
plt.plot(model_lamb, fitted_line(model_lamb), '-', lw=2)
plt.xlim(v.state.x_min, v.state.x_max)
plt.ylim(v.state.y_min, v.state.y_max);
###Output
_____no_output_____
###Markdown
Looks good! Now to achieve the final goal of a line flux measurement, we can integrate over the line:
###Code
from scipy.integrate import quad
quad(fitted_line.unitless_model.left, 6500, 6700)[0] * fitted_line.return_units*spec.spectral_axis.unit
###Output
_____no_output_____
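###Markdown
As a quick cross-check (a sketch, not part of the original workflow), the flux of the fitted Gaussian component also follows in closed form: the integral of $A\,e^{-(x-\mu)^2/2\sigma^2}$ over wavelength is $A\sigma\sqrt{2\pi}$, which should agree closely with the numerical quadrature above since the line is narrow.
###Code
import numpy as np
# closed-form integral of the Gaussian1D component (the `.left` part of the compound model)
g = fitted_line.unitless_model.left
flux_closed_form = g.amplitude.value * g.stddev.value * np.sqrt(2 * np.pi)
flux_closed_form * fitted_line.return_units * spec.spectral_axis.unit
###Output
_____no_output_____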
AAS-18-290_6DOF_manuscript/Run/Run_4km_terminal/test-9km.ipynb | ###Markdown
Optimize Policy
###Code
from env_mdr import Env
from reward_terminal_mdr import Reward
import env_utils as envu
import attitude_utils as attu
from dynamics_model import Dynamics_model
from lander_model import Lander_model
from ic_gen import Landing_icgen
from agent_mdr2 import Agent
from policy_ppo import Policy
from value_function import Value_function
from utils import Mapminmax,Logger,Scaler
from flat_constraint import Flat_constraint
from glideslope_constraint import Glideslope_constraint
from attitude_constraint import Attitude_constraint
from thruster_model import Thruster_model
logger = Logger()
dynamics_model = Dynamics_model()
attitude_parameterization = attu.Quaternion_attitude()
lander_model = Lander_model(Thruster_model(), attitude_parameterization=attitude_parameterization,
apf_v0=70, apf_atarg=15., apf_tau2=100.)
lander_model.get_state_agent = lander_model.get_state_agent_tgo_alt
reward_object = Reward(tracking_bias=0.01,tracking_coeff=-0.01, fuel_coeff=-0.05, debug=False, landing_coeff=10.)
glideslope_constraint = Glideslope_constraint(gs_limit=-1.0)
shape_constraint = Flat_constraint()
attitude_constraint = Attitude_constraint(attitude_parameterization,
attitude_penalty=-100,attitude_coeff=-10,
attitude_limit=(10*np.pi, np.pi/2-np.pi/16, np.pi/2-np.pi/16))
env = Env(lander_model,dynamics_model,logger,
reward_object=reward_object,
glideslope_constraint=glideslope_constraint,
shape_constraint=shape_constraint,
attitude_constraint=attitude_constraint,
tf_limit=120.0,print_every=10)
obs_dim = 12
act_dim = 4
policy = Policy(obs_dim,act_dim,kl_targ=0.001,epochs=20, beta=0.1, shuffle=True, servo_kl=True)
import utils
fname = "optimize_4km"                             # name of the previously optimized 4 km run
input_normalizer = utils.load_run(policy, fname)   # load the saved run into `policy`; returns the input normalizer
print(input_normalizer)
###Output
6dof dynamics model
Quaternion_attitude
Thruster Config Shape: (4, 6) 4
Inertia Tensor: [[2000. 0. 0.]
[ 0. 2000. 0.]
[ 0. 0. 3200.]]
Lander Model:
- apf_v0: 70
- apf_vf1: [ 0. 0. -2.]
- apf_vf2: [ 0. 0. -1.]
- apf_atarg: 15.0
- apf_tau1: 20
- apf_tau2: 100.0
Reward_terminal
queue fixed
Flat Constraint
Attitude Constraint
###Markdown
9 square km deployment region (3 km downrange by 3 km crossrange; see the initial-condition ranges below)
###Code
policy.test_mode=True                      # run the policy in test mode for evaluation
env.lander.divert=(0,0,0)
env.lander.apf_pot=env.lander.apf_pot2     # use the second potential-field velocity profile
# initial-condition generator; each kinematic tuple appears to be (min, max, rate_min, rate_max)
env.ic_gen = Landing_icgen(mass_uncertainty=0.03, g_uncertainty=(0.0,0.0),
adapt_apf_v0=True,
attitude_parameterization=attitude_parameterization,
downrange = (0,3000 , -70, -10),
crossrange = (-1500,1500 , -30,30),
altitude = (2300,2400,-90,-70),
yaw = (-np.pi/8, np.pi/8, 0.0, 0.0) ,
pitch = (np.pi/4-np.pi/8, np.pi/4+np.pi/16, -0.0, 0.0),
roll = (-np.pi/8, np.pi/8, -0.0, 0.0),
noise_u=0*np.ones(3), noise_sd=0*np.ones(3))
env.test_policy_batch(policy,input_normalizer,10000,print_every=100)
###Output
i : 100
Cumulative Stats (mean,std,max,argmax)
thrust |9766.26 |2701.78 |3200.00 |16000.00 | 94
glideslope | 3.028 | 8.272 | 0.515 |620.573 | 56
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 0.3 0.0 -0.0 | 0.5 0.5 0.0 | -0.7 -2.7 -0.0 | 1.8 1.7 -0.0
velocity | 0.044 -0.034 -0.949 | 0.019 0.065 0.040 | -0.027 -0.137 -1.028 | 0.098 0.121 -0.825
fuel |301.74 | 22.40 |265.67 |361.48
attitude_321 | -0.079 -0.025 -0.019 | 0.137 0.011 0.021 | -0.413 -0.058 -0.054 | 0.214 0.012 0.051
w | -0.037 -0.016 -0.000 | 0.059 0.027 0.000 | -0.175 -0.087 -0.000 | 0.078 0.052 0.000
glideslope | 22.057 | 7.661 | 10.838 | 43.266
i : 200
Cumulative Stats (mean,std,max,argmax)
thrust |9694.90 |2698.42 |3200.00 |16000.00 | 94
glideslope | 3.246 | 9.159 | 0.515 |620.573 | 56
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 0.4 0.0 -0.0 | 0.5 0.5 0.0 | -0.8 -2.7 -0.0 | 1.8 1.7 -0.0
velocity | 0.043 -0.033 -0.947 | 0.018 0.066 0.041 | -0.027 -0.148 -1.060 | 0.098 0.140 -0.825
fuel |304.49 | 23.21 |265.67 |368.10
attitude_321 | -0.069 -0.025 -0.019 | 0.149 0.010 0.023 | -0.415 -0.058 -0.078 | 0.418 0.012 0.051
w | -0.034 -0.013 0.000 | 0.056 0.029 0.000 | -0.175 -0.123 -0.000 | 0.078 0.052 0.000
glideslope | 21.318 | 7.982 | 10.349 | 58.363
i : 300
Cumulative Stats (mean,std,max,argmax)
thrust |9683.89 |2709.94 |3200.00 |16000.00 | 94
glideslope | 3.077 | 8.955 | 0.458 |691.980 | 280
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 0.4 -0.0 -0.0 | 0.5 0.4 0.0 | -0.9 -2.7 -0.0 | 1.8 1.7 -0.0
velocity | 0.043 -0.033 -0.946 | 0.017 0.065 0.039 | -0.027 -0.148 -1.060 | 0.098 0.140 -0.825
fuel |305.68 | 23.22 |265.67 |374.38
attitude_321 | -0.062 -0.025 -0.018 | 0.155 0.010 0.023 | -0.415 -0.067 -0.078 | 0.418 0.012 0.051
w | -0.033 -0.011 0.000 | 0.055 0.027 0.000 | -0.175 -0.123 -0.000 | 0.089 0.052 0.000
glideslope | 20.840 | 7.477 | 9.245 | 58.363
i : 400
Cumulative Stats (mean,std,max,argmax)
thrust |9659.65 |2684.46 |3200.00 |16000.00 | 94
glideslope | 3.093 | 8.569 | 0.458 |691.980 | 280
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 0.4 -0.0 -0.0 | 0.5 0.5 0.0 | -0.9 -2.7 -0.0 | 1.9 1.7 -0.0
velocity | 0.042 -0.033 -0.945 | 0.017 0.065 0.040 | -0.027 -0.148 -1.060 | 0.098 0.144 -0.809
fuel |305.06 | 22.63 |263.94 |374.38
attitude_321 | -0.057 -0.025 -0.018 | 0.156 0.010 0.022 | -0.464 -0.067 -0.078 | 0.418 0.012 0.051
w | -0.032 -0.010 0.000 | 0.054 0.026 0.000 | -0.175 -0.123 -0.000 | 0.089 0.068 0.000
glideslope | 20.611 | 7.271 | 9.245 | 58.363
i : 500
Cumulative Stats (mean,std,max,argmax)
thrust |9659.48 |2688.68 |3200.00 |16000.00 | 94
glideslope | 3.050 | 8.538 | 0.458 |691.980 | 280
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 0.4 -0.0 -0.0 | 0.5 0.5 0.0 | -1.6 -2.7 -0.0 | 1.9 1.9 -0.0
velocity | 0.042 -0.032 -0.945 | 0.017 0.066 0.041 | -0.027 -0.148 -1.060 | 0.098 0.155 -0.809
fuel |305.34 | 22.89 |263.94 |387.27
attitude_321 | -0.058 -0.025 -0.018 | 0.158 0.010 0.022 | -0.464 -0.067 -0.078 | 0.418 0.014 0.051
w | -0.032 -0.010 0.000 | 0.055 0.027 0.000 | -0.175 -0.160 -0.000 | 0.089 0.068 0.000
glideslope | 20.689 | 7.494 | 9.245 | 66.473
i : 600
Cumulative Stats (mean,std,max,argmax)
thrust |9659.25 |2688.65 |3200.00 |16000.00 | 94
glideslope | 3.069 | 8.783 | 0.458 |1085.152 | 587
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 0.4 -0.0 -0.0 | 0.5 0.5 0.0 | -1.6 -2.7 -0.0 | 2.0 2.2 -0.0
velocity | 0.042 -0.032 -0.944 | 0.017 0.065 0.042 | -0.027 -0.148 -1.060 | 0.098 0.155 -0.805
fuel |305.25 | 22.89 |263.94 |387.27
attitude_321 | -0.055 -0.025 -0.018 | 0.159 0.010 0.022 | -0.464 -0.067 -0.078 | 0.418 0.014 0.051
w | -0.032 -0.010 0.000 | 0.054 0.026 0.000 | -0.175 -0.160 -0.000 | 0.089 0.068 0.000
glideslope | 20.852 | 8.220 | 9.245 | 87.887
i : 700
Cumulative Stats (mean,std,max,argmax)
thrust |9639.72 |2692.51 |3200.00 |16000.00 | 94
glideslope | 3.033 | 8.569 | 0.432 |1085.152 | 587
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 0.4 -0.0 -0.0 | 0.5 0.5 0.0 | -1.6 -3.4 -0.0 | 2.9 2.2 -0.0
velocity | 0.042 -0.032 -0.943 | 0.016 0.065 0.042 | -0.027 -0.148 -1.060 | 0.098 0.185 -0.805
fuel |306.11 | 23.52 |263.94 |392.67
attitude_321 | -0.057 -0.025 -0.018 | 0.156 0.009 0.022 | -0.464 -0.067 -0.078 | 0.495 0.014 0.051
w | -0.031 -0.009 0.000 | 0.053 0.026 0.000 | -0.175 -0.160 -0.000 | 0.108 0.073 0.000
glideslope | 20.703 | 7.899 | 9.245 | 87.887
i : 800
Cumulative Stats (mean,std,max,argmax)
thrust |9645.28 |2693.80 |3200.00 |16000.00 | 94
glideslope | 3.029 | 8.729 | 0.432 |1085.152 | 587
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 0.4 -0.0 -0.0 | 0.5 0.5 0.0 | -1.6 -3.4 -0.0 | 2.9 2.2 -0.0
velocity | 0.042 -0.032 -0.943 | 0.017 0.065 0.043 | -0.027 -0.171 -1.060 | 0.100 0.185 -0.796
fuel |306.07 | 23.49 |263.12 |392.67
attitude_321 | -0.056 -0.025 -0.018 | 0.156 0.009 0.022 | -0.464 -0.067 -0.078 | 0.495 0.014 0.051
w | -0.031 -0.010 0.000 | 0.053 0.026 0.000 | -0.175 -0.160 -0.000 | 0.108 0.073 0.000
glideslope | 20.817 | 8.297 | 9.245 | 105.914
i : 900
Cumulative Stats (mean,std,max,argmax)
thrust |9640.82 |2690.91 |3200.00 |16000.00 | 94
glideslope | 3.026 | 8.622 | 0.265 |1085.152 | 587
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 0.4 -0.0 -0.0 | 0.5 0.5 0.0 | -1.6 -3.4 -0.0 | 2.9 2.2 -0.0
velocity | 0.042 -0.032 -0.944 | 0.017 0.065 0.043 | -0.027 -0.171 -1.060 | 0.100 0.185 -0.791
fuel |305.94 | 23.61 |263.12 |392.67
attitude_321 | -0.057 -0.025 -0.018 | 0.156 0.009 0.022 | -0.464 -0.067 -0.078 | 0.495 0.014 0.051
w | -0.031 -0.009 0.000 | 0.053 0.026 0.000 | -0.175 -0.160 -0.000 | 0.108 0.073 0.000
glideslope | 20.817 | 8.574 | 9.245 | 105.914
i : 1000
Cumulative Stats (mean,std,max,argmax)
thrust |9635.52 |2685.83 |3200.00 |16000.00 | 94
glideslope | 3.031 | 8.593 | 0.265 |1085.152 | 587
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 0.4 -0.0 -0.0 | 0.5 0.5 0.0 | -1.6 -3.4 -0.0 | 2.9 2.2 -0.0
velocity | 0.042 -0.031 -0.944 | 0.017 0.066 0.043 | -0.027 -0.171 -1.060 | 0.100 0.185 -0.791
fuel |305.93 | 23.48 |263.12 |392.67
attitude_321 | -0.060 -0.025 -0.018 | 0.155 0.009 0.022 | -0.464 -0.067 -0.078 | 0.495 0.014 0.051
w | -0.031 -0.009 0.000 | 0.054 0.027 0.000 | -0.175 -0.160 -0.000 | 0.108 0.073 0.000
glideslope | 21.179 | 14.682 | 9.245 | 396.036
i : 1100
Cumulative Stats (mean,std,max,argmax)
thrust |9627.48 |2684.23 |3200.00 |16000.00 | 94
glideslope | 2.999 | 8.406 | 0.265 |1085.152 | 587
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 0.4 -0.0 -0.0 | 0.5 0.5 0.0 | -1.6 -3.4 -0.0 | 2.9 2.2 -0.0
velocity | 0.042 -0.031 -0.944 | 0.017 0.065 0.043 | -0.027 -0.171 -1.060 | 0.100 0.185 -0.791
fuel |306.10 | 23.68 |263.12 |392.67
attitude_321 | -0.059 -0.025 -0.018 | 0.155 0.009 0.022 | -0.464 -0.067 -0.078 | 0.495 0.014 0.051
w | -0.030 -0.009 0.000 | 0.054 0.027 0.000 | -0.175 -0.160 -0.000 | 0.108 0.073 0.000
glideslope | 21.164 | 14.321 | 9.245 | 396.036
###Markdown
12 sq km landing zone (0-4000 m downrange, +/-1500 m crossrange), 3000 m initial altitude
###Code
policy.test_mode=True
env.lander.divert=(0,0,0)
env.lander.apf_pot=env.lander.apf_pot2
env.ic_gen = Landing_icgen(mass_uncertainty=0.03, g_uncertainty=(0.0,0.0),
adapt_apf_v0=True,
attitude_parameterization=attitude_parameterization,
downrange = (0,4000 , -70, -10),
crossrange = (-1500,1500 , -30,30),
altitude = (2900,3100,-90,-70),
yaw = (-np.pi/8, np.pi/8, 0.0, 0.0) ,
pitch = (np.pi/4-np.pi/8, np.pi/4+np.pi/16, -0.0, 0.0),
roll = (-np.pi/8, np.pi/8, -0.0, 0.0),
noise_u=0*np.ones(3), noise_sd=0*np.ones(3))
env.test_policy_batch(policy,input_normalizer,10000,print_every=100)
###Output
i : 100
Cumulative Stats (mean,std,max,argmax)
thrust |9137.82 |2346.19 |3200.00 |16000.00 | 82
glideslope | 2.962 | 8.911 | 0.587 |819.085 | 15
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 0.5 -0.2 -0.0 | 0.6 0.4 0.0 | -1.0 -1.3 -0.0 | 2.1 1.0 -0.0
velocity | 0.042 -0.038 -0.929 | 0.017 0.063 0.046 | -0.019 -0.142 -1.017 | 0.074 0.141 -0.777
fuel |334.84 | 26.90 |290.74 |405.12
attitude_321 | -0.033 -0.023 -0.015 | 0.172 0.008 0.020 | -0.487 -0.052 -0.046 | 0.343 0.001 0.039
w | -0.034 -0.008 0.000 | 0.048 0.023 0.000 | -0.165 -0.073 -0.000 | 0.058 0.059 0.000
glideslope | 20.491 | 7.236 | 10.895 | 52.488
i : 200
Cumulative Stats (mean,std,max,argmax)
thrust |9117.11 |2375.01 |3200.00 |16000.00 | 82
glideslope | 3.032 | 8.561 | 0.579 |819.085 | 15
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 28.4 -3.5 43.4 | 287.1 67.2 352.8 | -1.5 -589.9 -0.0 | 3868.8 492.4 2961.8
velocity | -0.647 -0.177 -2.092 | 6.265 1.604 9.497 | -66.570 -17.943 -88.209 | 0.091 4.218 -0.612
fuel |332.65 | 49.44 | 3.82 |427.00
attitude_321 | -0.025 -0.019 -0.028 | 0.189 0.154 0.140 | -0.836 -1.385 -1.409 | 0.468 1.286 0.259
w | -0.035 -0.006 0.000 | 0.051 0.062 0.000 | -0.227 -0.437 -0.000 | 0.095 0.520 0.000
glideslope | 19.907 | 6.993 | 1.096 | 52.488
i : 300
Cumulative Stats (mean,std,max,argmax)
thrust |9115.20 |2376.81 |3200.00 |16000.00 | 82
glideslope | 2.970 | 8.069 | 0.579 |819.085 | 15
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 55.0 -10.7 57.6 | 426.2 112.8 404.4 | -1.5 -1457.9 -0.0 | 3868.8 492.4 2965.7
velocity | -0.567 -0.087 -2.498 | 5.339 1.950 11.049 | -66.570 -17.943 -88.209 | 0.091 16.038 -0.612
fuel |331.41 | 54.08 | 3.82 |427.00
attitude_321 | -0.010 -0.031 -0.031 | 0.192 0.167 0.168 | -0.836 -1.385 -1.422 | 0.860 1.286 0.794
w | -0.037 -0.009 -0.000 | 0.049 0.062 0.000 | -0.227 -0.437 -0.000 | 0.095 0.520 0.000
glideslope | 19.834 | 7.181 | 1.096 | 52.488
i : 400
Cumulative Stats (mean,std,max,argmax)
thrust |9128.52 |2396.81 |3200.00 |16000.00 | 82
glideslope | 2.968 | 7.828 | 0.520 |819.085 | 15
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 51.3 -9.0 50.5 | 418.2 99.3 379.3 | -1.5 -1457.9 -0.1 | 3950.0 492.4 2965.7
velocity | -0.447 -0.037 -2.302 | 4.674 1.877 10.341 | -66.570 -17.943 -88.209 | 0.091 16.301 -0.612
fuel |332.90 | 52.11 | 3.82 |427.00
attitude_321 | -0.011 -0.033 -0.026 | 0.187 0.160 0.148 | -0.836 -1.390 -1.422 | 0.860 1.286 0.794
w | -0.038 -0.010 0.000 | 0.049 0.061 0.000 | -0.227 -0.533 -0.000 | 0.095 0.520 0.000
glideslope | 19.793 | 7.199 | 1.096 | 65.029
i : 500
Cumulative Stats (mean,std,max,argmax)
thrust |9153.37 |2393.57 |3200.00 |16000.00 | 412
glideslope | 3.011 | 7.880 | 0.520 |819.085 | 15
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 41.1 -7.2 40.4 | 374.7 88.9 339.9 | -1.5 -1457.9 -0.1 | 3950.0 492.4 2965.7
velocity | -0.349 -0.038 -2.026 | 4.187 1.679 9.268 | -66.570 -17.943 -88.209 | 0.091 16.301 -0.612
fuel |333.12 | 48.39 | 3.82 |427.00
attitude_321 | -0.013 -0.031 -0.024 | 0.184 0.143 0.133 | -0.836 -1.390 -1.422 | 0.860 1.286 0.794
w | -0.038 -0.010 -0.000 | 0.049 0.055 0.000 | -0.227 -0.533 -0.000 | 0.105 0.520 0.000
glideslope | 19.970 | 7.211 | 1.096 | 65.029
i : 600
Cumulative Stats (mean,std,max,argmax)
thrust |9153.89 |2395.08 |3200.00 |16000.00 | 412
glideslope | 3.013 | 7.978 | 0.436 |819.085 | 15
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 41.4 -9.8 43.4 | 374.3 105.3 351.9 | -1.5 -1457.9 -0.1 | 3950.0 492.4 2965.7
velocity | -0.414 -0.122 -2.084 | 4.666 2.094 9.412 | -66.570 -26.319 -88.209 | 0.091 16.301 -0.612
fuel |332.80 | 49.61 | 3.82 |427.00
attitude_321 | -0.012 -0.029 -0.028 | 0.185 0.145 0.145 | -0.836 -1.390 -1.422 | 0.860 1.308 0.794
w | -0.038 -0.010 -0.000 | 0.049 0.056 0.000 | -0.227 -0.558 -0.000 | 0.105 0.520 0.000
glideslope | 19.835 | 6.993 | 1.083 | 65.029
i : 700
Cumulative Stats (mean,std,max,argmax)
thrust |9171.30 |2387.37 |3200.00 |16000.00 | 412
glideslope | 3.014 | 8.124 | 0.436 |819.085 | 15
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 35.5 -8.8 41.4 | 346.9 98.0 344.5 | -57.9 -1457.9 -0.1 | 3950.0 492.4 2976.3
velocity | -0.418 -0.132 -2.023 | 4.698 2.020 9.127 | -66.570 -26.319 -88.209 | 0.114 16.301 -0.566
fuel |332.13 | 48.64 | 3.82 |427.00
attitude_321 | -0.016 -0.026 -0.028 | 0.191 0.144 0.144 | -0.923 -1.390 -1.433 | 0.860 1.320 0.794
w | -0.038 -0.010 0.000 | 0.051 0.055 0.000 | -0.227 -0.558 -0.000 | 0.181 0.520 0.000
glideslope | 20.238 | 8.103 | 1.083 | 97.429
*** ATT VIO TYPE CNT: [0. 1. 0.]
i : 800
Cumulative Stats (mean,std,max,argmax)
thrust |9165.31 |2383.91 |3200.00 |16000.00 | 412
glideslope | 2.984 | 7.934 | 0.436 |819.085 | 15
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 31.5 -9.5 40.0 | 324.8 104.5 338.8 | -57.9 -1457.9 -0.1 | 3950.0 492.4 2976.3
velocity | -0.444 -0.083 -1.977 | 4.975 2.153 8.907 | -66.570 -26.319 -88.209 | 0.114 29.085 -0.566
fuel |332.25 | 48.15 | 3.34 |427.00
attitude_321 | -0.020 -0.025 -0.027 | 0.188 0.143 0.136 | -0.923 -1.390 -1.433 | 0.860 1.379 0.794
w | -0.037 -0.009 -0.000 | 0.052 0.059 0.000 | -0.227 -0.558 -0.000 | 0.181 0.764 0.000
glideslope | 20.242 | 8.195 | 1.004 | 97.429
i : 900
Cumulative Stats (mean,std,max,argmax)
thrust |9161.07 |2383.82 |3200.00 |16000.00 | 412
glideslope | 2.982 | 7.900 | 0.436 |819.085 | 15
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 28.1 -8.5 35.5 | 306.4 98.6 319.7 | -57.9 -1457.9 -0.1 | 3950.0 492.4 2976.3
velocity | -0.390 -0.080 -1.860 | 4.694 2.030 8.405 | -66.570 -26.319 -88.209 | 0.114 29.085 -0.566
fuel |332.63 | 46.46 | 3.34 |427.00
attitude_321 | -0.018 -0.024 -0.026 | 0.185 0.135 0.128 | -0.923 -1.390 -1.433 | 0.860 1.379 0.794
w | -0.038 -0.009 0.000 | 0.051 0.056 0.000 | -0.227 -0.558 -0.000 | 0.181 0.764 0.000
glideslope | 20.291 | 8.070 | 1.004 | 97.429
i : 1000
Cumulative Stats (mean,std,max,argmax)
thrust |9159.83 |2385.09 |3200.00 |16000.00 | 412
glideslope | 2.974 | 8.161 | 0.436 |1194.846 | 988
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 25.3 -7.6 32.0 | 290.8 93.6 303.5 | -57.9 -1457.9 -0.1 | 3950.0 492.4 2976.3
velocity | -0.346 -0.077 -1.767 | 4.455 1.926 7.979 | -66.570 -26.319 -88.209 | 0.114 29.085 -0.566
fuel |333.29 | 44.95 | 3.34 |427.00
attitude_321 | -0.018 -0.024 -0.025 | 0.183 0.128 0.122 | -0.923 -1.390 -1.433 | 0.860 1.379 0.794
w | -0.038 -0.009 0.000 | 0.052 0.054 0.000 | -0.227 -0.558 -0.000 | 0.181 0.764 0.000
glideslope | 20.383 | 8.289 | 1.004 | 97.429
i : 1100
Cumulative Stats (mean,std,max,argmax)
thrust |9159.60 |2385.47 |3200.00 |16000.00 | 412
glideslope | 2.971 | 8.432 | 0.436 |1532.022 | 1027
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
position | 27.0 -7.8 34.4 | 300.5 91.7 314.8 | -57.9 -1457.9 -0.1 | 3950.0 492.4 2976.3
velocity | -0.391 -0.074 -1.831 | 4.736 1.920 8.283 | -66.570 -26.319 -88.209 | 0.114 29.085 -0.566
fuel |333.14 | 45.87 | 3.34 |427.00
attitude_321 | -0.021 -0.024 -0.026 | 0.184 0.136 0.121 | -0.923 -1.390 -1.433 | 0.860 1.393 0.794
w | -0.038 -0.008 -0.000 | 0.052 0.060 0.000 | -0.236 -0.558 -0.000 | 0.181 1.015 0.000
glideslope | 20.248 | 8.161 | 1.004 | 97.429
|
Python Assignment 3.ipynb | ###Markdown
Assignment 3: Split the Check - Part II (loops and lists)

OBJECTIVE
You will practice how to read numerical and text data from the command line, do simple arithmetic computations, and output results to the terminal, but this time using lists to store the data.

THE PROBLEM
You have been in a fancy restaurant with four of your best friends. All the individual bills are brought to the table and it is time to add the tip and to compute everyone's share. Since nobody ordered anything extravagantly expensive, you decide that everyone will pay an equal share of the bill.

The number-of-friends variable `number_of_friends` is already provided for you as a function parameter and initialized with a default value of 5. Inside your program, you have to declare 2 new variables: one for a list of `names` and one for a list of `bills`. Assume the group has five or more people. Request the names of the attendees and store them in the list `names`. Then assign a bill to each person and store each of them in the corresponding list variable `bills`. Print out both of those variables. Next, ask for input for the tip percentage, then compute and print the total bill and the amount each person must pay.

You must use the `number_of_friends` variable to define the number of iterations for your loop (either a for or a while loop), and the size or length of your lists must match the value of `number_of_friends`. All variables except `number_of_friends` must be declared and used. You will get a ZERO if you do not comply with the instructions.

Your program should start with a documentation string that includes your name, the program name, the course number, the semester, and a short description of the program, as in:

"""
Tanvir Rahman, Peoplesoft ID: 111111
Program 1: Sharing the bill
This program computes individual shares of a restaurant bill.
"""

Note that the text above starts and ends with three double quotes.

HINTS
1. The easiest way to output numbers with two decimals is to use f-strings to insert them into a string: print(f"The result is {result:.2f}")

Output: The following example demonstrates the expected output from running your program. Note that these names and bills must be entered by the user from the keyboard and not hardcoded in your program.

Enter name of friend: David
Enter name of friend: John
Enter name of friend: Jane
Enter name of friend: Smith
Enter name of friend: Ali
Enter bill for David: 25.2
Enter bill for John: 22.99
Enter bill for Jane: 23.5
Enter bill for Smith: 24.0
Enter bill for Ali: 24.8

Names of Friends: David, John, Jane, Smith, Ali
Individual bills: 25.2, 22.99, 23.5, 24.0, 24.8
Enter tip percentage: 20
Total bill plus tip: $ 144.59
Each of us must pay $ 28.92

What to submit: your source code, properly documented, in a .ipynb file named according to the instructions: FirstName_LastName_assignmentNumber.ipynb

How to submit: Please submit the homework in Moodle. Log in to Moodle with your UH credentials, look up the assignment, and submit your work.

Recommendations: Your source code must run without syntax errors in order to receive a passing grade for this assignment. Make sure you add comments in your source code that describe what the program is supposed to do. Before submitting your code, make sure you test it with different input values to ensure it works correctly.
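As a quick sanity check on the expected output above, here is a hypothetical scratch cell that reproduces the sample numbers (it is not part of the required solution):
###Code
# Sanity check using the sample bills from the expected output above.
bills = [25.2, 22.99, 23.5, 24.0, 24.8]
total = sum(bills)              # 120.49
total_with_tip = total * 1.20   # 20% tip -> 144.588
print(f'Total bill plus tip: $ {total_with_tip:.2f}')     # 144.59
print(f'Each of us must pay $ {total_with_tip / 5:.2f}')  # 28.92
###Output
_____no_output_____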
###Code
#Replace pass with body of statement to complete the homework
def split_the_check(number_of_friends=5):
    """Compute the total restaurant bill plus tip and each friend's equal share."""
    names = []
    bills = []
    for _ in range(number_of_friends):
        names.append(input('Enter name of friend: '))
    for i in range(number_of_friends):
        bills.append(float(input(f'Enter bill for {names[i]}: ')))
    print('Names of Friends:', ', '.join(names))
    print('Individual bills:', ', '.join(str(b) for b in bills))
    total_bill = sum(bills)
    tip_percent = float(input('Enter tip percentage: '))
    tip_amount = (tip_percent * total_bill) / 100
    total_with_tip = total_bill + tip_amount
    print(f'Total bill plus tip: $ {total_with_tip:.2f}')
    share = total_with_tip / number_of_friends
    print(f'Each of us must pay $ {share:.2f}')
    return round(share, 2)

split_the_check(5)
###Output
_____no_output_____ |
Federated_Learning.ipynb | ###Markdown
Section: Federated Learning

Lesson: Introducing Federated Learning

Federated Learning is a technique for training Deep Learning models on data to which you do not have access. Basically:

Federated Learning: Instead of bringing all the data to one machine and training a model, we bring the model to the data, train it locally, and merely upload "model updates" to a central server.

Use Cases:

- app company (Texting prediction app)
- predictive maintenance (automobiles / industrial engines)
- wearable medical devices
- ad blockers / autocomplete in browsers (Firefox/Brave)

Challenge Description: data is distributed amongst sources but we cannot aggregate it because of:

- privacy concerns: legal, user discomfort, competitive dynamics
- engineering: the bandwidth/storage requirements of aggregating the larger dataset

Lesson: Introducing / Installing PySyft

In order to perform Federated Learning, we need to be able to use Deep Learning techniques on remote machines. This will require a new set of tools. Specifically, we will use an extension of PyTorch called PySyft.

Install PySyft

The easiest way to install the required libraries is with [Conda](https://docs.conda.io/projects/conda/en/latest/user-guide/overview.html). Create a new environment, then install the dependencies in that environment. In your terminal:

```bash
conda create -n pysyft python=3
conda activate pysyft  # some older versions of conda require "source activate pysyft" instead
conda install jupyter notebook
pip install syft
pip install numpy
```

If you have any errors relating to zstd, run the following (if everything above installed fine then skip this step):

```
pip install --upgrade --force-reinstall zstd
```

and then retry installing syft (pip install syft).

If you are using Windows, I suggest installing [Anaconda and using the Anaconda Prompt](https://docs.anaconda.com/anaconda/user-guide/getting-started/) to work from the command line. With this environment activated and in the repo directory, launch Jupyter Notebook:

```bash
jupyter notebook
```

and re-open this notebook on the new Jupyter server.

If any part of this doesn't work for you (or any of the tests fail), first check the [README](https://github.com/OpenMined/PySyft.git) for installation help and then open a Github Issue or ping the beginner channel in our slack! [slack.openmined.org](http://slack.openmined.org/)
###Code
import torch as th
x = th.tensor([1,2,3,4,5])
x
y = x + x
print(y)
!pip install syft
import syft as sy
hook = sy.TorchHook(th)
th.tensor([1,2,3,4,5])
###Output
_____no_output_____
###Markdown
Lesson: Basic Remote Execution in PySyft

PySyft => Remote PyTorch

The essence of Federated Learning is the ability to train models in parallel on a wide number of machines. Thus, we need the ability to tell remote machines to execute the operations required for Deep Learning.

So, instead of using Torch tensors, we're now going to work with **pointers** to tensors. Let me show you what I mean. First, let's create a "pretend" machine owned by a "pretend" person - we'll call him Bob.
###Code
bob = sy.VirtualWorker(hook, id="bob")
bob._objects
x = th.tensor([1,2,3,4,5])
x = x.send(bob)
bob._objects
x.location
x.id_at_location
x.id
x.owner
hook.local_worker
x
x = x.get()
x
bob._objects
###Output
_____no_output_____
###Markdown
Project: Playing with Remote Tensors

In this project, I want you to .send() and .get() a tensor to TWO workers by calling .send(bob, alice). This will first require the creation of another VirtualWorker called alice.
###Code
# try this project here!
alice = sy.VirtualWorker(hook, id="alice")
x = th.tensor([1,2,3,4,5])
x_ptr = x.send(bob, alice)
x_ptr.get()
x = th.tensor([1,2,3,4,5]).send(bob,alice)
x.get(sum_results=True)
###Output
_____no_output_____
###Markdown
Lesson: Introducing Remote Arithmetic
###Code
x = th.tensor([1,2,3,4,5]).send(bob)
y = th.tensor([1,1,1,1,1]).send(bob)
x
y
z = x + y
z
z = z.get()
z
z = th.add(x,y)
z
z = z.get()
z
x = th.tensor([1.,2,3,4,5], requires_grad=True).send(bob)
y = th.tensor([1.,1,1,1,1], requires_grad=True).send(bob)
z = (x + y).sum()
z.backward()
x = x.get()
x
x.grad
###Output
_____no_output_____
###Markdown
Project: Learn a Simple Linear Model

In this project, I'd like you to create a simple linear model which will solve the dataset below. You should use only Variables and .backward() to do so (no optimizers or nn.Modules). Furthermore, you must do so with both the data and the model located on Bob's machine.
###Code
# try this project here!
input = th.tensor([[1.,1],[0,1,],[1,0],[0,0]], requires_grad=True).send(bob)
target = th.tensor([[1.],[1],[0],[0]], requires_grad=True).send(bob)
weights = th.tensor([[0.],[0.]], requires_grad=True).send(bob)
input, target  # inspect the pointers to bob's machine before training
for i in range(10):
pred = input.mm(weights)
loss = ((pred - target)**2).sum()
loss.backward()
weights.data.sub_(weights.grad * 0.1)
weights.grad *= 0
print(loss.get().data)
###Output
tensor(0.0058)
tensor(0.0037)
tensor(0.0024)
tensor(0.0015)
tensor(0.0010)
tensor(0.0006)
tensor(0.0004)
tensor(0.0003)
tensor(0.0002)
tensor(0.0001)
###Markdown
Lesson: Garbage Collection and Common Errors
###Code
bob = bob.clear_objects()
bob._objects
x = th.tensor([1,2,3,4,5]).send(bob)
bob._objects
del x
bob._objects
x = th.tensor([1,2,3,4,5]).send(bob)
bob._objects
x = "asdf"
bob._objects
x = th.tensor([1,2,3,4,5]).send(bob)
x
bob._objects
x = "asdf"
bob._objects
del x
bob._objects
bob = bob.clear_objects()
bob._objects
for i in range(1000):
x = th.tensor([1,2,3,4,5]).send(bob)
bob._objects
# Common errors (uncomment a block to see it fail):
#x = th.tensor([1,2,3,4,5]).send(bob)
#y = th.tensor([1,1,1,1,1])
#z = x + y   # fails: x is a pointer on bob's machine, y is a local tensor

#x = th.tensor([1,2,3,4,5]).send(bob)
#y = th.tensor([1,1,1,1,1]).send(alice)
#z = x + y   # fails: x lives on bob, y lives on alice - no single worker holds both
###Output
_____no_output_____
###Markdown
Lesson: Toy Federated Learning

Let's start by training a toy model the centralized way. This is about as simple as models get. We first need:

- a toy dataset
- a model
- some basic training logic for training a model to fit the data.
###Code
from torch import nn, optim
# A Toy Dataset
data = th.tensor([[1.,1],[0,1],[1,0],[0,0]], requires_grad=True)
target = th.tensor([[1.],[1], [0], [0]], requires_grad=True)
# A Toy Model
model = nn.Linear(2,1)
opt = optim.SGD(params=model.parameters(), lr=0.1)
def train(iterations=20):
for iter in range(iterations):
opt.zero_grad()
pred = model(data)
loss = ((pred - target)**2).sum()
loss.backward()
opt.step()
print(loss.data)
train()
data_bob = data[0:2].send(bob)
target_bob = target[0:2].send(bob)
data_alice = data[2:4].send(alice)
target_alice = target[2:4].send(alice)
datasets = [(data_bob, target_bob), (data_alice, target_alice)]
_data, _target=datasets[0]
_data.location
model = model.send(_data.location)
list(model.parameters())
def train(iterations=20):
model = nn.Linear(2,1)
opt = optim.SGD(params=model.parameters(), lr=0.1)
for iter in range(iterations):
for _data, _target in datasets:
# send model to the data
model = model.send(_data.location)
# do normal training
opt.zero_grad()
pred = model(_data)
loss = ((pred - _target)**2).sum()
loss.backward()
opt.step()
# get smarter model back
model = model.get()
print(loss.get())
train()
###Output
_____no_output_____
###Markdown
Lesson: Advanced Remote Execution Tools

In the last section we trained a toy model using Federated Learning. We did this by calling .send() and .get() on our model, sending it to the location of the training data, updating it, and then bringing it back. However, at the end of the example we realized that we needed to go a bit further to protect people's privacy. Namely, we want to average the gradients BEFORE calling .get(). That way, we won't ever see anyone's exact gradient (thus better protecting their privacy!!!). But in order to do this, we need a few more pieces:

- use a pointer to send a Tensor directly to another worker

And in addition, while we're here, we're going to learn about a few more advanced tensor operations as well, which will help us both with this example and a few in the future!
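To preview where this is going, here is a minimal sketch of averaging on a trusted aggregator (an illustration, not the lesson's own code; it assumes the bob/alice workers and hook from earlier cells, plus a third VirtualWorker acting as the aggregator):
###Code
# Sketch only: train two copies of a toy model locally, then average them on a
# trusted aggregator so we only ever .get() the averaged weights, never an
# individual worker's update.
secure_worker = sy.VirtualWorker(hook, id="secure_worker")

model = nn.Linear(2, 1)                  # fresh toy model
bobs_model = model.copy().send(bob)      # train this copy on bob's data ...
alices_model = model.copy().send(alice)  # ... and this copy on alice's data

# (local training steps on each copy would go here)

# move both trained copies to the aggregator, never to us
bobs_model.move(secure_worker)
alices_model.move(secure_worker)

# average the weights remotely, then fetch only the average
model.weight.data.set_(((bobs_model.weight.data + alices_model.weight.data) / 2).get())
model.bias.data.set_(((bobs_model.bias.data + alices_model.bias.data) / 2).get())
###Output
_____no_output_____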
###Code
bob.clear_objects()
alice.clear_objects()
x = th.tensor([1,2,3,4,5]).send(bob)
x = x.send(alice)
bob._objects
alice._objects
y = x + x
y
bob._objects
alice._objects
jon = sy.VirtualWorker(hook, id="jon")
bob.clear_objects()
alice.clear_objects()
x = th.tensor([1,2,3,4,5]).send(bob).send(alice)
bob._objects
alice._objects
x = x.get()
x
bob._objects
alice._objects
x = x.get()
x
bob._objects
bob.clear_objects()
alice.clear_objects()
x = th.tensor([1,2,3,4,5]).send(bob).send(alice)
bob._objects
alice._objects
del x
bob._objects
alice._objects
###Output
_____no_output_____
###Markdown
Lesson: Pointer Chain Operations
###Code
bob.clear_objects()
alice.clear_objects()
x = th.tensor([1,2,3,4,5]).send(bob)
bob._objects
alice._objects
x.move(alice)
bob._objects
alice._objects
x = th.tensor([1,2,3,4,5]).send(bob).send(alice)
bob._objects
alice._objects
x.remote_get()
bob._objects
alice._objects
x.move(bob)
x
bob._objects
alice._objects
###Output
_____no_output_____
###Markdown
###Code
import sys
import pandas as pd
import numpy as np
import sklearn
import keras
print('Python: {}'.format(sys.version))
print('Pandas: {}'.format(pd.__version__))
print('Numpy: {}'.format(np.__version__))
print('Sklearn: {}'.format(sklearn.__version__))
print('Keras: {}'.format(keras.__version__))
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import display
%matplotlib inline
import plotly.offline as py
import plotly.graph_objs as go
import plotly.tools as tls
py.init_notebook_mode(connected=True)
import warnings
warnings.filterwarnings('ignore')
# Data processing, metrics and modeling
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn.metrics import precision_score, recall_score, confusion_matrix, roc_curve, precision_recall_curve, accuracy_score, roc_auc_score
import lightgbm as lgbm
from sklearn.ensemble import VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import roc_curve,auc
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_predict
from yellowbrick.classifier import DiscriminationThreshold
# Stats
import scipy.stats as ss
from scipy import interp
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
# Time
from contextlib import contextmanager
@contextmanager
def timer(title):
t0 = time.time()
yield
print("{} - done in {:.0f}s".format(title, time.time() - t0))
#ignore warning messages
import warnings
warnings.filterwarnings('ignore')
# Always good to set a seed for reproducibility
SEED = 7
np.random.seed(SEED)
from google.colab import drive
drive.mount('/content/drive')
names = ['n_pregnant', 'glucose_concentration', 'blood_pressuer (mm Hg)', 'skin_thickness (mm)', 'serum_insulin (mu U/ml)',
'BMI', 'pedigree_function', 'age', 'class']
#df = pd.read_csv('../input/diabetes.csv', names = names)
df = pd.read_csv('/content/drive/MyDrive/diabetes.csv')
df_name=df.columns
df.head()
df.info()
df.describe()
data=df
# 2 datasets
D = data[(data['Outcome'] != 0)]
H = data[(data['Outcome'] == 0)]
#------------COUNT-----------------------
def target_count():
trace = go.Bar( x = data['Outcome'].value_counts().values.tolist(),
y = ['healthy','diabetic' ],
orientation = 'h',
text=data['Outcome'].value_counts().values.tolist(),
textfont=dict(size=15),
textposition = 'auto',
opacity = 0.8,marker=dict(
color=['lightskyblue', 'gold'],
line=dict(color='#000000',width=1.5)))
layout = dict(title = 'Count of Outcome variable')
fig = dict(data = [trace], layout=layout)
py.iplot(fig)
#------------PERCENTAGE-------------------
def target_percent():
trace = go.Pie(labels = ['healthy','diabetic'], values = data['Outcome'].value_counts(),
textfont=dict(size=15), opacity = 0.8,
marker=dict(colors=['lightskyblue', 'gold'],
line=dict(color='#000000', width=1.5)))
layout = dict(title = 'Distribution of Outcome variable')
fig = dict(data = [trace], layout=layout)
py.iplot(fig)
target_col = ["Outcome"]
cat_cols = data.nunique()[data.nunique() < 12].keys().tolist()
cat_cols = [x for x in cat_cols ]
#numerical columns
num_cols = [x for x in data.columns if x not in cat_cols + target_col]
#Binary columns with 2 values
bin_cols = data.nunique()[data.nunique() == 2].keys().tolist()
#Columns more than 2 values
multi_cols = [i for i in cat_cols if i not in bin_cols]
#Label encoding Binary columns
le = LabelEncoder()
for i in bin_cols :
data[i] = le.fit_transform(data[i])
#Duplicating columns for multi value columns
data = pd.get_dummies(data = data,columns = multi_cols )
#Scaling Numerical columns
std = StandardScaler()
scaled = std.fit_transform(data[num_cols])
scaled = pd.DataFrame(scaled,columns=num_cols)
#dropping original values merging scaled values for numerical columns
df_data_og = data.copy()
data = data.drop(columns = num_cols,axis = 1)
data = data.merge(scaled,left_index=True,right_index=True,how = "left")
def correlation_plot():
#correlation
correlation = data.corr()
#tick labels
matrix_cols = correlation.columns.tolist()
#convert to array
corr_array = np.array(correlation)
trace = go.Heatmap(z = corr_array,
x = matrix_cols,
y = matrix_cols,
colorscale='Viridis',
colorbar = dict() ,
)
layout = go.Layout(dict(title = 'Correlation Matrix for variables',
#autosize = False,
#height = 1400,
#width = 1600,
margin = dict(r = 0 ,l = 100,
t = 0,b = 100,
),
yaxis = dict(tickfont = dict(size = 9)),
xaxis = dict(tickfont = dict(size = 9)),
)
)
fig = go.Figure(data = [trace],layout = layout)
py.iplot(fig)
correlation_plot()
# Def X and Y
X = data.drop('Outcome', axis=1)
y = data['Outcome']
def model_performance(model, subtitle) :
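    """Produce a 5-fold cross-validation report with Plotly: confusion matrix,
    accuracy/precision/recall/F1 bars, mean ROC curve, and precision-recall curve."""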
#Kfold
    cv = KFold(n_splits=5, shuffle=False)  # random_state only applies when shuffle=True
y_real = []
y_proba = []
tprs = []
aucs = []
mean_fpr = np.linspace(0,1,100)
i = 1
for train,test in cv.split(X,y):
model.fit(X.iloc[train], y.iloc[train])
pred_proba = model.predict_proba(X.iloc[test])
precision, recall, _ = precision_recall_curve(y.iloc[test], pred_proba[:,1])
y_real.append(y.iloc[test])
y_proba.append(pred_proba[:,1])
        fpr, tpr, t = roc_curve(y.iloc[test], pred_proba[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
# Confusion matrix
y_pred = cross_val_predict(model, X, y, cv=5)
conf_matrix = confusion_matrix(y, y_pred)
trace1 = go.Heatmap(z = conf_matrix ,x = ["0 (pred)","1 (pred)"],
y = ["0 (true)","1 (true)"],xgap = 2, ygap = 2,
colorscale = 'Viridis', showscale = False)
#Show metrics
tp = conf_matrix[1,1]
fn = conf_matrix[1,0]
fp = conf_matrix[0,1]
tn = conf_matrix[0,0]
Accuracy = ((tp+tn)/(tp+tn+fp+fn))
Precision = (tp/(tp+fp))
Recall = (tp/(tp+fn))
F1_score = (2*(((tp/(tp+fp))*(tp/(tp+fn)))/((tp/(tp+fp))+(tp/(tp+fn)))))
show_metrics = pd.DataFrame(data=[[Accuracy , Precision, Recall, F1_score]])
show_metrics = show_metrics.T
colors = ['gold', 'lightgreen', 'lightcoral', 'lightskyblue']
trace2 = go.Bar(x = (show_metrics[0].values),
y = ['Accuracy', 'Precision', 'Recall', 'F1_score'], text = np.round_(show_metrics[0].values,4),
textposition = 'auto', textfont=dict(color='black'),
orientation = 'h', opacity = 1, marker=dict(
color=colors,
line=dict(color='#000000',width=1.5)))
#Roc curve
mean_tpr = np.mean(tprs, axis=0)
mean_auc = auc(mean_fpr, mean_tpr)
trace3 = go.Scatter(x=mean_fpr, y=mean_tpr,
name = "Roc : " ,
line = dict(color = ('rgb(22, 96, 167)'),width = 2), fill='tozeroy')
trace4 = go.Scatter(x = [0,1],y = [0,1],
line = dict(color = ('black'),width = 1.5,
dash = 'dot'))
#Precision - recall curve
y_real = y
y_proba = np.concatenate(y_proba)
precision, recall, _ = precision_recall_curve(y_real, y_proba)
trace5 = go.Scatter(x = recall, y = precision,
name = "Precision" + str(precision),
line = dict(color = ('lightcoral'),width = 2), fill='tozeroy')
mean_auc=round(mean_auc,3)
#Subplots
fig = tls.make_subplots(rows=2, cols=2, print_grid=False,
specs=[[{}, {}],
[{}, {}]],
subplot_titles=('Confusion Matrix',
'Metrics',
'ROC curve'+" "+ '('+ str(mean_auc)+')',
'Precision - Recall curve',
))
#Trace and layout
fig.append_trace(trace1,1,1)
fig.append_trace(trace2,1,2)
fig.append_trace(trace3,2,1)
fig.append_trace(trace4,2,1)
fig.append_trace(trace5,2,2)
fig['layout'].update(showlegend = False, title = '<b>Model performance report (5 folds)</b><br>'+subtitle,
autosize = False, height = 830, width = 830,
plot_bgcolor = 'black',
paper_bgcolor = 'black',
margin = dict(b = 195), font=dict(color='white'))
fig["layout"]["xaxis1"].update(color = 'white')
fig["layout"]["yaxis1"].update(color = 'white')
fig["layout"]["xaxis2"].update((dict(range=[0, 1], color = 'white')))
fig["layout"]["yaxis2"].update(color = 'white')
fig["layout"]["xaxis3"].update(dict(title = "false positive rate"), color = 'white')
fig["layout"]["yaxis3"].update(dict(title = "true positive rate"),color = 'white')
fig["layout"]["xaxis4"].update(dict(title = "recall"), range = [0,1.05],color = 'white')
fig["layout"]["yaxis4"].update(dict(title = "precision"), range = [0,1.05],color = 'white')
for i in fig['layout']['annotations']:
        i['font'] = dict(color='white', size=14)
py.iplot(fig)
def scores_table(model, subtitle):
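    """Cross-validate the model (5 folds) and tabulate accuracy, precision,
    recall, F1, and ROC-AUC per fold, plus their mean and std."""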
scores = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']
res = []
for sc in scores:
scores = cross_val_score(model, X, y, cv = 5, scoring = sc)
res.append(scores)
df = pd.DataFrame(res).T
df.loc['mean'] = df.mean()
df.loc['std'] = df.std()
df= df.rename(columns={0: 'accuracy', 1:'precision', 2:'recall',3:'f1',4:'roc_auc'})
trace = go.Table(
header=dict(values=['<b>Fold', '<b>Accuracy', '<b>Precision', '<b>Recall', '<b>F1 score', '<b>Roc auc'],
line = dict(color='#7D7F80'),
fill = dict(color='#a1c3d1'),
align = ['center'],
font = dict(size = 15)),
cells=dict(values=[('1','2','3','4','5','mean', 'std'),
np.round(df['accuracy'],3),
np.round(df['precision'],3),
np.round(df['recall'],3),
np.round(df['f1'],3),
np.round(df['roc_auc'],3)],
line = dict(color='#7D7F80'),
fill = dict(color='#EDFAFF'),
align = ['center'], font = dict(size = 15)))
layout = dict(width=800, height=400, title = '<b>Cross Validation - 5 folds</b><br>'+subtitle, font = dict(size = 15))
fig = dict(data=[trace], layout=layout)
py.iplot(fig, filename = 'styled_table')
random_state=42
fit_params = {"early_stopping_rounds" : 100,
"eval_metric" : 'auc',
"eval_set" : [(X,y)],
'eval_names': ['valid'],
'verbose': 0,
'categorical_feature': 'auto'}
param_test = {'learning_rate' : [0.01, 0.02, 0.03, 0.04, 0.05, 0.08, 0.1, 0.2, 0.3, 0.4],
'n_estimators' : [100, 200, 300, 400, 500, 600, 800, 1000, 1500, 2000],
'num_leaves': sp_randint(6, 50),
'min_child_samples': sp_randint(100, 500),
'min_child_weight': [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4],
'subsample': sp_uniform(loc=0.2, scale=0.8),
'max_depth': [-1, 1, 2, 3, 4, 5, 6, 7],
'colsample_bytree': sp_uniform(loc=0.4, scale=0.6),
'reg_alpha': [0, 1e-1, 1, 2, 5, 7, 10, 50, 100],
'reg_lambda': [0, 1e-1, 1, 5, 10, 20, 50, 100]}
#number of combinations
n_iter = 300
#intialize lgbm and lunch the search
lgbm_clf = lgbm.LGBMClassifier(random_state=random_state, silent=True, metric='None', n_jobs=4)
grid_search = RandomizedSearchCV(
estimator=lgbm_clf, param_distributions=param_test,
n_iter=n_iter,
scoring='accuracy',
cv=5,
refit=True,
random_state=random_state,
verbose=True)
grid_search.fit(X, y, **fit_params)
opt_parameters = grid_search.best_params_
lgbm_clf = lgbm.LGBMClassifier(**opt_parameters)
model_performance(lgbm_clf, 'LightGBM')
scores_table(lgbm_clf, 'LightGBM')
# Drop rows with missing values
df.dropna(inplace=True)
# summarize the number of rows and columns in df
df.describe()
# Convert dataframe to numpy array
dataset = df.values
print(dataset.shape)
# split into input (X) and an output (Y)
X = dataset[:,0:8]
Y = dataset[:, 8].astype(int)
print(X.shape)
print(Y.shape)
print(Y[:5])
# Normalize the data using sklearn StandardScaler
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(X)
# Transform and display the training data
X_standardized = scaler.transform(X)
data = pd.DataFrame(X_standardized)
data.describe()
# import necessary sklearn and keras packages
from sklearn.model_selection import GridSearchCV, KFold
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.optimizers import Adam
# Do a grid search for the optimal batch size and number of epochs
# import necessary packages
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
# Define a random seed
seed = 6
np.random.seed(seed)
# Start defining the model
def create_model():
# create model
model = Sequential()
model.add(Dense(8, input_dim = 8, kernel_initializer='normal', activation='relu'))
model.add(Dense(16, input_dim = 8, kernel_initializer='normal', activation='relu'))
model.add(Dense(8, input_dim = 16, kernel_initializer='normal', activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# compile the model
adam = Adam(lr = 0.01)
model.compile(loss = 'binary_crossentropy', optimizer = adam, metrics = ['accuracy'])
return model
# create the model
model = KerasClassifier(build_fn = create_model, verbose = 0)
# define the grid search parameters
batch_size = [16, 32, 64,128]
epochs = [2, 5, 10]
# make a dictionary of the grid search parameters
param_grid = dict(batch_size=batch_size, epochs=epochs)
# build and fit the GridSearchCV
grid = GridSearchCV(estimator = model, param_grid = param_grid, cv = KFold(shuffle=True, random_state=seed), verbose = 10)
grid_results = grid.fit(X_standardized, Y)
# summarize the results
print("Best: {0}, using {1}".format(grid_results.best_score_, grid_results.best_params_))
means = grid_results.cv_results_['mean_test_score']
stds = grid_results.cv_results_['std_test_score']
params = grid_results.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print('{0} ({1}) with: {2}'.format(mean, stdev, param))
best_batch_size = 64
best_epochs = 10 # 100
best_dropout_rate = 0.0
best_learn_rate = 0.01
best_activation = 'relu'
best_init = 'normal'
best_neuron1 = 16
best_neuron2 = 16
best_neuron3 = 8
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
#best model
model = Sequential()
model.add(Dense(best_neuron1, input_dim = 8, kernel_initializer= best_init, activation= best_activation))
model.add(Dense(best_neuron2, input_dim = best_neuron1, kernel_initializer= best_init, activation= best_activation))
model.add(Dense(best_neuron3, input_dim = best_neuron2, kernel_initializer= best_init, activation= best_activation))
model.add(Dense(1, activation='sigmoid'))
# compile the model
adam = Adam(lr = best_learn_rate)
model.compile(loss = 'binary_crossentropy', optimizer = adam, metrics = ['accuracy'])
ckpt_model = 'pima-weights_best_t.hdf5'
checkpoint = ModelCheckpoint(ckpt_model,
monitor='val_accuracy',
verbose=1,
save_best_only=True,
mode='max')
callbacks_list = [checkpoint]
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.20, random_state=0)
print(X_train.shape)
print(X_test.shape)
history = model.fit(X_train,
y_train,
validation_data=(X_test, y_test),
epochs=best_epochs,
batch_size=best_batch_size,
callbacks=callbacks_list,
verbose=1)
!pip install pyyaml h5py # Required to save models in HDF5 format
# Display the model's architecture
model.summary()
# Save the entire model as a SavedModel.
!mkdir -p saved_model
model.save('/content/drive/MyDrive/saved_model/my_model1')
model.save('/content/drive/MyDrive/saved_model/my_model1.hdf5')
model.save_weights("/content/drive/MyDrive/weights1.h5")
model.save("/content/drive/MyDrive/")
model.load_weights("pima-weights_best_t.hdf5")
###Output
_____no_output_____
###Markdown
MOUNTING DRIVE
###Code
from google.colab import drive
drive.mount('/content/gdrive')
###Output
Mounted at /content/gdrive
###Markdown
IMPORTING LIBRARIES
###Code
!pip install medpy --upgrade --q
!pip install nibabel --upgrade --q
!pip install nilearn --upgrade --q
!pip install torchio --upgrade --q
import os
import torch
from glob import glob
import nibabel as nib
import numpy as np
import torch.nn as nn
import matplotlib.pyplot as plt
from torch.autograd import Variable
from collections import OrderedDict
from torch.utils.data import Dataset, DataLoader
import nibabel as nib
from tqdm import tqdm
import enum
from skimage.transform import resize
import time
from scipy import stats
import random
from IPython import display
import torch.nn.functional as F
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.utils import to_categorical
import torchvision
import torchio as tio
###Output
_____no_output_____
###Markdown
GAN
###Code
import numpy as np
import torch
import os
from torch import nn
from torch import optim
from torch.nn import functional as F
from skimage.transform import resize
class Generator(nn.Module):
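    """3D WGAN-style generator: projects a 1000-d noise vector to a 512x4x4x4
    volume, then repeatedly upsamples (F.interpolate) and convolves, doubling
    the resolution five times up to a 128x128x128 output."""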
def __init__(self, noise:int=1000, channel:int=64):
super(Generator, self).__init__()
_c = channel
self.noise = noise
self.fc = nn.Linear(1000,512*4*4*4)
self.bn1 = nn.BatchNorm3d(_c*8)
self.tp_conv2 = nn.Conv3d(_c*8, _c*4, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm3d(_c*4)
self.tp_conv3 = nn.Conv3d(_c*4, _c*2, kernel_size=3, stride=1, padding=1, bias=False)
self.bn3 = nn.BatchNorm3d(_c*2)
self.tp_conv4 = nn.Conv3d(_c*2, _c, kernel_size=3, stride=1, padding=1, bias=False)
self.bn4 = nn.BatchNorm3d(_c)
self.tp_conv5 = nn.Conv3d(_c, 1, kernel_size=3, stride=1, padding=1, bias=False)
def forward(self, noise):
noise = noise.view(-1, 1000)
h = self.fc(noise)
h = h.view(-1,512,4,4,4)
h = F.relu(self.bn1(h))
h = F.interpolate(h,scale_factor = 2)
h = self.tp_conv2(h)
h = F.relu(self.bn2(h))
h = F.interpolate(h,scale_factor = 2)
h = self.tp_conv3(h)
h = F.relu(self.bn3(h))
h = F.interpolate(h,scale_factor = 2)
h = self.tp_conv4(h)
h = F.relu(self.bn4(h))
h = F.interpolate(h,scale_factor = 2)
h = self.tp_conv5(h)
h = torch.tanh(h)
# Extra layers
h = F.interpolate(h,scale_factor = 2)
return h
# Loading Generator
G = Generator().cuda()
G.load_state_dict(torch.load('/content/gdrive/MyDrive/WGAN_G.pth',map_location='cuda:0'))
import nibabel as nib
from nilearn import plotting
Show_color = False
noise = Variable(torch.randn((1, 1000)).cuda())
fake_image = G(noise)
featmask = np.squeeze(fake_image[0].data.cpu().numpy())
featmask = nib.Nifti1Image(featmask,affine = np.eye(4))
arr1 = [4,6,8,10,12,14,16,18,20,22,24,26,28,30,32]
arr2 = [34,36,38,40,42,44,46,48,50,52,54,56,58,60]
if Show_color:
disp = plotting.plot_img(featmask,cut_coords=arr1,draw_cross=False,annotate=False,black_bg=True,display_mode='x')
# disp.annotate(size=25,left_right=False,positions=True)
plotting.show()
disp=plotting.plot_img(featmask,cut_coords=arr2,draw_cross=False,annotate=False,black_bg=True,display_mode='x')
# disp.annotate(size=25,left_right=False)
plotting.show()
else:
disp = plotting.plot_anat(featmask,cut_coords=arr1,draw_cross=False,annotate=False,black_bg=True,display_mode='x')
plotting.show()
# disp.annotate(size=25,left_right=False)
disp=plotting.plot_anat(featmask,cut_coords=arr2,draw_cross=False,annotate=False,black_bg=True,display_mode='x')
# disp.annotate(size=25,left_right=False)
plotting.show()
# visualization
import matplotlib.pyplot as plt
def show_image(test_image):
count = 1
# test_image = test_image.view(64*2,64*2,64*2)
test_image = test_image.detach().to('cpu')
plt.figure(figsize=(20,12))
for i in range(48):
# if i%2==0:
plt.subplot(8,6,count)
count+=1
# plt.imshow(test_image[:,:,i])
plt.imshow(test_image[:,:,i], cmap='bone')
plt.show()
# plt.savefig('brain_48.png')
# noise = Variable(torch.randn((1, 1000, 1, 1 ,1)).cuda())
# fake_image = G(noise)
# print(fake_image.shape)
# show_image(fake_image[0])
###Output
_____no_output_____
###Markdown
UNET
###Code
class Attention(nn.Module):  # squeeze-and-excitation style channel attention
    def __init__(self, in_channels, reduced_dim):  # in_channels -> reduced_dim -> in_channels
super(Attention, self).__init__()
self.se = nn.Sequential(
nn.AdaptiveAvgPool3d(1), # C x H x W -> C x 1 x 1
nn.Conv3d(in_channels, reduced_dim, 1),
nn.SiLU(),
nn.Conv3d(reduced_dim, in_channels, 1),
nn.Sigmoid(),
)
def forward(self, x):
return x * self.se(x)
@torch.jit.script
def autocrop(encoder_layer: torch.Tensor, decoder_layer: torch.Tensor):
"""
Center-crops the encoder_layer to the size of the decoder_layer,
so that merging (concatenation) between levels/blocks is possible.
This is only necessary for input sizes != 2**n for 'same' padding and always required for 'valid' padding.
"""
if encoder_layer.shape[2:] != decoder_layer.shape[2:]:
ds = encoder_layer.shape[2:]
es = decoder_layer.shape[2:]
assert ds[0] >= es[0]
assert ds[1] >= es[1]
if encoder_layer.dim() == 4: # 2D
encoder_layer = encoder_layer[
:,
:,
((ds[0] - es[0]) // 2):((ds[0] + es[0]) // 2),
((ds[1] - es[1]) // 2):((ds[1] + es[1]) // 2)
]
elif encoder_layer.dim() == 5: # 3D
assert ds[2] >= es[2]
encoder_layer = encoder_layer[
:,
:,
((ds[0] - es[0]) // 2):((ds[0] + es[0]) // 2),
((ds[1] - es[1]) // 2):((ds[1] + es[1]) // 2),
((ds[2] - es[2]) // 2):((ds[2] + es[2]) // 2),
]
return encoder_layer, decoder_layer
def conv_layer(dim: int):
if dim == 3:
return nn.Conv3d
elif dim == 2:
return nn.Conv2d
def get_conv_layer(in_channels: int,
out_channels: int,
kernel_size: int = 3,
stride: int = 1,
padding: int = 1,
bias: bool = True,
dim: int = 2):
return conv_layer(dim)(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding,
bias=bias)
def conv_transpose_layer(dim: int):
if dim == 3:
return nn.ConvTranspose3d
elif dim == 2:
return nn.ConvTranspose2d
def get_up_layer(in_channels: int,
out_channels: int,
kernel_size: int = 2,
stride: int = 2,
dim: int = 3,
up_mode: str = 'transposed',
):
if up_mode == 'transposed':
return conv_transpose_layer(dim)(in_channels, out_channels, kernel_size=kernel_size, stride=stride)
else:
return nn.Upsample(scale_factor=2.0, mode=up_mode)
def maxpool_layer(dim: int):
if dim == 3:
return nn.MaxPool3d
elif dim == 2:
return nn.MaxPool2d
def get_maxpool_layer(kernel_size: int = 2,
stride: int = 2,
padding: int = 0,
dim: int = 2):
return maxpool_layer(dim=dim)(kernel_size=kernel_size, stride=stride, padding=padding)
def get_activation(activation: str):
if activation == 'ReLU':
return nn.ReLU()
elif activation == 'leaky':
return nn.LeakyReLU(negative_slope=0.1)
elif activation == 'elu':
return nn.ELU()
elif activation == 'PReLU':
return nn.PReLU()
def get_normalization(normalization: str,
num_channels: int,
dim: int):
if normalization == 'batch':
if dim == 3:
return nn.BatchNorm3d(num_channels)
elif dim == 2:
return nn.BatchNorm2d(num_channels)
elif normalization == 'instance':
if dim == 3:
return nn.InstanceNorm3d(num_channels)
elif dim == 2:
return nn.InstanceNorm2d(num_channels)
elif 'group' in normalization:
num_groups = int(normalization.partition('group')[-1]) # get the group size from string
return nn.GroupNorm(num_groups=num_groups, num_channels=num_channels)
class Concatenate(nn.Module):
def __init__(self):
super(Concatenate, self).__init__()
def forward(self, layer_1, layer_2):
x = torch.cat((layer_1, layer_2), 1)
return x
class DownBlock(nn.Module):
"""
A helper Module that performs 2 Convolutions and 1 MaxPool.
An activation follows each convolution.
A normalization layer follows each convolution.
"""
def __init__(self,
in_channels: int,
out_channels: int,
pooling: bool = True,
activation: str = 'relu',
normalization: str = None,
dim: str = 2,
conv_mode: str = 'same'):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.pooling = pooling
self.normalization = normalization
if conv_mode == 'same':
self.padding = 1
elif conv_mode == 'valid':
self.padding = 0
self.dim = dim
self.activation = activation
# conv layers
self.conv1 = get_conv_layer(self.in_channels, self.out_channels, kernel_size=3, stride=1, padding=self.padding,
bias=True, dim=self.dim)
self.conv2 = get_conv_layer(self.out_channels, self.out_channels, kernel_size=3, stride=1, padding=self.padding,
bias=True, dim=self.dim)
# pooling layer
if self.pooling:
self.pool = get_maxpool_layer(kernel_size=2, stride=2, padding=0, dim=self.dim)
# activation layers
self.act1 = get_activation(self.activation)
self.act2 = get_activation(self.activation)
# normalization layers
if self.normalization:
self.norm1 = get_normalization(normalization=self.normalization, num_channels=self.out_channels,
dim=self.dim)
self.norm2 = get_normalization(normalization=self.normalization, num_channels=self.out_channels,
dim=self.dim)
self.Attention = Attention(self.out_channels,16)
# self.ProjectExciteLayer = ProjectExciteLayer(self.out_channels)
def forward(self, x):
y = self.conv1(x) # convolution 1
y = self.act1(y) # activation 1
if self.normalization:
y = self.norm1(y) # normalization 1
y = self.conv2(y) # convolution 2
y = self.act2(y) # activation 2
if self.normalization:
y = self.norm2(y) # normalization 2
y = self.Attention(y)
# y = self.ProjectExciteLayer(y)
before_pooling = y # save the outputs before the pooling operation
if self.pooling:
y = self.pool(y) # pooling
return y, before_pooling
class UpBlock(nn.Module):
"""
A helper Module that performs 2 Convolutions and 1 UpConvolution/Upsample.
An activation follows each convolution.
A normalization layer follows each convolution.
"""
def __init__(self,
in_channels: int,
out_channels: int,
activation: str = 'relu',
normalization: str = None,
dim: int = 3,
conv_mode: str = 'same',
up_mode: str = 'transposed'
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.normalization = normalization
if conv_mode == 'same':
self.padding = 1
elif conv_mode == 'valid':
self.padding = 0
self.dim = dim
self.activation = activation
self.up_mode = up_mode
# upconvolution/upsample layer
self.up = get_up_layer(self.in_channels, self.out_channels, kernel_size=2, stride=2, dim=self.dim,
up_mode=self.up_mode)
# conv layers
self.conv0 = get_conv_layer(self.in_channels, self.out_channels, kernel_size=1, stride=1, padding=0,
bias=True, dim=self.dim)
self.conv1 = get_conv_layer(2 * self.out_channels, self.out_channels, kernel_size=3, stride=1,
padding=self.padding,
bias=True, dim=self.dim)
self.conv2 = get_conv_layer(self.out_channels, self.out_channels, kernel_size=3, stride=1, padding=self.padding,
bias=True, dim=self.dim)
# activation layers
self.act0 = get_activation(self.activation)
self.act1 = get_activation(self.activation)
self.act2 = get_activation(self.activation)
# normalization layers
if self.normalization:
self.norm0 = get_normalization(normalization=self.normalization, num_channels=self.out_channels,
dim=self.dim)
self.norm1 = get_normalization(normalization=self.normalization, num_channels=self.out_channels,
dim=self.dim)
self.norm2 = get_normalization(normalization=self.normalization, num_channels=self.out_channels,
dim=self.dim)
# concatenate layer
self.concat = Concatenate()
def forward(self, encoder_layer, decoder_layer):
""" Forward pass
Arguments:
encoder_layer: Tensor from the encoder pathway
decoder_layer: Tensor from the decoder pathway (to be up'd)
"""
up_layer = self.up(decoder_layer) # up-convolution/up-sampling
cropped_encoder_layer, dec_layer = autocrop(encoder_layer, up_layer) # cropping
if self.up_mode != 'transposed':
# We need to reduce the channel dimension with a conv layer
up_layer = self.conv0(up_layer) # convolution 0
up_layer = self.act0(up_layer) # activation 0
if self.normalization:
up_layer = self.norm0(up_layer) # normalization 0
merged_layer = self.concat(up_layer, cropped_encoder_layer) # concatenation
y = self.conv1(merged_layer) # convolution 1
y = self.act1(y) # activation 1
if self.normalization:
y = self.norm1(y) # normalization 1
y = self.conv2(y) # convolution 2
        y = self.act2(y)  # activation 2
if self.normalization:
y = self.norm2(y) # normalization 2
return y
class UNet(nn.Module):
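    """Configurable N-block UNet (2D or 3D) assembled from the DownBlock/UpBlock
    helpers above; each DownBlock also applies the channel Attention module."""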
def __init__(self,
in_channels: int = 1,
out_channels: int = 2,
n_blocks: int = 4,
start_filters: int = 32,
activation: str = 'relu',
normalization: str = 'batch',
conv_mode: str = 'same',
dim: int = 2,
up_mode: str = 'transposed'
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.n_blocks = n_blocks
self.start_filters = start_filters
self.activation = activation
self.normalization = normalization
self.conv_mode = conv_mode
self.dim = dim
self.up_mode = up_mode
self.down_blocks = []
self.up_blocks = []
# create encoder path
for i in range(self.n_blocks):
num_filters_in = self.in_channels if i == 0 else num_filters_out
num_filters_out = self.start_filters * (2 ** i)
pooling = True if i < self.n_blocks - 1 else False
down_block = DownBlock(in_channels=num_filters_in,
out_channels=num_filters_out,
pooling=pooling,
activation=self.activation,
normalization=self.normalization,
conv_mode=self.conv_mode,
dim=self.dim)
self.down_blocks.append(down_block)
# create decoder path (requires only n_blocks-1 blocks)
for i in range(n_blocks - 1):
num_filters_in = num_filters_out
num_filters_out = num_filters_in // 2
up_block = UpBlock(in_channels=num_filters_in,
out_channels=num_filters_out,
activation=self.activation,
normalization=self.normalization,
conv_mode=self.conv_mode,
dim=self.dim,
up_mode=self.up_mode)
self.up_blocks.append(up_block)
# final convolution
self.conv_final = get_conv_layer(num_filters_out, self.out_channels, kernel_size=1, stride=1, padding=0,
bias=True, dim=self.dim)
# add the list of modules to current module
self.down_blocks = nn.ModuleList(self.down_blocks)
self.up_blocks = nn.ModuleList(self.up_blocks)
# initialize the weights
self.initialize_parameters()
@staticmethod
def weight_init(module, method, **kwargs):
if isinstance(module, (nn.Conv3d, nn.Conv2d, nn.ConvTranspose3d, nn.ConvTranspose2d)):
method(module.weight, **kwargs) # weights
@staticmethod
def bias_init(module, method, **kwargs):
if isinstance(module, (nn.Conv3d, nn.Conv2d, nn.ConvTranspose3d, nn.ConvTranspose2d)):
method(module.bias, **kwargs) # bias
def initialize_parameters(self,
method_weights=nn.init.kaiming_uniform_,
method_bias=nn.init.zeros_,
kwargs_weights={},
kwargs_bias={}
):
for module in self.modules():
self.weight_init(module, method_weights, **kwargs_weights) # initialize weights
self.bias_init(module, method_bias, **kwargs_bias) # initialize bias
def forward(self, x: torch.tensor):
encoder_output = []
# Encoder pathway
for module in self.down_blocks:
x, before_pooling = module(x)
encoder_output.append(before_pooling)
# Decoder pathway
for i, module in enumerate(self.up_blocks):
before_pool = encoder_output[-(i + 2)]
x = module(before_pool, x)
x = self.conv_final(x)
return x
segmentor = UNet(in_channels=3,
out_channels=4,
n_blocks=4,
start_filters=32,
activation='ReLU',
normalization='batch',
conv_mode='same',
dim=3).to('cuda')
optimizer = torch.optim.Adam(segmentor.parameters(), lr=0.0001)
checkpoint = torch.load('/content/gdrive/MyDrive/weights_best_one.pth')
segmentor.load_state_dict(checkpoint['weights'])
###Output
_____no_output_____
###Markdown
DATA
###Code
TRAIN_IMG_DIR = "/content/gdrive/MyDrive/brain/Task01_BrainTumour/imagesTr"
TRAIN_MASK_DIR = "/content/gdrive/MyDrive/brain/Task01_BrainTumour/labelsTr"
VAL_IMG_DIR = "/content/gdrive/MyDrive/brain/Task01_BrainTumour/imagesVal"
VAL_MASK_DIR = "/content/gdrive/MyDrive/brain/Task01_BrainTumour/labelsVal"
TEST_IMG_DIR = "/content/gdrive/MyDrive/brain/Task01_BrainTumour/imagesTs"
def make_list(s):
l = sorted(os.listdir(s))
return l
images_train = make_list(TRAIN_IMG_DIR)
masks_train = make_list(TRAIN_MASK_DIR)
images_val = make_list(VAL_IMG_DIR)
masks_val = make_list(VAL_MASK_DIR)
images_test = make_list(TEST_IMG_DIR)
len(images_train), len(masks_train), len(images_val), len(masks_val), len(images_test)
#list of img and mask path......TRAIN
img_train_paths = []
mask_train_paths = []
#list of img and mask path......VAL
img_val_paths = []
mask_val_paths = []
#list of img path.......TEST
img_test_paths = []
for idx in range(len(images_train)):
img_train_paths.append(os.path.join(TRAIN_IMG_DIR, images_train[idx]))
mask_train_paths.append(os.path.join(TRAIN_MASK_DIR, masks_train[idx]))
for idx in range(len(images_val)):
img_val_paths.append(os.path.join(VAL_IMG_DIR, images_val[idx]))
mask_val_paths.append(os.path.join(VAL_MASK_DIR, masks_val[idx]))
for idx in range(len(images_test)):
img_test_paths.append(os.path.join(TEST_IMG_DIR, images_test[idx]))
len(img_train_paths), len(mask_train_paths), len(img_val_paths), len(mask_val_paths), len(img_test_paths)
from torch.utils.data.dataset import Dataset
from torch.utils.data import dataloader
class TrueDataset(Dataset):
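    """Real samples from Task01_BrainTumour: loads a NIfTI image/mask pair,
    min-max scales the image, crops to 128x128x48, keeps modalities 0, 2, and 3,
    and one-hot encodes the mask into 4 classes."""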
def __init__(self, img_paths=None, mask_paths=None, transform_imgs=None, transform_mask=None):
self.img_paths = img_paths
self.mask_paths = mask_paths
self.transform_imgs = transform_imgs
self.transform_mask = transform_mask
if self.mask_paths is not None:
assert len(self.img_paths) == len(self.mask_paths)
        self.images = len(self.img_paths)  # number of image files in the dataset
def __len__(self):
return len(self.img_paths) #length of dataset
def __getitem__(self, index):
img_path = self.img_paths[index]
image = nib.load(img_path).get_fdata(dtype=np.float32)
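        # min-max scale intensities to [0, 1], fitting the scaler on voxels
        # flattened to shape (n_voxels, n_channels)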
scaler = MinMaxScaler()
image = scaler.fit_transform(image.reshape(-1, image.shape[-1])).reshape(image.shape)
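        # crop to a fixed 128x128x48 region and keep channels 0, 2 and 3
        # (dropping channel 1)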
image = image[56:184, 56:184, 73:121]
image0 = image[:,:,:,0]
image2 = image[:,:,:,2]
image3 = image[:,:,:,3]
image = np.stack((image0,image2,image3),axis=3)
if self.mask_paths is not None:
mask_path = self.mask_paths[index]
mask = nib.load(mask_path).get_fdata(dtype=np.float32)
mask = mask.astype(np.uint8)
mask = mask[56:184, 56:184, 73:121]
mask = to_categorical(mask, num_classes=4)
# image = np.load(img_path)
image = torch.from_numpy(image)
image = image.permute(3,0,1,2)
if self.mask_paths is not None:
# mask = np.load(mask_path)
mask = torch.from_numpy(mask)
mask = mask.permute(3,0,1,2)
        if self.transform_imgs is not None:
            image = self.transform_imgs(image)
        if self.transform_mask is not None and self.mask_paths is not None:
            mask = self.transform_mask(mask)
if self.mask_paths is not None:
return image, mask
if self.mask_paths is None:
return image
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from torch.autograd import Variable
class GeneratedDataset(Dataset):
def __init__(self, generator, segmentor):
self.segmentor = segmentor
self.generator = generator
def __len__(self):
        return 10  # fixed number of synthetic samples per epoch
def __getitem__(self, index):
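        # Seed the noise with the sample index so each "sample" is
        # reproducible, synthesise a fake volume with the GAN generator, and
        # pseudo-label it with the pretrained segmentor.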
torch.manual_seed(index)
noise = Variable(torch.randn((1, 1000, 1, 1 ,1)).cuda())
fake_image = self.generator(noise)
fake_image = fake_image.view(64*2,64*2,64*2)
fake_image = fake_image.detach().to('cpu')
scaler = MinMaxScaler()
fake_image = scaler.fit_transform(fake_image.reshape(-1, fake_image.shape[-1])).reshape(fake_image.shape)
r = range(16,112,2)
image0 = fake_image[:,:,r]
r = range(17,112,2)
image2 = fake_image[:,:,r]
r = range(16,112,2)
image3 = fake_image[:,:,r]
image = np.stack((image0,image2,image3),axis=3)
image = torch.from_numpy(image)
image = image.type(torch.float32)
image = image.permute(3,0,1,2)
img = image.view(1,3, 128, 128, 48).to('cuda')
mask = self.segmentor(img).softmax(dim=1)
mask = mask.view(4,128,128,48)
return image, mask
###Output
_____no_output_____
###Markdown
FEDERATED PIPELINE
###Code
# Client 1
Client_1 = dict()
Client_1['datasource_1'] = TrueDataset(
img_paths=img_train_paths,
mask_paths=mask_train_paths
)
Client_1['dataloader_1'] = DataLoader(
Client_1['datasource_1'],
batch_size=4,
num_workers=2,
shuffle=True,
)
Client_1['datasource_2'] = GeneratedDataset(
generator = G,
segmentor = segmentor,
)
Client_1['dataloader_2'] = DataLoader(
Client_1['datasource_2'],
batch_size=1,
num_workers=0,
shuffle=True,
)
Client_1['model'] = UNet(in_channels=3,
out_channels=4,
n_blocks=4,
start_filters=32,
activation='ReLU',
normalization='batch',
conv_mode='same',
dim=3).to('cuda')
Client_1['optimizer'] = torch.optim.Adam(Client_1['model'].parameters(), lr=0.0001)
checkpoint = torch.load('/content/gdrive/MyDrive/weights_best_one.pth')
Client_1['model'].load_state_dict(checkpoint['weights'])
# Client 2
Client_2 = dict()
Client_2['datasource_1'] = TrueDataset(
img_paths=img_train_paths,
mask_paths=mask_train_paths
)
Client_2['dataloader_1'] = DataLoader(
Client_2['datasource_1'],
batch_size=4,
num_workers=2,
shuffle=True,
)
Client_2['datasource_2'] = GeneratedDataset(
generator = G,
segmentor = segmentor,
)
Client_2['dataloader_2'] = DataLoader(
Client_2['datasource_2'],
batch_size=1,
num_workers=0,
shuffle=True,
)
Client_2['model'] = UNet(in_channels=3,
out_channels=4,
n_blocks=4,
start_filters=32,
activation='ReLU',
normalization='batch',
conv_mode='same',
dim=3).to('cuda')
Client_2['optimizer'] = torch.optim.Adam(Client_2['model'].parameters(), lr=0.0001)
checkpoint = torch.load('/content/gdrive/MyDrive/weights_best_one.pth')
Client_2['model'].load_state_dict(checkpoint['weights'])
Clients = [Client_1, Client_2]
# Server
Server = dict()
Server['model'] = UNet(in_channels=3,
out_channels=4,
n_blocks=4,
start_filters=32,
activation='ReLU',
normalization='batch',
conv_mode='same',
dim=3).to('cuda')
Server['optimizer'] = torch.optim.Adam(Server['model'].parameters(), lr=0.0001)
checkpoint = torch.load('/content/gdrive/MyDrive/weights_best_one.pth')
Server['model'].load_state_dict(checkpoint['weights'])
# Loss Function
class DiceLoss(nn.Module):
def __init__(self, weight=None, size_average=True):
super(DiceLoss, self).__init__()
def forward(self, inputs, targets, smooth=1):
# inputs = F.sigmoid(inputs)
inputs = inputs.view(-1)
targets = targets.view(-1)
intersection = (inputs * targets).sum()
dice = (2.*intersection + smooth)/(inputs.sum() + targets.sum() + smooth)
return 1 - dice, dice
DiceLoss = DiceLoss()
ALPHA = 0.8
BETA = 0.2
GAMMA = 0.75
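# Focal Tversky loss: FT = (1 - TI)**GAMMA, where the Tversky index TI weights
# false positives by ALPHA and false negatives by BETA; the GAMMA exponent
# controls how strongly poorly segmented examples dominate the loss.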
class FocalTverskyLoss(nn.Module):
def __init__(self, weight=None, size_average=True):
super(FocalTverskyLoss, self).__init__()
def forward(self, inputs, targets, smooth=1, alpha=ALPHA, beta=BETA, gamma=GAMMA):
#comment out if your model contains a sigmoid or equivalent activation layer
# inputs = F.sigmoid(inputs)
c,d = DiceLoss(inputs, targets)
#flatten label and prediction tensors
inputs = inputs.view(-1)
targets = targets.view(-1)
#True Positives, False Positives & False Negatives
TP = (inputs * targets).sum()
FP = ((1-targets) * inputs).sum()
FN = (targets * (1-inputs)).sum()
Tversky = (TP + smooth) / (TP + alpha*FP + beta*FN + smooth)
FocalTversky = (1 - Tversky)**gamma
return FocalTversky, d
FocalTverskyLoss = FocalTverskyLoss()
# Federated Training loop
from tqdm.auto import tqdm
import torch.distributions as tdist
for epoch in range(2):
print('\nEpoch:',epoch+1,'\n')
i = 1
for client in Clients:
print(f'Training Client {i}')
model = client['model']
optimizer = client['optimizer']
epoch_losses = []
dice_coefs = []
# Real Data
for batch_idx, (inputs, targets) in enumerate(tqdm(client['dataloader_1'])):
inputs = inputs.to('cuda')
targets = targets.to('cuda')
optimizer.zero_grad()
with torch.set_grad_enabled(True):
logits = model(inputs)
probabilities = F.softmax(logits, dim=1)
batch_losses, dice_coefficients = FocalTverskyLoss(probabilities, targets) #DiceLoss(probabilities, targets)
batch_loss = batch_losses.mean()
dice_coef = dice_coefficients.mean()
batch_loss.backward()
optimizer.step()
epoch_losses.append(batch_loss.detach().item())
dice_coefs.append(dice_coef.item())
# GAN Generated Data
for batch_idx, (inputs, targets) in enumerate(tqdm(client['dataloader_2'])):
inputs = inputs.to('cuda')
targets = targets.to('cuda')
optimizer.zero_grad()
with torch.set_grad_enabled(True):
logits = model(inputs)
probabilities = F.softmax(logits, dim=1)
batch_losses, dice_coefficients = FocalTverskyLoss(probabilities, targets) #DiceLoss(probabilities, targets)
batch_loss = batch_losses.mean()
dice_coef = dice_coefficients.mean()
batch_loss.backward()
optimizer.step()
epoch_losses.append(batch_loss.detach().item())
dice_coefs.append(dice_coef.item())
epoch_losses = np.array(epoch_losses)
dice_coefs = np.array(dice_coefs)
print(f'Mean loss: {epoch_losses.mean():0.3f} \t Dice score: {dice_coefs.mean():0.3f}\n')
        i += 1
print('\nSending Weights to Central Server...\n')
# Updating Server
model = Server['model']
models = [Client_1['model'], Client_2['model']]
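# Federated averaging: every floating-point tensor on the server becomes the
# equally weighted mean of the client tensors (0.5 per client for the two
# clients here); standard-normal noise scaled by 1e-5 is added to each client
# contribution, in the style of differentially private aggregation, before the
# averaged weights are copied back to every client.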
with torch.no_grad():
for key in model.state_dict().keys():
if models[0].state_dict()[key].dtype == torch.int64:
model.state_dict()[key].data.copy_(models[0].state_dict()[key])
else:
temp = torch.zeros_like(model.state_dict()[key])
# add noise
for s in range(len(models)):
n = tdist.Normal(0,1)
noise = n.sample(models[s].state_dict()[key].size()).squeeze()
noise = noise.to('cuda')
noise = noise.view(models[s].state_dict()[key].shape)
temp += 0.5*(models[s].state_dict()[key] + noise*1e-5)
# update server model
model.state_dict()[key].data.copy_(temp)
            # update client model
for s in range(len(models)):
models[s].state_dict()[key].data.copy_(model.state_dict()[key])
print('Central Server Updated...\n')
print('Local Clients Updated...\n')
###Output
Epoch: 1
Training Client 1
###Markdown
INFERENCE
###Code
val_ds = TrueDataset(
img_paths = img_val_paths,
mask_paths = mask_val_paths
)
val_loader = DataLoader(
val_ds,
batch_size=4,
num_workers=2,
shuffle=False,
)
model = Server['model']
# model = segmentor
model.eval()
with torch.no_grad():
for batch_idx, (inputs,outputs) in enumerate(tqdm(val_loader)):
inputs = inputs.to('cuda')
outputs = outputs.to('cuda')
logits = model(inputs).softmax(dim=1)
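        # move everything to the CPU and collapse class probabilities to
        # per-voxel labels for visualisation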
l = logits.cpu()
l = np.argmax(l, axis=1)
i = inputs.cpu()
o = outputs.cpu()
o = np.argmax(o, axis=1)
        n_slice = random.randint(0, o.shape[3] - 1)  # randint is inclusive at both ends
plt.figure(figsize=(12, 8))
no=0
print(n_slice,no)
plt.subplot(221)
plt.imshow(i[no,0,:,:, n_slice], cmap='gray')
plt.title('Image flair')
plt.subplot(222)
plt.imshow(i[no,1,:,:, n_slice], cmap='gray')
plt.title('Image t1ce')
plt.subplot(223)
plt.imshow(o[no,:,:,n_slice])
plt.title('Mask original')
plt.subplot(224)
plt.imshow(l[no,:,:,n_slice])
plt.title('Mask predicted')
plt.show()
###Output
_____no_output_____ |
themes/academic/exampleSite/content/post/jupyter/index.ipynb | ###Markdown
---title: Display Jupyter Notebooks with Academicsubtitle: Learn how to blog in Academic using Jupyter notebookssummary: Learn how to blog in Academic using Jupyter notebooksauthors:- admintags: []categories: []date: "2019-02-05T00:00:00Z"lastMod: "2019-09-05T00:00:00Z"featured: falsedraft: false Featured image To use, add an image named `featured.jpg/png` to your page's folder. image: caption: "" focal_point: "" Projects (optional). Associate this post with one or more of your projects. Simply enter your project's folder or file name without extension. E.g. `projects = ["internal-project"]` references `content/project/deep-learning/index.md`. Otherwise, set `projects = []`.projects: []---
###Code
from IPython.core.display import Image
Image('https://www.python.org/static/community_logos/python-logo-master-v3-TM-flattened.png')
print("Welcome to Academic!")
###Output
Welcome to Academic!
|
docs/plugins/tasks/version_control/gitlab.ipynb | ###Markdown
Gitlab
###Code
from nornir.plugins.tasks.version_control import gitlab
print(gitlab.__doc__)
###Output
Exposes some of the Gitlab API functionality for operations on files
in a Gitlab repository.
Example:
nornir.run(files.gitlab,
action="create",
url="https://gitlab.localhost.com",
token="ABCD1234",
repository="test",
filename="config",
ref="master")
Arguments:
dry_run: Whether to apply changes or not
url: Gitlab instance URL
token: Personal access token
repository: source/destination repository
filename: source/destination file name
content: content to write
action: ``create``, ``update``, ``get``
branch: destination branch
destination: local destination filename (only used in get action)
ref: branch, commit hash or tag (only used in get action)
commit_message: commit message
Returns:
Result object with the following attributes set:
* changed (``bool``):
* diff (``str``): unified diff
###Markdown
Example 1 : create a file in a git repository on a gitlab serverIn this example we will create a new file in a git repository on a gitlab server.The contents that we will write to the file is an arbitrary string; in a real-world scenario this could be the running configuration of a device that we fetched using napalm or through another method.First let's import the necessary methods & tasks, then we will create a variable called `content` which is an arbitrary string.
###Code
from nornir import InitNornir
from nornir.plugins.tasks.version_control import gitlab
from nornir.plugins.tasks.commands import remote_command
from nornir.plugins.functions.text import print_result
inventory = {
"plugin": "nornir.plugins.inventory.simple.SimpleInventory",
"options": {
"host_file": "gitlab_data/inventory/hosts.yaml"
}
}
n = InitNornir(inventory=inventory)
content = """127.0.0.1\t\tlocalhost
255.255.255.255\tbroadcasthost
::1\t\tlocalhost
"""
###Output
_____no_output_____
###Markdown
And create a new file called `hosts` in the repository `test` on the `master` branch.
###Code
import requests_mock
from functools import wraps
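# Wrap the gitlab task so its HTTP calls hit requests_mock stubs rather than a
# real Gitlab server, letting this documentation notebook run standalone.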
def wrap_gitlab(f):
@wraps(f)
def wrapper(*args, **kwargs):
with requests_mock.Mocker() as m:
if kwargs.get("ref", None):
kwargs["branch"] = kwargs["ref"]
m.get(url=f"{kwargs['url']}/api/v4/projects?search={kwargs['repository']}", status_code=200, json=[{"name":"test","id":1}])
m.post(url=f"{kwargs['url']}/api/v4/projects/1/repository/files/{kwargs['filename']}", status_code=201)
m.get(url=f"{kwargs['url']}/api/v4/projects/1/repository/files/{kwargs['filename']}?ref={kwargs['branch']}",status_code=200, json={"content":"MTI3LjAuMC4xCQlsb2NhbGhvc3QKMjU1LjI1NS4yNTUuMjU1CWJyb2FkY2FzdGhvc3QKOjoxCQls\nb2NhbGhvc3QK\n"})
m.put(url=f"{kwargs['url']}/api/v4/projects/1/repository/files/{kwargs['filename']}", status_code=200)
return f(*args, **kwargs)
return wrapper
gitlab = wrap_gitlab(gitlab)
result = n.run(
gitlab,
action="create",
url="http://localhost:8080",
    token="SuperSecretToken",
repository="test",
branch="master",
filename="hosts",
#content=results["dev5.no_group"][0]
content=content,
commit_message="Nornir is AWESOME!"
)
###Output
_____no_output_____
###Markdown
The result of the task shows us a diff of the created `hosts` file and the content we provided.
###Code
print_result(result)
###Output
[1m[36mgitlab**************************************************************************[0m
[0m[1m[34m* alpine ** changed : True *****************************************************[0m
[0m[1m[33mvvvv gitlab ** changed : True vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv INFO[0m
[0m---
+++ hosts
@@ -0,0 +1,3 @@
+127.0.0.1 localhost
+255.255.255.255 broadcasthost
+::1 localhost[0m
[0m[1m[33m^^^^ END gitlab ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^[0m
[0m
###Markdown
Example 2 : update an existing file in a git repository on a gitlab server In this example we will update the contents of the hosts file that we created in the previous step. The new contents could come again from a remote host or device, but in this case we will use an arbitrary value for the new contents of the file.
###Code
result = n.run(
gitlab,
action="update",
url="http://localhost:8080",
token="SuperSecretToken",
repository="test",
branch="master",
filename="hosts",
content=f"{content}8.8.8.8\t\tgoogledns",
commit_message="Added new line to hosts file"
)
###Output
_____no_output_____
###Markdown
The result of the task should show us a diff of the changes that we made.
###Code
print_result(result)
###Output
[1m[36mgitlab**************************************************************************[0m
[0m[1m[34m* alpine ** changed : True *****************************************************[0m
[0m[1m[33mvvvv gitlab ** changed : True vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv INFO[0m
[0m--- hosts
+++ hosts
@@ -1,3 +1,4 @@
127.0.0.1 localhost
255.255.255.255 broadcasthost
::1 localhost
+8.8.8.8 googledns[0m
[0m[1m[33m^^^^ END gitlab ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^[0m
[0m
###Markdown
Example 3: get a file from a gitlab repository In this example we will get/download a file from a repository in gitlab. The contents of this file could be a staged configuration of a device or a service on a device. This configuration could then be pushed to the device.In our example we will download the file `hosts` from the `master` branch and save it as `/tmp/hosts`.The `ref` parameter can also be a commit hash or tag.
###Code
!rm -f /tmp/hosts
result = n.run(
gitlab,
action="get",
url="http://localhost:8080",
token="SuperSecretToken",
repository="test",
ref="master",
filename="hosts",
destination="/tmp/hosts"
)
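# The ref argument accepts any git ref, so a tag or commit hash could be used
# in place of "master"; for example (hypothetical tag name):
#
# result = n.run(gitlab, action="get", url="http://localhost:8080",
#                token="SuperSecretToken", repository="test", ref="v1.0.0",
#                filename="hosts", destination="/tmp/hosts")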
###Output
[0m[0m
###Markdown
The result should show us a new file `/tmp/hosts` being created on the local system.
###Code
print_result(result)
###Output
[1m[36mgitlab**************************************************************************[0m
[0m[1m[34m* alpine ** changed : True *****************************************************[0m
[0m[1m[33mvvvv gitlab ** changed : True vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv INFO[0m
[0m--- /tmp/hosts
+++ /tmp/hosts
@@ -0,0 +1,3 @@
+127.0.0.1 localhost
+255.255.255.255 broadcasthost
+::1 localhost[0m
[0m[1m[33m^^^^ END gitlab ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^[0m
[0m |
tv/tvl1den_clr_cupy.ipynb | ###Markdown
Colour ℓ1-TV Denoising (CuPy Version)=====================================This example demonstrates the use of class [tvl1.TVL1Denoise](http://sporco.rtfd.org/en/latest/modules/sporco.admm.tvl1.htmlsporco.admm.tvl1.TVL1Denoise) for removing salt & pepper noise from a colour image using Total Variation regularization with an ℓ1 data fidelity term (ℓ1-TV denoising). This variant of the example uses the GPU accelerated version of [tvl1](http://sporco.rtfd.org/en/latest/modules/sporco.admm.tvl1.htmlmodule-sporco.admm.tvl1) within the [sporco.cupy](http://sporco.rtfd.org/en/latest/modules/sporco.cupy.htmlmodule-sporco.cupy) subpackage.
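As a sketch of the underlying problem (the standard SPORCO ℓ1-TV formulation, with optional weighting matrices omitted), the solver computes $\mathrm{argmin}_{\mathbf{x}} \; \| \mathbf{x} - \mathbf{s} \|_1 + \lambda \left\| \sqrt{(G_r \mathbf{x})^2 + (G_c \mathbf{x})^2} \right\|_1$, where $\mathbf{s}$ is the noisy image, $G_r$ and $G_c$ are row and column gradient operators, and $\lambda$ is the regularization parameter set below.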
###Code
from __future__ import print_function
from builtins import input
import numpy as np
from sporco import util
from sporco import signal
from sporco import metric
from sporco import plot
plot.config_notebook_plotting()
from sporco.cupy import (cupy_enabled, np2cp, cp2np, select_device_by_load,
gpu_info)
from sporco.cupy.admm import tvl1
###Output
_____no_output_____
###Markdown
Load reference image.
###Code
img = util.ExampleImages().image('monarch.png', scaled=True,
idxexp=np.s_[:,160:672])
###Output
_____no_output_____
###Markdown
Construct test image corrupted by 20% salt & pepper noise.
###Code
np.random.seed(12345)
imgn = signal.spnoise(img, 0.2)
###Output
_____no_output_____
###Markdown
Set regularization parameter and options for ℓ1-TV denoising solver. The regularization parameter used here has been manually selected for good performance.
###Code
lmbda = 8e-1
opt = tvl1.TVL1Denoise.Options({'Verbose': True, 'MaxMainIter': 200,
'RelStopTol': 5e-3, 'gEvalY': False,
'AutoRho': {'Enabled': True}})
###Output
_____no_output_____
###Markdown
Create solver object and solve, returning the the denoised image ``imgr``.
###Code
if not cupy_enabled():
print('CuPy/GPU device not available: running without GPU acceleration\n')
else:
id = select_device_by_load()
info = gpu_info()
if info:
print('Running on GPU %d (%s)\n' % (id, info[id].name))
b = tvl1.TVL1Denoise(np2cp(imgn), lmbda, opt)
imgr = cp2np(b.solve())
###Output
Running on GPU 0 (GeForce RTX 2080 Ti)
###Markdown
Display solve time and denoising performance.
###Code
print("TVL1Denoise solve time: %5.2f s" % b.timer.elapsed('solve'))
print("Noisy image PSNR: %5.2f dB" % metric.psnr(img, imgn))
print("Denoised image PSNR: %5.2f dB" % metric.psnr(img, imgr))
###Output
TVL1Denoise solve time: 0.95 s
Noisy image PSNR: 12.02 dB
Denoised image PSNR: 29.29 dB
###Markdown
Display reference, corrupted, and denoised images.
###Code
fig = plot.figure(figsize=(20, 5))
plot.subplot(1, 3, 1)
plot.imview(img, title='Reference', fig=fig)
plot.subplot(1, 3, 2)
plot.imview(imgn, title='Corrupted', fig=fig)
plot.subplot(1, 3, 3)
plot.imview(imgr, title=r'Restored ($\ell_1$-TV)', fig=fig)
fig.show()
###Output
_____no_output_____
###Markdown
Get iterations statistics from solver object and plot functional value, ADMM primary and dual residuals, and automatically adjusted ADMM penalty parameter against the iteration number.
###Code
its = b.getitstat()
fig = plot.figure(figsize=(20, 5))
plot.subplot(1, 3, 1)
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)
plot.subplot(1, 3, 2)
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T,
ptyp='semilogy', xlbl='Iterations', ylbl='Residual',
lgnd=['Primal', 'Dual'], fig=fig)
plot.subplot(1, 3, 3)
plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig)
fig.show()
###Output
_____no_output_____ |
views/code/build-page-simple.ipynb | ###Markdown
Run the following cell to generate an index sorted alphabetically by lowercase term local name. Omit this index if the terms have opaque local names.
###Code
# generate the index of terms grouped by category and sorted alphabetically by lowercase term local name
text = '### 3.1 Index By Term Name\n\n'
text += '(See also [3.2 Index By Label](#32-index-by-label))\n\n'
for category in range(0,len(display_order)):
text += '**' + display_label[category] + '**\n'
text += '\n'
if organized_in_categories:
filtered_table = terms_sorted_by_localname[terms_sorted_by_localname['tdwgutility_organizedInClass']==display_order[category]]
filtered_table.reset_index(drop=True, inplace=True)
else:
filtered_table = terms_sorted_by_localname
filtered_table.reset_index(drop=True, inplace=True)
for row_index,row in filtered_table.iterrows():
curie = row['pref_ns_prefix'] + ":" + row['term_localName']
curie_anchor = curie.replace(':','_')
text += '[' + curie + '](#' + curie_anchor + ') |\n'
text = text[:len(text)-2] # remove final trailing vertical bar and newline
text += '\n\n' # put back removed newline
index_by_name = text
print(index_by_name)
###Output
_____no_output_____
###Markdown
Run the following cell to generate an index by term label
###Code
text = '\n\n'
# Comment out the following two lines if there is no index by local names
text = '### 3.2 Index By Label\n\n'
text += '(See also [3.1 Index By Term Name](#31-index-by-term-name))\n\n'
for category in range(0,len(display_order)):
if organized_in_categories:
text += '**' + display_label[category] + '**\n'
text += '\n'
filtered_table = terms_sorted_by_label[terms_sorted_by_label['tdwgutility_organizedInClass']==display_order[category]]
filtered_table.reset_index(drop=True, inplace=True)
else:
filtered_table = terms_sorted_by_label
filtered_table.reset_index(drop=True, inplace=True)
for row_index,row in filtered_table.iterrows():
if row_index == 0 or (row_index != 0 and row['label'] != filtered_table.iloc[row_index - 1].loc['label']): # this is a hack to prevent duplicate labels
curie_anchor = row['pref_ns_prefix'] + "_" + row['term_localName']
text += '[' + row['label'] + '](#' + curie_anchor + ') |\n'
text = text[:len(text)-2] # remove final trailing vertical bar and newline
text += '\n\n' # put back removed newline
index_by_label = text
print(index_by_label)
decisions_df = pd.read_csv('https://raw.githubusercontent.com/tdwg/rs.tdwg.org/master/decisions/decisions-links.csv', na_filter=False)
# generate a table for each term, with terms grouped by category
# generate the Markdown for the terms table
text = '## 4 Vocabulary\n'
for category in range(0,len(display_order)):
if organized_in_categories:
text += '### 4.' + str(category + 1) + ' ' + display_label[category] + '\n'
text += '\n'
text += display_comments[category] # insert the comments for the category, if any.
filtered_table = terms_sorted_by_localname[terms_sorted_by_localname['tdwgutility_organizedInClass']==display_order[category]]
filtered_table.reset_index(drop=True, inplace=True)
else:
filtered_table = terms_sorted_by_localname
filtered_table.reset_index(drop=True, inplace=True)
for row_index,row in filtered_table.iterrows():
text += '<table>\n'
curie = row['pref_ns_prefix'] + ":" + row['term_localName']
curieAnchor = curie.replace(':','_')
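        # anchors swap ':' for '_' (e.g. dwcpw:p001 -> dwcpw_p001) so the
        # CURIE can serve as an HTML fragment identifier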
text += '\t<thead>\n'
text += '\t\t<tr>\n'
text += '\t\t\t<th colspan="2"><a id="' + curieAnchor + '"></a>Term Name ' + curie + '</th>\n'
text += '\t\t</tr>\n'
text += '\t</thead>\n'
text += '\t<tbody>\n'
text += '\t\t<tr>\n'
text += '\t\t\t<td>Term IRI</td>\n'
uri = row['pref_ns_uri'] + row['term_localName']
text += '\t\t\t<td><a href="' + uri + '">' + uri + '</a></td>\n'
text += '\t\t</tr>\n'
text += '\t\t<tr>\n'
text += '\t\t\t<td>Modified</td>\n'
text += '\t\t\t<td>' + row['term_modified'] + '</td>\n'
text += '\t\t</tr>\n'
if row['version_iri'] != '':
text += '\t\t<tr>\n'
text += '\t\t\t<td>Term version IRI</td>\n'
text += '\t\t\t<td><a href="' + row['version_iri'] + '">' + row['version_iri'] + '</a></td>\n'
text += '\t\t</tr>\n'
text += '\t\t<tr>\n'
text += '\t\t\t<td>Label</td>\n'
text += '\t\t\t<td>' + row['label'] + '</td>\n'
text += '\t\t</tr>\n'
if row['term_deprecated'] != '':
text += '\t\t<tr>\n'
text += '\t\t\t<td></td>\n'
text += '\t\t\t<td><strong>This term is deprecated and should no longer be used.</strong></td>\n'
text += '\t\t</tr>\n'
text += '\t\t<tr>\n'
text += '\t\t\t<td>Definition</td>\n'
text += '\t\t\t<td>' + row['definition'] + '</td>\n'
text += '\t\t</tr>\n'
if row['usage'] != '':
text += '\t\t<tr>\n'
text += '\t\t\t<td>Usage</td>\n'
text += '\t\t\t<td>' + convert_link(convert_code(row['usage'])) + '</td>\n'
text += '\t\t</tr>\n'
if row['notes'] != '':
text += '\t\t<tr>\n'
text += '\t\t\t<td>Notes</td>\n'
text += '\t\t\t<td>' + convert_link(convert_code(row['notes'])) + '</td>\n'
text += '\t\t</tr>\n'
if row['examples'] != '':
text += '\t\t<tr>\n'
text += '\t\t\t<td>Examples</td>\n'
text += '\t\t\t<td>' + convert_link(convert_code(row['examples'])) + '</td>\n'
text += '\t\t</tr>\n'
if (vocab_type == 2 or vocab_type == 3) and row['controlled_value_string'] != '': # controlled vocabulary
text += '\t\t<tr>\n'
text += '\t\t\t<td>Controlled value</td>\n'
text += '\t\t\t<td>' + row['controlled_value_string'] + '</td>\n'
text += '\t\t</tr>\n'
if vocab_type == 3 and row['skos_broader'] != '': # controlled vocabulary with skos:broader relationships
text += '\t\t<tr>\n'
text += '\t\t\t<td>Has broader concept</td>\n'
curieAnchor = row['skos_broader'].replace(':','_')
text += '\t\t\t<td><a href="#' + curieAnchor + '">' + row['skos_broader'] + '</a></td>\n'
text += '\t\t</tr>\n'
text += '\t\t<tr>\n'
text += '\t\t\t<td>Type</td>\n'
if row['type'] == 'http://www.w3.org/1999/02/22-rdf-syntax-ns#Property':
text += '\t\t\t<td>Property</td>\n'
elif row['type'] == 'http://www.w3.org/2000/01/rdf-schema#Class':
text += '\t\t\t<td>Class</td>\n'
elif row['type'] == 'http://www.w3.org/2004/02/skos/core#Concept':
text += '\t\t\t<td>Concept</td>\n'
else:
text += '\t\t\t<td>' + row['type'] + '</td>\n' # this should rarely happen
text += '\t\t</tr>\n'
# Look up decisions related to this term
for drow_index,drow in decisions_df.iterrows():
if drow['linked_affected_resource'] == uri:
text += '\t\t<tr>\n'
text += '\t\t\t<td>Executive Committee decision</td>\n'
text += '\t\t\t<td><a href="http://rs.tdwg.org/decisions/' + drow['decision_localName'] + '">http://rs.tdwg.org/decisions/' + drow['decision_localName'] + '</a></td>\n'
text += '\t\t</tr>\n'
text += '\t</tbody>\n'
text += '</table>\n'
text += '\n'
text += '\n'
term_table = text
print(term_table)
###Output
## 4 Vocabulary
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p001"></a>Term Name dwcpw:p001</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p001">http://rs.tdwg.org/dwc/pw/p001</a></td>
		</tr>
		<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p001-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p001-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>biological control</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms occuring in an area because they were introduced for the purpose of biological control of another organism.</td>
</tr>
<tr>
<td>Notes</td>
<td>Released intentionally into the (semi)natural environment with the purpose of controlling the population(s) of one or more organisms. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>biologicalControl</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p045">dwcpw:p045</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p002"></a>Term Name dwcpw:p002</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p002">http://rs.tdwg.org/dwc/pw/p002</a></td>
		</tr>
		<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p002-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p002-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>erosion control</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms introduced for the purpose of erosion control/dune stabilization (windbreaks, hedges, etc).</td>
</tr>
<tr>
<td>Notes</td>
<td>Probably only applicable only to plants. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>erosionControl</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p045">dwcpw:p045</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p003"></a>Term Name dwcpw:p003</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p003">http://rs.tdwg.org/dwc/pw/p003</a></td>
		</tr>
		<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p003-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p003-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>fishery in the wild</td>
</tr>
<tr>
<td>Definition</td>
<td>Fish stocked into the wild either to create a fishery or for recreational angling.</td>
</tr>
<tr>
<td>Notes</td>
<td>Largely applicable to freshwater and anadromous fish. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>fisheryInTheWild</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p045">dwcpw:p045</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p004"></a>Term Name dwcpw:p004</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p004">http://rs.tdwg.org/dwc/pw/p004</a></td>
		</tr>
		<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p004-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p004-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>hunting</td>
</tr>
<tr>
<td>Definition</td>
<td>Animals stocked into the wild specifically with the intention that they would be hunted for sport.</td>
</tr>
<tr>
<td>Notes</td>
<td>Largely applicable to terrestrial vertebrates. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>hunting</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p045">dwcpw:p045</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p005"></a>Term Name dwcpw:p005</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p005">http://rs.tdwg.org/dwc/pw/p005</a></td>
		</tr>
		<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p005-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p005-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>landscape improvement</td>
</tr>
<tr>
<td>Definition</td>
<td>Landscape/flora/fauna "improvement" in the wild.</td>
</tr>
<tr>
<td>Notes</td>
<td>"Improvement" in this context is intended for introductions for the purpose of aesthetic enhancement of the landscape, as opposed to practical introductions for the purpose of erosion control, agriculture, forestry etc. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>landscapeImprovement</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p045">dwcpw:p045</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p006"></a>Term Name dwcpw:p006</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p006">http://rs.tdwg.org/dwc/pw/p006</a></td>
		</tr>
		<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p006-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p006-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>conservation or wildlife management</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms introduced for conservation purposes or wildlife management.</td>
</tr>
<tr>
<td>Notes</td>
<td>The organism was released with the intention of improving its conservation status of the species or the conservation status other species in the habitat. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>conservationOrWildlifeManagement</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p045">dwcpw:p045</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p007"></a>Term Name dwcpw:p007</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p007">http://rs.tdwg.org/dwc/pw/p007</a></td>
		</tr>
		<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p007-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p007-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>released for use</td>
</tr>
<tr>
<td>Definition</td>
<td>Release in nature for use (other than above, e.g., fur, transport, medical use).</td>
</tr>
<tr>
<td>Notes</td>
<td>This term refers to organisms intentionally and directly released into the wild to serve a specific purpose. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>releasedForUse</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p045">dwcpw:p045</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p008"></a>Term Name dwcpw:p008</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p008">http://rs.tdwg.org/dwc/pw/p008</a></td>
		</tr>
		<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p008-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p008-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>other intentional release</td>
</tr>
<tr>
<td>Definition</td>
<td>A catch-all for intentional releases not for human use that are not covered by other more specific terms.</td>
</tr>
<tr>
<td>Notes</td>
<td>Compare with "other escape from confinement". See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>otherIntentionalRelease</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p045">dwcpw:p045</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p009"></a>Term Name dwcpw:p009</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p009">http://rs.tdwg.org/dwc/pw/p009</a></td>
		</tr>
		<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p009-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p009-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>agriculture (including biofuel feedstocks)</td>
</tr>
<tr>
<td>Definition</td>
<td>Plants grown with then intention of harvesting.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>agriculture</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p046">dwcpw:p046</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p010"></a>Term Name dwcpw:p010</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p010">http://rs.tdwg.org/dwc/pw/p010</a></td>
		</tr>
		<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p010-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p010-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>aquaculture/mariculture</td>
</tr>
<tr>
<td>Definition</td>
<td>The analog of agriculture and farmed animals, specifically related to aquatic organisms.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>aquacultureMariculture</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p046">dwcpw:p046</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p011"></a>Term Name dwcpw:p011</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p011">http://rs.tdwg.org/dwc/pw/p011</a></td>
		</tr>
		<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p011-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p011-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>botanic garden/zoo/aquaria (excluding domestic aquaria)</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms in public collections of plants and/or animals.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>publisGardenZooAquaria</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p046">dwcpw:p046</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p012"></a>Term Name dwcpw:p012</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p012">http://rs.tdwg.org/dwc/pw/p012</a></td>
		</tr>
		<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p012-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p012-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>pet/aquarium/terrarium species (including live food for such species )</td>
</tr>
<tr>
<td>Definition</td>
<td>Privately kept animals.</td>
</tr>
<tr>
<td>Usage</td>
<td>Animals kept for hunting, such as falcons and ferrets SHOULD be included here, not under the hunting term</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>pet</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p046">dwcpw:p046</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p013"></a>Term Name dwcpw:p013</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p013">http://rs.tdwg.org/dwc/pw/p013</a></td>
		</tr>
		<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p013-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p013-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>farmed animals (including animals left under limited control)</td>
</tr>
<tr>
<td>Definition</td>
<td>Animals cared for and bred with the specific intention of using their products, such as meat and milk.</td>
</tr>
<tr>
<td>Notes</td>
<td>Farmed animals are generally kept in a defined area, such as a fields. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>farmedAnimals</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p046">dwcpw:p046</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p014"></a>Term Name dwcpw:p014</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p014">http://rs.tdwg.org/dwc/pw/p014</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p014-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p014-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>forestry (including reforestation)</td>
</tr>
<tr>
<td>Definition</td>
<td>Trees specifically introduced to provide timber and other forestry products.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>forestry</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p046">dwcpw:p046</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p015"></a>Term Name dwcpw:p015</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p015">http://rs.tdwg.org/dwc/pw/p015</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p015-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p015-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>fur farms</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms escaped from fur farms, including unauthorised releases.</td>
</tr>
<tr>
<td>Notes</td>
<td>Probably only applicable to vertebrates raised for their pelts and skins. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>fur</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p046">dwcpw:p046</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p016"></a>Term Name dwcpw:p016</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p016">http://rs.tdwg.org/dwc/pw/p016</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p016-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p016-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>horticulture</td>
</tr>
<tr>
<td>Definition</td>
<td>Plants distributed by the ornamental and decorative plants industry. </td>
</tr>
<tr>
<td>Usage</td>
<td>This term excludes plants and other organisms from the aquarium and terrarium trade, which SHOULD be classified under the pet/aquarium/terrarium term.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>horticulture</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p046">dwcpw:p046</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p017"></a>Term Name dwcpw:p017</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p017">http://rs.tdwg.org/dwc/pw/p017</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p017-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p017-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>ornamental purpose other than horticulture</td>
</tr>
<tr>
<td>Definition</td>
<td>Ornamental plants introduced through pathways other than the horticultural industry.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>ornamentalNonHorticulture</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p046">dwcpw:p046</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p018"></a>Term Name dwcpw:p018</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p018">http://rs.tdwg.org/dwc/pw/p018</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p018-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p018-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>research and ex-situ breeding (in facilities)</td>
</tr>
<tr>
<td>Definition</td>
<td>Plants and animals introduced for the purpose of breeding, scientific and medical research, including science education.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>research</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p046">dwcpw:p046</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p019"></a>Term Name dwcpw:p019</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p019">http://rs.tdwg.org/dwc/pw/p019</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p019-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p019-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>live food and live bait</td>
</tr>
<tr>
<td>Definition</td>
<td>Live food imported for human consumption, such as shellfish and snails, and for live bait. </td>
</tr>
<tr>
<td>Notes</td>
<td>Live food, such as mealworms, for organisms kept as pets should be classified under the pet term. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>liveFoodLiveBait</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p046">dwcpw:p046</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p020"></a>Term Name dwcpw:p020</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p020">http://rs.tdwg.org/dwc/pw/p020</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p020-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p020-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>other escape from confinement</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms brought into an area with the intention of keeping them in captivity permanently, but that have subsequently escaped.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>otherEscape</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p046">dwcpw:p046</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p021"></a>Term Name dwcpw:p021</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p021">http://rs.tdwg.org/dwc/pw/p021</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p021-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p021-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>contaminant nursery material</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms transported into an area together with plant material.</td>
</tr>
<tr>
<td>Notes</td>
<td>These may be other plants, diseases, fungi and animals. They may be attached to the plant or within the soil. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>contaminantNursery</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p047">dwcpw:p047</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p022"></a>Term Name dwcpw:p022</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p022">http://rs.tdwg.org/dwc/pw/p022</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p022-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p022-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>contaminated bait</td>
</tr>
<tr>
<td>Definition</td>
<td>Contaminants, pathogens and parasites transported with live, frozen or preserved bait used to catch fish or other organisms.</td>
</tr>
<tr>
<td>Notes</td>
<td>Typical examples include crustaceans, cephalopods and molluscs. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>contaminateBait</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p047">dwcpw:p047</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p023"></a>Term Name dwcpw:p023</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p023">http://rs.tdwg.org/dwc/pw/p023</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p023-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p023-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>food contaminant (including of live food)</td>
</tr>
<tr>
<td>Definition</td>
<td>Foods for human consumption, whether they are transported live or dead.</td>
</tr>
<tr>
<td>Notes</td>
<td>This term includes unintentional introduction of contaminants, such as diseases, on those foods and, in the case of plants, should include seeds. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>foodContaminant</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p047">dwcpw:p047</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p024"></a>Term Name dwcpw:p024</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p024">http://rs.tdwg.org/dwc/pw/p024</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p024-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p024-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>contaminant on animals (except parasites, organisms transported by host/vector)</td>
</tr>
<tr>
<td>Definition</td>
<td>Contaminants carried either on or in the body of transported animals.</td>
</tr>
<tr>
<td>Usage</td>
<td>This term excludes parasites and pathogens, which SHOULD be classified under their own specific term ("parasites on animals"). </td>
</tr>
<tr>
<td>Notes</td>
<td>Transported animals carry other organisms in their coat, in their gut and in soil on their hooves and feet. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>contaminantOnAnimals</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p047">dwcpw:p047</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p025"></a>Term Name dwcpw:p025</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p025">http://rs.tdwg.org/dwc/pw/p025</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p025-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p025-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>parasites on animals (including organisms transported by host and vector)</td>
</tr>
<tr>
<td>Definition</td>
<td>Parasitic and pathogenic organisms transported with their host or vector animal.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>parasitesOnAnimals</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p047">dwcpw:p047</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p026"></a>Term Name dwcpw:p026</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p026">http://rs.tdwg.org/dwc/pw/p026</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p026-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p026-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>contaminant on plants (except parasites, species transported by host/vector)</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms transported on plant material.</td>
</tr>
<tr>
<td>Usage</td>
<td>This term excludes organisms carried on contaminant nursery material, seed contaminants, and the timber trade, which SHOULD be classified under their own pathway terms.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>contaminantOnPlants</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p047">dwcpw:p047</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p027"></a>Term Name dwcpw:p027</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p027">http://rs.tdwg.org/dwc/pw/p027</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p027-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p027-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>parasites on plants (including species transported by host and vector)</td>
</tr>
<tr>
<td>Definition</td>
<td>Parasitic and pathogenic organisms transported with their host or vector plant.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>parasitesOnPlants</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p047">dwcpw:p047</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p028"></a>Term Name dwcpw:p028</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p028">http://rs.tdwg.org/dwc/pw/p028</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p028-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p028-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>seed contaminant</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms contaminating transported seeds.</td>
</tr>
<tr>
<td>Notes</td>
<td>These may be parasites or pathogens of the seeds, seeds of other species not intended to be transported, or species that eat seeds. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>seedContaminant</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p047">dwcpw:p047</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p029"></a>Term Name dwcpw:p029</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p029">http://rs.tdwg.org/dwc/pw/p029</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p029-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p029-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>timber trade</td>
</tr>
<tr>
<td>Definition</td>
<td>Contaminants on unprocessed timber, processed wood and wood derived products.</td>
</tr>
<tr>
<td>Usage</td>
<td>This term excludes packing material and habitat material made from wood that SHOULD be included under their own terms ("packing material" and "transportation of habitat material").</td>
</tr>
<tr>
<td>Notes</td>
<td>Examples include wooden furniture, sawdust and firewood. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>timberTrade</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p047">dwcpw:p047</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p030"></a>Term Name dwcpw:p030</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p030">http://rs.tdwg.org/dwc/pw/p030</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p030-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p030-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>transportation of habitat material (soil, vegetation, wood etc)</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms transported with their habitat material to a new location.</td>
</tr>
<tr>
<td>Notes</td>
<td>Examples include materials such as soil, vegetation, straw and wood chips. Unless these materials are sterilised, the organisms can be transported with their habitat to a new location. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>transportationHabitatMaterial</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p047">dwcpw:p047</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p031"></a>Term Name dwcpw:p031</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p031">http://rs.tdwg.org/dwc/pw/p031</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p031-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p031-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>angling/fishing equipment</td>
</tr>
<tr>
<td>Definition</td>
<td>Aquatic organisms moved between sites on equipment of recreational anglers and professional fishermen.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>fishingEquipment</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p048">dwcpw:p048</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p032"></a>Term Name dwcpw:p032</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p032">http://rs.tdwg.org/dwc/pw/p032</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p032-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p032-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>container/bulk</td>
</tr>
<tr>
<td>Definition</td>
<td>Stowaways transported in or on the cargo containers or bulk cargo units themselves.</td>
</tr>
<tr>
<td>Notes</td>
<td>The difference between this category and others, such as "hitchhikers on ship/boat", is that the organism embarked and disembarked from the container itself rather than the ship. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>containerBulk</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p048">dwcpw:p048</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p033"></a>Term Name dwcpw:p033</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p033">http://rs.tdwg.org/dwc/pw/p033</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p033-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p033-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>hitchhikers in or on airplane</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms that enter airplanes or other aircraft, such as helicopters, and are transported by them to another location.</td>
</tr>
<tr>
<td>Notes</td>
<td>This term does not apply to organisms that embarked onto containers that were subsequently loaded on to an aircraft. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>hitchhikersAirplane</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p048">dwcpw:p048</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p034"></a>Term Name dwcpw:p034</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p034">http://rs.tdwg.org/dwc/pw/p034</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p034-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p034-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>hitchhikers on ship/boat (excluding ballast water and hull fouling)</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms that enter directly onto boats or ships and are transported by them to another location.</td>
</tr>
<tr>
<td>Notes</td>
<td>This term does not apply to organisms that embarked onto containers that are subsequently loaded onto the ship, nor to contaminants of products loaded on the ship. The term is intended for organisms that directly interact with the boat or ship. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>hitchhikersShip</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p048">dwcpw:p048</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p035"></a>Term Name dwcpw:p035</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p035">http://rs.tdwg.org/dwc/pw/p035</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p035-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p035-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>machinery/equipment</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms carried on the surfaces of or within heavy machinery and equipment.</td>
</tr>
<tr>
<td>Notes</td>
<td>This includes military equipment, farm machinery and manufacturing equipment. This term does not include products carried by vehicles. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>machineryEquipment</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p048">dwcpw:p048</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p036"></a>Term Name dwcpw:p036</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p036">http://rs.tdwg.org/dwc/pw/p036</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p036-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p036-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>people and their luggage/equipment (in particular tourism)</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms transported on people themselves and/or their personal luggage.</td>
</tr>
<tr>
<td>Usage</td>
<td>This term excludes recreational angling equipment, which SHOULD be classified under its own term ("angling/fishing equipment"). </td>
</tr>
<tr>
<td>Notes</td>
<td>Examples include organisms transported by tourists. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>people</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p048">dwcpw:p048</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p037"></a>Term Name dwcpw:p037</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p037">http://rs.tdwg.org/dwc/pw/p037</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p037-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p037-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>organic packing material, in particular wood packaging</td>
</tr>
<tr>
<td>Definition</td>
<td>Organic material, particularly unprocessed plant material that is used to pack transported goods.</td>
</tr>
<tr>
<td>Notes</td>
<td>Examples include wooden pallets, boxes, bags and baskets. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>packingMaterial</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p048">dwcpw:p048</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p038"></a>Term Name dwcpw:p038</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p038">http://rs.tdwg.org/dwc/pw/p038</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p038-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p038-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>ship/boat ballast water</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms transported within the water pumped into boats and ships to provide ballast.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>ballastWater</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p048">dwcpw:p048</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p039"></a>Term Name dwcpw:p039</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p039">http://rs.tdwg.org/dwc/pw/p039</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p039-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p039-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>ship/boat hull fouling</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms that attach themselves to the subsurface hull of boats and ships. </td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>hullFouling</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p048">dwcpw:p048</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p040"></a>Term Name dwcpw:p040</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p040">http://rs.tdwg.org/dwc/pw/p040</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p040-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p040-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>vehicles (car, train, etc.)</td>
</tr>
<tr>
<td>Definition</td>
<td>Other vehicle hitchhikers that have been unintentionally dispersed, but are not covered by other terms.</td>
</tr>
<tr>
<td>Notes</td>
<td>These organisms may be carried on or within the vehicle. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>vehicles</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p048">dwcpw:p048</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p041"></a>Term Name dwcpw:p041</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p041">http://rs.tdwg.org/dwc/pw/p041</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p041-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p041-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>other means of transport</td>
</tr>
<tr>
<td>Definition</td>
<td>A catch-all term for any transport-related dispersal that is not covered by other terms.</td>
</tr>
<tr>
<td>Notes</td>
<td>Examples include the movement of offshore installations, such as drilling platforms, but also pipeline and cable transport. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>otherTransport</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p048">dwcpw:p048</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p042"></a>Term Name dwcpw:p042</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p042">http://rs.tdwg.org/dwc/pw/p042</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p042-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p042-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>interconnected waterways/basins/seas</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms that dispersed through artificial waterways created to connect previously unconnected water bodies.</td>
</tr>
<tr>
<td>Usage</td>
<td>Organisms transported along these corridors in ballast water or as hull fouling SHOULD be categorised under the "ship/boat ballast water" or "ship/boat hull fouling" terms.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>waterwaysBasinsSeas</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p049">dwcpw:p049</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p043"></a>Term Name dwcpw:p043</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p043">http://rs.tdwg.org/dwc/pw/p043</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p043-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p043-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>tunnels and land bridges</td>
</tr>
<tr>
<td>Definition</td>
<td>Unintentional dispersal by organisms using artificial tunnels, bridges, roads and railways.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>tunnelsBridges</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p049">dwcpw:p049</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p044"></a>Term Name dwcpw:p044</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p044">http://rs.tdwg.org/dwc/pw/p044</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p044-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p044-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>natural dispersal across borders of invasive alien organisms</td>
</tr>
<tr>
<td>Definition</td>
<td>Dispersal of organisms to new regions by natural dispersal from regions in which they are alien.</td>
</tr>
<tr>
<td>Notes</td>
<td>These are alien species that have previously been introduced through one of these pathways: release in nature, escape from confinement, transport-contaminant, transport-stowaway, or corridor. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>naturalDispersal</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p050">dwcpw:p050</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p045"></a>Term Name dwcpw:p045</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p045">http://rs.tdwg.org/dwc/pw/p045</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p045-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p045-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>release in nature</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms transported and released by humans in a (semi)natural environment with the intention that they should live there without further human aid.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>releaseInNature</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p051">dwcpw:p051</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p046"></a>Term Name dwcpw:p046</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p046">http://rs.tdwg.org/dwc/pw/p046</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p046-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p046-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>escape from confinement</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms intentionally transported by humans and intended to be kept in captivity or cultivation, but having inadvertently escaped from human control.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>escapeFromConfinement</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p051">dwcpw:p051</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p047"></a>Term Name dwcpw:p047</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p047">http://rs.tdwg.org/dwc/pw/p047</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p047-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p047-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>transport-contaminant</td>
</tr>
<tr>
<td>Definition</td>
<td>An umbrella term for all species transported as contaminants in other products.</td>
</tr>
<tr>
<td>Notes</td>
<td>An alien species is a contaminant if it had a trophic or biotic relationship to organisms or items being transported and was to some extent dependent on them for survival. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>transportContaminant</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p052">dwcpw:p052</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p048"></a>Term Name dwcpw:p048</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p048">http://rs.tdwg.org/dwc/pw/p048</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p048-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p048-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>transport-stowaway</td>
</tr>
<tr>
<td>Definition</td>
<td>An umbrella term for all species transported by riding on forms of transport, where the organism has a direct interaction with the transport and is not merely carried as part of, or as a contaminant of, cargo.</td>
</tr>
<tr>
<td>Notes</td>
<td>A stowaway has no trophic or biotic relationship to the organisms or items being transported. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>transportStowaway</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p052">dwcpw:p052</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p049"></a>Term Name dwcpw:p049</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p049">http://rs.tdwg.org/dwc/pw/p049</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p049-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p049-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>corridor</td>
</tr>
<tr>
<td>Definition</td>
<td>Infrastructure, such as bridges, tunnels and canals, that has removed natural barriers to dispersal and allowed a species to move into a novel location.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>corridor</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p053">dwcpw:p053</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p050"></a>Term Name dwcpw:p050</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p050">http://rs.tdwg.org/dwc/pw/p050</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p050-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p050-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>unaided</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms that spread by natural dispersal, without action or assistance by humans, from regions in which they are also alien.</td>
</tr>
<tr>
<td>Notes</td>
<td>The term refers to secondary dispersal from an area where the taxon is also alien. See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>unaided</td>
</tr>
<tr>
<td>Has broader concept</td>
<td><a href="#dwcpw_p053">dwcpw:p053</a></td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p051"></a>Term Name dwcpw:p051</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p051">http://rs.tdwg.org/dwc/pw/p051</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p051-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p051-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>intentional</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms were brought to a new area with the specific intention of keeping them alive in the new region, regardless of whether they were intended to be cultivated or released into the wild.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>intentional</td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p052"></a>Term Name dwcpw:p052</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p052">http://rs.tdwg.org/dwc/pw/p052</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p052-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p052-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>unintentional</td>
</tr>
<tr>
<td>Definition</td>
<td>The organism was unintentionally brought to a new region.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>unintentional</td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th colspan="2"><a id="dwcpw_p053"></a>Term Name dwcpw:p053</th>
</tr>
</thead>
<tbody>
<tr>
<td>Term IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/p053">http://rs.tdwg.org/dwc/pw/p053</a></td>
</tr>
<tr>
<td>Modified</td>
<td>2020-06-28</td>
</tr>
<tr>
<td>Term version IRI</td>
<td><a href="http://rs.tdwg.org/dwc/pw/version/p053-2020-06-28">http://rs.tdwg.org/dwc/pw/version/p053-2020-06-28</a></td>
</tr>
<tr>
<td>Label</td>
<td>corridor and dispersal</td>
</tr>
<tr>
<td>Definition</td>
<td>Organisms dispersed naturally, even if that dispersal was aided by changes in the landscape created by humans.</td>
</tr>
<tr>
<td>Notes</td>
<td>See also Harrower et al. 2017 <a href="http://nora.nerc.ac.uk/id/eprint/519129">http://nora.nerc.ac.uk/id/eprint/519129</a></td>
</tr>
<tr>
<td>Controlled value</td>
<td>corridorAndDispersal</td>
</tr>
<tr>
<td>Type</td>
<td>Concept</td>
</tr>
</tbody>
</table>
###Markdown
Modify to display the indices that you want
###Code
text = index_by_label + term_table
#text = index_by_name + index_by_label + term_table
# read in header and footer, merge with terms table, and output
with open(headerFileName, 'rt', encoding='utf-8') as headerObject:
    header = headerObject.read()
with open(footerFileName, 'rt', encoding='utf-8') as footerObject:
    footer = footerObject.read()
output = header + text + footer
with open(outFileName, 'wt', encoding='utf-8') as outputObject:
    outputObject.write(output)
print('done')
###Output
done
|
src/notebook/Predictions_2d.ipynb | ###Markdown
Comparison of the best/worst predictions
###Code
%reset -f
import numpy as np
import pandas as pd
import ast
import matplotlib.pyplot as plt
from ast import literal_eval as l_eval
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.animation import FuncAnimation
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
%matplotlib nbagg
###Output
_____no_output_____
###Markdown
Loading the data
###Code
""" Chargement de la dataframe (qui contient 1 seule ligne à priori) """
converters={'rho':l_eval, 'E_u':l_eval, 'F_u':l_eval, 'T_u':l_eval, 'E_d':l_eval, 'F_d':l_eval, 'T_d':l_eval, 'E_l':l_eval, 'F_l':l_eval, 'T_l':l_eval, 'E_r':l_eval, 'F_r':l_eval, 'T_r':l_eval}
df_true = pd.read_csv("../../data/df_true.csv", converters=converters)
df_pred = pd.read_csv("../../data/df_pred.csv", converters=converters)
x_min = df_true.loc[0, "x_min"]
x_max = df_true.loc[0, "x_max"]
y_min = df_true.loc[0, "y_min"]
y_max = df_true.loc[0, "y_max"]
t_0 = df_true.loc[0, "t_0"]
t_f = df_true.loc[0, "t_f"]
N = df_true.loc[0, 'N']
M = df_true.loc[0, 'M']
step_count = df_true.loc[0, 'step_count']
print("x_min, x_max:", (x_min, x_max))
print("y_min, y_max:", (y_min, y_max))
print("t_0, t_f :", (t_0, t_f))
print()
print("taille du maillage :", (N, M))
print("nombre d'itérations:", step_count)
df_true["rho_attr"]
df_pred
###Output
_____no_output_____
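###Markdown
Why the converters? The CSV stores each field (e.g. `rho`) as a stringified Python list, so `literal_eval` is used to turn it back into a real nested list at load time. A minimal sketch of what each converter does (the sample string below is made up for illustration):
###Code
from ast import literal_eval as l_eval

# hypothetical serialised cell value, as it would appear in the CSV
cell = "[[1.0, 2.0], [3.0, 4.0]]"
parsed = l_eval(cell)  # back to a nested Python list
print(type(parsed), np.array(parsed).shape)
###Output
_____no_output_____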
###Markdown
With imshow
###Code
""" Un plot de la densite """
# pour calculer les valeurs extremes d'un tenseur
def min_max(mat, dim=2):
mat_min = mat
for i in range(dim-1, -1, -1):
mat_min = np.nanmin(mat_min, axis=i)
mat_max = mat
for i in range(dim-1, -1, -1):
mat_max = np.nanmax(mat_max, axis=i)
return mat_min, mat_max
# make the plots
def plot_density(ax, df_true, df_pred, index=0, cb=True):
    rho_true = np.array(df_true.loc[index, 'rho'])
    true_min, true_max = min_max(rho_true)
    print("(min, max) true rho =", (true_min, true_max))
    rho_pred = np.array(df_pred.loc[index, 'rho'])
    pred_min, pred_max = min_max(rho_pred)
    print("(min, max) pred rho =", (pred_min, pred_max))
rho_min, rho_max = min(true_min, pred_min), max(true_max, pred_max)
# rho_min, rho_max = pred_min, pred_max
# print("(min, max) rho =", (rho_min, rho_max))
# print(rho_true - rho_pred)
img1 = ax.imshow(rho_true,
origin='lower',
cmap=cm.Greens,
interpolation='bicubic',
aspect='auto',
                     alpha=0.5,
# vmin=rho_min, vmax=rho_max,
extent=[x_min, x_max, y_min, y_max])
img2 = ax.imshow(rho_pred,
origin='lower',
cmap=cm.Reds,
interpolation="bicubic",
aspect='auto',
alpha=0.5,
# vmin=rho_min, vmax=rho_max,
extent=[x_min, x_max, y_min, y_max])
    if cb:
# cbar2 = fig.colorbar(img2, ax=[ax], ticks=[pred_min, pred_max], orientation="horizontal", cax=fig.add_axes([0.1, 0.1, 0.2, 0.01]))
cbar2 = fig.colorbar(img2, ax=[ax], ticks=[pred_min, pred_max], shrink=0.4, aspect=10, location='right')
# cbar2 = fig.colorbar(img2, ax=ax, ticks=[pred_min, pred_max], cax=fig.add_axes([0.92, 0.2, 0.01, 0.2]))
cbar2.ax.set_yticklabels([str(pred_min), str(pred_max)[:4]])
cbar2.set_label('Prédiction')
# cbar1 = fig.colorbar(img1, ax=[ax], ticks=[true_min, true_max], orientation="horizontal", cax=fig.add_axes([0.6, 0.1, 0.2, 0.01]))
cbar1 = fig.colorbar(img1, ax=[ax], ticks=[true_min, true_max], shrink=0.4, aspect=10, location='right')
# cbar1 = fig.colorbar(img1, ax=ax, ticks=[true_min, true_max], shrink=0.25, aspect=10, cax=fig.add_axes([0.8, 0.6, 0.01, 0.2]))
cbar1.ax.set_yticklabels([str(true_min), str(true_max)[:4]])
cbar1.set_label('Label')
ax.set_xlabel("x", size="large")
ax.set_ylabel("y", size="large")
fig, ax = plt.subplots(1,1,figsize=(9,5.4))
plot_density(ax, df_true, df_pred, index=0)
# plt.tight_layout()
###Output
_____no_output_____
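###Markdown
A complementary view (a sketch using the same data as above): instead of overlaying the label and the prediction, plot the absolute error field |rho_true - rho_pred| directly, so the regions where the prediction deviates most stand out.
###Code
# Sketch: map of the absolute error between label and prediction.
# Assumes df_true, df_pred and the domain bounds from the cells above.
rho_true = np.array(df_true.loc[0, 'rho'])
rho_pred = np.array(df_pred.loc[0, 'rho'])
abs_err = np.abs(rho_true - rho_pred)

fig, ax = plt.subplots(1, 1, figsize=(8, 4.8))
img = ax.imshow(abs_err,
                origin='lower',
                cmap=cm.viridis,
                aspect='auto',
                extent=[x_min, x_max, y_min, y_max])
fig.colorbar(img, ax=ax, label='|rho_true - rho_pred|')
ax.set_xlabel("x", size="large")
ax.set_ylabel("y", size="large")
###Output
_____no_output_____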
###Markdown
Computing the absolute error
###Code
true = [0.405,0.724,1.95]
pred = [0.409,0.736,1.9]
true = np.array(true)
pred = np.array(pred)
# weighted squared error: the third component is down-weighted by a factor of 10
def norm(vec):
    return vec[0]**2 + vec[1]**2 + (vec[2] / 10)**2

print("Absolute error:", round(norm(true - pred), 4))
# def plot_density3D(ax, df_true, df_pred, type="surface", cmap="viridis", stride=10):
# # make the time axis, x , y
# x = np.linspace(df_true["x_min"], df_true["x_max"], df_true["N"])
# y = np.linspace(df_true["y_min"], df_true["y_max"], df_true["M"])
# XX, YY = np.meshgrid(x, y)
# # make the signals
# rho_true = np.array(df_true.loc[0, "rho"])
# rho_pred = np.array(df_pred.loc[0, "rho"])
# if type=="surface":
# ax.plot_surface(XX, YY, rho_true, cmap=cm.Greens, edgecolor='none')
# ax.plot_surface(XX, YY, rho_pred, cmap=cm.Reds, edgecolor='none')
# elif type=="wireframe":
# ax.plot_wireframe(XX, YY, rho_true, rstride=stride, cstride=stride)
# ax.plot_wireframe(XX, YY, rho_pred, rstride=stride, cstride=stride)
#     ax.set_xlabel('x')
#     ax.set_ylabel('y')
#     ax.set_zlabel('height')
# ax.view_init(0, 45)
# # ax.view_init(90, 0)
# fig = plt.figure(figsize=(8,8))
# ax = fig.add_subplot(111, projection='3d')
# plot_density3D(ax, df_true, df_pred, type="surface", cmap="jet")
# # plt.legend()
# plt.tight_layout()
###Output
_____no_output_____ |
Model Selection - Tuning Hyperparameters/RandomizedSearchCV_HyperParameterTuning.ipynb | ###Markdown
2. Hyperparameter tuning with RandomizedSearchCV

Scikit-Learn's [RandomizedSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html) allows us to randomly search across different hyperparameter values to see which work best, and it stores details about the best-performing combinations. To use it, we first create a grid (dictionary) of hyperparameters we'd like to search over.
###Code
# Hyperparameter grid RandomizedSearchCV will search over
# (keys = hyperparameter names, values = the settings to try; grid is a dictionary)
grid = {"n_estimators": [10, 100, 200, 500, 1000, 1200],
"max_depth": [None, 5, 10, 20, 30],
"max_features": ["auto", "sqrt"],
"min_samples_split": [2, 4, 6],
"min_samples_leaf": [1, 2, 4]}
###Output
_____no_output_____
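###Markdown
A quick sanity check (an optional sketch): the total number of combinations in the grid can be computed directly, which is useful for deciding how large n_iter needs to be.
###Code
import numpy as np

# product of the number of options for each hyperparameter
n_combinations = np.prod([len(values) for values in grid.values()])
print(n_combinations)  # 6 * 5 * 2 * 3 * 3 = 540
###Output
_____no_output_____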
###Markdown
Where did these values come from? They're made up. Yes, but not completely pulled out of the air: after reading the Scikit-Learn documentation on Random Forests you'll see that some hyperparameters have values which usually perform well, and that certain hyperparameters take strings rather than integers.

Now we've got the grid set up, Scikit-Learn's RandomizedSearchCV will look at it, pick a random value from each, instantiate a model with those values and test each model.

How many models will it test? As many as there are combinations of hyperparameters to be tested: n_estimators has 6, max_depth has 5, max_features has 2, min_samples_split has 3 and min_samples_leaf has 3. That's 6x5x2x3x3 = 540 models! Or... we can set the n_iter parameter to limit the number of models RandomizedSearchCV tests.

The best thing? The results we get will be cross-validated (hence the CV in RandomizedSearchCV), so a plain train_test_split() is all we need; no separate validation split is required. And since we're testing so many different models, we'll set n_jobs to -1 on RandomForestClassifier so Scikit-Learn takes advantage of all the cores (processors) on our computers.

**Note**: Depending on n_iter (how many models you test), the different values in the hyperparameter grid, and the power of your computer, running the cell below may take a while.
###Code
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
np.random.seed(42) # results are reproducible
# Shuffle the data
heart_df_shuffle = heart_df.sample(frac=1)
# Split into X and y
X = heart_df_shuffle.drop("target",axis=1)
y = heart_df_shuffle["target"]
# Split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Instantiate RandomForestClassifier
# n_jobs --> how many processor cores to dedicate to the model
# -1 --> use all available cores
clf = RandomForestClassifier(n_jobs=-1)
# Setup RandomizedSearchCV --> cross validation - automatically creates the validation sets for us
rs_clf = RandomizedSearchCV(estimator=clf,
param_distributions=grid,
n_iter=10, # try 10 parameter combinations in total
cv=5, # 5-fold cross-validation
verbose=2) # print out results
# it will take clf and the grid, then randomly sample hyperparameter combinations from the grid to find which works best
# Fit the RandomizedSearchCV version of clf
rs_clf.fit(X_train, y_train);
# "Fitting 5 folds for each of 10 candidates, totalling 50 fits" means:
# 10 random combinations of parameters from the grid,
# each evaluated with 5-fold cross-validation (cv=5),
# so fit() runs 50 times, on different hyperparameters and different data splits
# Which combination of hyperparameters got the best results found by RandomizedSearchCV
rs_clf.best_params_
###Output
_____no_output_____
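###Markdown
Besides best_params_, RandomizedSearchCV also exposes best_score_ (the mean cross-validated score of the best combination) and cv_results_ (per-combination details). A short sketch of inspecting them:
###Code
import pandas as pd

# Mean cross-validated score achieved by the best hyperparameter combination
print(rs_clf.best_score_)

# cv_results_ holds the scores and timings for every combination tried;
# wrapping it in a DataFrame makes it easy to sort and inspect
cv_results = pd.DataFrame(rs_clf.cv_results_)
cv_results.sort_values("rank_test_score").head()
###Output
_____no_output_____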
###Markdown
When we call predict() on rs_clf (our RandomizedSearchCV version of the classifier), it'll use the best hyperparameters it found.
###Code
def evaluate_preds(y_true,y_preds):
"""
Performs evaluation comparison on y_true labels vs. y_pred labels
on a classification model.
"""
accuracy = accuracy_score(y_true,y_preds)
precision = precision_score(y_true,y_preds)
recall = recall_score(y_true,y_preds)
f1 = f1_score(y_true,y_preds)
metric_dict = {
"accuracy":round(accuracy,2),
"precision":round(precision,2),
"recall":round(recall,2),
"f1":round(f1,2)
} # A dictionary that stores the results of the evaluation metrics
print(f"Acc: {accuracy * 100:.2f}%")
print(f"Precision: {precision:.2f}")
print(f"Recall: {recall:.2f}")
print(f"F1 score: {f1:.2f}")
return metric_dict
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# Make predictions with the best hyperparameters
rs_y_preds = rs_clf.predict(X_test)
# Evaluate the predictions
rs_metrics = evaluate_preds(y_test, rs_y_preds)
# ------------------------------------------------------------------
###Output
_____no_output_____
###Markdown
A few next ideas you could try:

* Collecting more data - Based on the results our models are getting now, it seems like they're finding some patterns. Collecting more data may improve a model's ability to find patterns. However, your ability to do this will largely depend on the project you're working on.
* Try a more advanced model - Although our tuned Random Forest model is doing pretty well, a more advanced ensemble method such as [XGBoost](https://xgboost.ai/) or [CatBoost](https://catboost.ai/) might perform better (see the sketch below).

Since machine learning is part engineering, part science, **these kinds of experiments are commonplace in any machine learning project.**
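###Markdown
A minimal sketch of the XGBoost idea (this assumes the xgboost package is installed; XGBClassifier follows the Scikit-Learn estimator API, so it drops straight into the same workflow and could even replace clf inside RandomizedSearchCV above):
###Code
from xgboost import XGBClassifier

# Fit a gradient-boosted tree model on the same train/test split as above
xgb_clf = XGBClassifier(n_estimators=500, learning_rate=0.05)
xgb_clf.fit(X_train, y_train)

# Evaluate with the same helper for a like-for-like comparison
xgb_metrics = evaluate_preds(y_test, xgb_clf.predict(X_test))
###Output
_____no_output_____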
###Code
# ---------------------------------------------------------------
###Output
_____no_output_____ |
toc_trends_oct_2016.ipynb | ###Markdown
TOC trends October 2016

Heleen would like updated results for the ICPW trends analysis at the beginning of October. Today is the last day of September and I'm committed to other projects for the next few weeks, so time is getting tight! The aim of this notebook is to get some more-or-less complete results that Heleen can work with while I'm away next week.

**Note:** There are still a number of significant issues with the database that I am unlikely to have time to fix in the near future. Although the ICPW dataset itself is pretty simple, cleaning up the existing system is not a small job, due to a long history of database structural development and, in particular, the way that RESA2 is coupled to various other systems. I'm not going to attempt to deal with these issues here, but I'll try to describe them below so I can come back in the future. The aim here is to try to produce something useful *despite* the outstanding known (and unknown?) issues.

1. ICPW database summary

Most of the tasks discussed with John, Don and Heleen back in May have been completed, as described [here](http://nbviewer.jupyter.org/github/JamesSample/icpw/blob/master/toc_trends_2015_data_cleaning.ipynb), [here](http://nbviewer.jupyter.org/github/JamesSample/icpw/blob/master/toc_trends_2015_data_cleaning2.ipynb) and [here](http://nbviewer.jupyter.org/github/JamesSample/icpw/blob/master/updated_toc_trends_analysis2.ipynb). Section 5 of the latter notebook provides a more detailed summary.

On 25/08/2016, John sent a couple of very helpful e-mails in response to my earlier questions regarding the US sites. Unfortunately, comparing John's spreadsheets to the information in RESA2 highlighted a number of further database-wide errors (not specific to the US sites), which are going to be tricky to fix. A notebook describing this work in detail is [here](http://nbviewer.jupyter.org/github/JamesSample/icpw/blob/master/toc_trends_2015_data_cleaning3.ipynb), but the main implications are summarised in an e-mail to Heleen (sent 02/09/2016 at 11:42), which is reproduced in part below.
> *This is pretty confusing (as we found out back in May), and it also somewhat defeats the point of having a database."*

Other outstanding issues include the large number of Swedish sites with no data, problems with the Czech data (in an e-mail received 08/09/2016, Vladimir suggested just deleting and reloading everything) and the poor availability of site metadata for many locations. Having discussed these issues with Heleen, I've decided to try the following:

 * Tidy up the US sites based on information provided by John. I'm not going to attempt to merge any datasets at this stage, but I can clean up the site names and add the original USEPA site codes to reduce confusion.
 * Look at the Czech data sent by Vladimir and decide whether it can and should replace the values currently in the database.
 * Run the trends analysis using whatever data is currently associated with the `TOC_TRENDS` projects. In principle, these datasets were gathered together separately from ICPW for the 2015 reanalysis, which is why they're associated with separate projects in the database. As described in the notebook linked above, the values in these series sometimes do not agree with those reported for the core ICPW projects, but I'm going to ignore this for now as reconciling the differences will take a long time.
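To make the merging problem concrete, the sketch below shows roughly what the manual reconciliation involves for a single site and parameter. This is illustrative only: `icpw_ser` and `trend_ser` stand for date-indexed series extracted separately from the 'ICPWaters' and 'TOC_TRENDS' projects, the tolerance is arbitrary, and the preference for the core series is simply an assumption.
###Code
# Illustrative sketch only: icpw_ser and trend_ser are assumed to be
# date-indexed pandas Series for one station-parameter combination,
# extracted from the 'ICPWaters' and 'TOC_TRENDS' projects respectively
import pandas as pd

def merge_series(icpw_ser, trend_ser, tol=1e-6):
    """ Align the two series, flag disagreements and build a merged column. """
    df = pd.concat([icpw_ser.rename('icpw'), trend_ser.rename('trend')], axis=1)
    # Rows where both projects report a value, but the values differ
    df['disagree'] = (df['icpw'] - df['trend']).abs() > tol
    # Prefer the core ICPW value; fall back on the trends value
    df['merged'] = df['icpw'].fillna(df['trend'])
    return df
###Output
_____no_output_____
###Markdown
First, import the helper code and connect to the database.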
###Code
# Imports needed by the cells below
import imp
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline

# Import custom functions and connect to db
resa2_basic_path = (r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\Upload_Template'
r'\useful_resa2_code.py')
resa2_basic = imp.load_source('useful_resa2_code', resa2_basic_path)
engine, conn = resa2_basic.connect_to_resa2()
###Output
_____no_output_____
###Markdown
2. Tidy up US sites

Based on the results in the [previous notebook](http://nbviewer.jupyter.org/github/JamesSample/icpw/blob/master/toc_trends_2015_data_cleaning3.ipynb), I'd like to go through all the US sites in RESA2 and modify the site properties to match the values in John's spreadsheet (which I'm taking to be definitive). In particular, I want to correct the station names and geographic co-ordinates, as well as appending the original USEPA site codes to the station metadata.

2.1. Core ICPW sites

We'll start by correcting the core ICPW sites. John's spreadsheet identified 95 sites that should be associated with this project and, following the work in the [previous notebook](http://nbviewer.jupyter.org/github/JamesSample/icpw/blob/master/toc_trends_2015_data_cleaning3.ipynb), this now agrees with what's in RESA2. Four of the core ICPW sites in John's spreadsheet are marked `NA` in the `station_code` column. These sites do now have codes in RESA2, so I've created a new version of John's spreadsheet (*U.S.Site.Reconciliation.August.2016_jes.xlsx*) with these codes added. The first step is therefore to check that there is a direct match between the codes for the 95 core sites in RESA2 and the 95 sites listed in John's spreadsheet.
###Code
# Read John's spreadsheet
in_xls = (r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\Call_for_Data_2016'
          r'\Replies\usa\U.S.Site.Reconciliation.August.2016_jes.xlsx')
j_df = pd.read_excel(in_xls, sheetname='Sheet1')
# Get a subset of columns from John's spreadsheet
us_df = j_df[['Station Code', 'Station name', 'Suggested Name',
'Active', 'INCLUDE IN ICP WATERS DATABASE',
'INCLUDE IN ICP DOC ANALYSIS', 'NFC_SITEID',
'NFC_SITENAME', 'Latitude', 'Longitude', 'Altitude (m)']]
# Rename columns
us_df.columns = ['station_code', 'station_name', 'suggested_name',
'active', 'core', 'trend', 'nfc_code', 'nfc_name',
'latitude', 'longitude', 'elevation']
# Just the core sites
core_df = us_df.query('core == "YES"')
core_df.head()
# Get station codes associated with 'ICP Waters US' project
sql = ('SELECT station_id, station_code, station_name, latitude, longitude '
'FROM resa2.stations '
'WHERE station_id in (SELECT station_id '
'FROM resa2.projects_stations '
'WHERE project_id = 1679)')
r2_df = pd.read_sql_query(sql, engine)
r2_df.head()
# Join
df = r2_df.merge(core_df, on='station_code', how='outer',
suffixes=('_r', '_j'))
if len(df) == 95:
print 'All rows match.'
df.head()
###Output
All rows match.
###Markdown
The code above shows that all the sites have been matched correctly, so I can now loop over each site in the `ICP Waters US` project, updating the details in the stations table using the information in John's spreadsheet.

I also want to add the original National Focal Centre site code as an additional attribute (as John suggested in his e-mails). The easiest way to do this is to create a new entry (`NFC_Code`) in the `STATION_PARAMETER_DEFINITIONS` table.
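The definition itself only needs creating once; a sketch of the one-off insert is shown below. Note this is illustrative: apart from `var_id = 321`, which matches the value used in the loop that follows, the column names of `STATION_PARAMETER_DEFINITIONS` are assumed.
###Code
# One-off insert creating the 'NFC_Code' parameter definition.
# Illustrative sketch: other than var_id = 321 (used below), the column
# names of STATION_PARAMETER_DEFINITIONS are assumed
sql = ("INSERT INTO resa2.station_parameter_definitions "
       "(var_id, var_name) "
       "VALUES (321, 'NFC_Code')")
result = conn.execute(sql)
###Output
_____no_output_____
###Markdown
The loop below then applies John's values and adds the NFC codes to each station.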
###Code
# Loop over sites
for row in df.iterrows():
# Get site properties
stn_id = row[1]['station_id']
name = row[1]['suggested_name']
lat = row[1]['latitude_j']
lon = row[1]['longitude_j']
elev = row[1]['elevation']
nfc = row[1]['nfc_code']
# Update stations table
sql = ("UPDATE resa2.stations "
"SET station_name = '%s', "
"latitude = %s, "
"longitude = %s, "
"altitude = %s "
"WHERE station_id = %s"
% (name, lat, lon, elev, stn_id))
result = conn.execute(sql)
# Update stations_par_values table with NFC code
sql = ("INSERT INTO resa2.stations_par_values "
"(station_id, var_id, value, entered_by, entered_date) "
"VALUES (%s, 321, '%s', 'JES', TO_DATE('2016-09-30', 'YYYY-MM-DD'))"
% (stn_id, nfc))
result = conn.execute(sql)
###Output
_____no_output_____
###Markdown
2.2. Trends sites

The next step is to correct the entries for the trends sites. Note that if the database were properly normalised this step wouldn't be necessary, as the 76 trends sites are a sub-set of the 95 core ICPW sites, so the steps described above should cover all relevant sites. However, due to the duplication of sites in the database, it is necessary to do this cleaning twice.

Matching the trends sites turns out to be a bit more difficult, because of inconsistencies in the `X15:` prefixes. The first step is to manually add `Newbert Pond, Maine` (`station_code = US82`) to the `ICPW_TOCTRENDS_2015_US_LTM` project, as mentioned at the end of the [previous notebook](http://nbviewer.jupyter.org/github/JamesSample/icpw/blob/master/toc_trends_2015_data_cleaning3.ipynb). This should mean that we have 76 sites associated with the trends project, as specified in John's spreadsheet.
###Code
# Just the trend sites
trend_df = us_df.query('trend == "YES"')
trend_df.head()
# Get station codes associated with 'ICPW_TOCTRENDS_2015_US_LTM' project
sql = ('SELECT station_id, station_code, station_name, latitude, longitude '
'FROM resa2.stations '
'WHERE station_id in (SELECT station_id '
'FROM resa2.projects_stations '
'WHERE project_id = 3870)')
r2_df = pd.read_sql_query(sql, engine)
print 'Number of sites:', len(r2_df)
r2_df.head()
###Output
Number of sites: 76
###Markdown
The next question is how well we can match these sites to John's spreadsheet. Based on the analysis in the previous notebook, the answer is, "*not very well*", because leading and trailing zeros in the original site codes have been truncated (presumably accidentally, e.g. in Excel) prior to the `X15:` prefix being added. This isn't necessarily a major problem - the station codes used in RESA2 are mostly irrelevant - but I do need to somehow match them to John's spreadsheet and then update the station properties (preferably also adding the original site codes supplied by John, so we don't have to go through all this again later). The code below takes a slightly "heuristic" approach to finding matches.
###Code
# Loop over rows from John
for row in trend_df.iterrows():
# Get site properties
nfc_cd = row[1]['nfc_code']
name = row[1]['suggested_name']
lat = row[1]['latitude']
lon = row[1]['longitude']
elev = row[1]['elevation']
# Attempt to find match. Need to add 'X15:' and allow for variants
q_res = r2_df[(r2_df['station_code']=='X15:%s' % nfc_cd) |
(r2_df['station_code']=='X15:%s' % nfc_cd[1:]) |
(r2_df['station_code']=='X15:%s' % nfc_cd[:-1])]
if len(q_res) == 1:
# Single match found. Get stn_id
stn_id = q_res.iloc[0]['station_id']
# Update stations table
sql = ("UPDATE resa2.stations "
"SET station_name = '%s', "
"latitude = %s, "
"longitude = %s, "
"altitude = %s "
"WHERE station_id = %s"
% (name, lat, lon, elev, stn_id))
result = conn.execute(sql)
# Check whether there's already an entry for this site
# in stations_par_values table
sql = ('SELECT * FROM resa2.stations_par_values '
'WHERE station_id = %s '
'AND var_id = 321'
% stn_id)
df = pd.read_sql_query(sql, engine)
if len(df) < 1:
# Update stations_par_values table with NFC code
sql = ("INSERT INTO resa2.stations_par_values "
"(station_id, var_id, value, entered_by, entered_date) "
"VALUES (%s, 321, '%s', 'JES', TO_DATE('2016-09-30', 'YYYY-MM-DD'))"
% (stn_id, nfc_cd))
result = conn.execute(sql)
else:
# Can't get good match
print "Can't match %s." % nfc_cd
###Output
Can't match 1E1-134E.
Can't match ME-9998E.
###Markdown
This code manages to find unique matches for all but two of the sites, which is a good start. Looking at the site codes for the two exceptions in John's spreadsheet, it seems as though they were previously only associated with the core ICPW project and not the broader trends analysis. They were therefore not duplicated when the `TOC Trends` projects were created and instead only appear in the database once, using station codes `US74` and `US82`, respectively (rather than any of the `X15:` prefixes).

2.3. Testing

With a bit of luck, I've finally managed to sort out the basic details for the US sites. Let's check.
###Code
# Get the NFC site codes
sql = ('SELECT station_id, value AS nfc_code '
'FROM resa2.stations_par_values '
'WHERE var_id =321')
nfc_df = pd.read_sql_query(sql, engine)
# Get station codes associated with 'ICP Waters US' project
sql = ('SELECT station_id, station_code, station_name, latitude, longitude, altitude '
'FROM resa2.stations '
'WHERE station_id in (SELECT station_id '
'FROM resa2.projects_stations '
'WHERE project_id = 1679)')
core_df = pd.read_sql_query(sql, engine)
# Get station codes associated with 'ICPW_TOCTRENDS_2015_US_LTM' project
sql = ('SELECT station_id, station_code, station_name, latitude, longitude, altitude '
'FROM resa2.stations '
'WHERE station_id in (SELECT station_id '
'FROM resa2.projects_stations '
'WHERE project_id = 3870)')
trend_df = pd.read_sql_query(sql, engine)
# Join in original site codes
core_df = core_df.merge(nfc_df, on='station_id', how='left')
trend_df = trend_df.merge(nfc_df, on='station_id', how='left')
print 'Sites in core ICPW project:', len(core_df)
print 'Sites in trends project: ', len(trend_df)
###Output
Sites in core ICPW project: 95
Sites in trends project: 76
###Markdown
Regardless of the station codes and IDs in RESA2, I should now be able to make sense of the US data using the actual USEPA codes. For example, the sites in `trend_df` should be a true sub-set of those in `core_df`, and all the site properties should agree.
###Code
# Inner join dfs
df = trend_df.merge(core_df, on='nfc_code', how='inner',
suffixes=('_t', '_c'))
# Testing
assert len(df) == 76, 'Incorrect number of sites.'
for item in ['station_name', 'latitude', 'longitude', 'altitude']:
    assert (df[item + '_t'] == df[item + '_c']).all(), 'Mismatch in %ss.' % item
print 'Check complete. All properties match.'
###Output
Check complete. All properties match.
###Markdown
Great - I think the US LTM sites are now more-or-less sorted. Only another 20 countries to go!

Note that, with the changes made above, it is now possible to extract the NFC station codes from RESA2 in the same way as for the other station properties (i.e. using the `Additional station data` tab). There are still some issues to be aware of though. In particular, if you choose to export station properties from RESA2 to Excel, the RESA2 code will convert any all-numeric NFC codes to numbers. The result is that NFC codes such as `013425` are truncated to `13425`. This is not a problem with the database (the values in Oracle are correct) - it is a problem with the RESA2 code that copies results from the database into Excel. I'm not going to delve into this at the moment as I'm keen to avoid becoming too involved with the RESA2 application itself. As a workaround, it is safer to export from RESA2 as CSV and then import the CSV into Excel, taking care to set the column type for the `nfc_code` field to `Text` during the import process.

3. Czech data

Vladimir has suggested deleting all the Czech data and then uploading it again from scratch. I'm reluctant to delete more than a decade's worth of data, but an alternative option more consistent with what's been done before would be to rename the existing Czech site codes and shift them into an `EXCLUDED` project. I can then upload the new Czech data using the same site codes as previously, which will hopefully avoid the issues created when the trend data was similarly uploaded, but using modified (i.e. `X15:`) prefixes.

Firstly, some points to note:

 * The "old" database has Czech data for 9 sites. Two of these, Lysina (`CZ07`) and Pluhuv Bor (`CZ09`), have higher resolution data than the rest. See Section 3 of [this notebook](http://nbviewer.jupyter.org/github/JamesSample/icpw/blob/master/toc_trends_2015_data_cleaning2.ipynb) for full details.
 * In his latest data submission, Vladimir has only supplied **monthly** resolution data for sites `CZ01` to `CZ08` inclusive (i.e. excluding Pluhuv Bor). This is exactly what's required for the core ICPW dataset, but we may wish to include some of the old (weekly) data from Lysina and Pluhuv Bor in the trends analysis (assuming it's considered good enough to use).

For the moment, I propose **shifting all of the existing Czech data (sites `CZ01` to `CZ09`) into a new project called `ICPWaters CZ Excl`**. I will then upload the new data for sites `CZ01` to `CZ08` and associate these records with **both** the core ICPW project and the trend analysis work. Both projects will therefore access exactly the same (monthly resolution) data, which is more consistent than the data structure used at present. The downsides are that the trends project will then make use of lower resolution (monthly rather than weekly) data for Lysina (`CZ07`) and have no data at all for Pluhuv Bor (`CZ09`). Given that we are estimating trends from annual averages anyway, I don't think the differences in temporal resolution are a problem (in fact, using monthly data is arguably better as it's more consistent with our statistical analyses elsewhere). It is also possible to include Pluhuv Bor in the trends project if we want to, but Jakub has previously stated that it is a very well-buffered catchment that does not suffer from acidification (see e-mail received 29/06/2016 at 10:58), so I'm not sure it's appropriate anyway? **Check with Heleen**.

3.1. Restructure Czech projects

The first step is to create a new project called `ICPWaters CZ Excl`.
I can then rename all the existing Czech site codes (`CZ01` etc.) by adding the suffix `_Old`, followed by shifting them over to the newly created project. The second step is to create 8 new sites (with identical properties to the old ones), assigning them the same site codes as used previously (`CZ01` etc.). These new sites have no data associated with them, so I should be able to upload the revised data from Vladimir without creating any conflicts (I hope). All of this is most easily done manually using Access.

**Note:** This process involves creating duplicate sites and is therefore superficially similar to the duplication described above, which has already caused lots of confusion. However, these problems have primarily been caused because we have two sets of sites (core and trends), which represent the same locations but which often have different site properties. More importantly, both of these projects are considered "active", and data is periodically appended to update them. This is what causes the problems: we have two supposedly identical datasets evolving in parallel, and over time differences emerge that are difficult to correct.

For the Czech sites, I'm moving all the old data into an `EXCLUDED` project, marking the site names as `OLD`, and also adding a description to the `STATIONS` table saying they are `Associated with data supplied prior to August 2016`. In principle, I'd be happy to delete all of this data entirely (which would remove any concerns about duplication and database normalisation etc.), but I don't want to lose any information if I can avoid it. Besides, deleting records from RESA2 is not straightforward, due to all the other database interactions (a topic I'd like to avoid getting involved in for the moment). My hope is that the changes I'm making here will not cause further confusion, because the sites with the suffix `_Old` will be discontinued completely, i.e. no new data will be associated with them and they won't be used in subsequent projects. This should avoid the messy situation that we're now in with sites from other countries.

3.2. Upload new data

With the new project structure in place, I can use *upload_icpw_template.ipynb* to load the latest data from Vladimir into the database. This seems to have worked successfully, but for some reason the available parameters for the new sites are not showing up properly in the RESA2 application. They are there, and the values appear to be correct, but the `Refresh parameter list` button is not working as it used to. However, if the `STANDARD` button is pressed to select the routine parameters of interest, everything works as expected and the data can be exported to Excel as usual. The bit that's missing is that the user is no longer presented with the option of selecting custom parameters. I'm not sure what's happening here - another of the mysteries of RESA2! I'll have to **ask Tore where his code pulls this information from and modify the database accordingly**, but I'm not going to worry about this for now, as all my code will interact directly with the underlying Oracle database itself.

3.3. Data checking

To check that my changes have worked as expected, I want to compare the data now in the database with what's in Vladimir's spreadsheet. To do this, I've manually extracted time series for the 8 Czech sites from RESA2 and saved them in *check_czech.xlsx*. The code below plots these values from RESA2 against the raw values in Vladimir's spreadsheet, which hopefully should agree.
###Code
# Read data
in_xlsx = (r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\Call_for_Data_2016'
           r'\Replies\czech_republic\check_czech.xlsx')
r2_df = pd.read_excel(in_xlsx, sheetname='RESA')
vl_df = pd.read_excel(in_xlsx, sheetname='Orig')
# Define params of interest
params = ['pH', 'Ca', 'Mg', 'Na', 'K', 'Cl', 'SO4', 'ALK', 'TOC']
fig, axes = plt.subplots(nrows=9, ncols=8, figsize=(15, 20))
# Loop over data
for row, param in enumerate(params):
# Get data
df1 = r2_df[['Code', 'Date', param]]
df2 = vl_df[['Code', 'Date', param]]
# Pivot
df1 = df1.pivot(index='Date', columns='Code', values=param)
df2 = df2.pivot(index='Date', columns='Code', values=param)
# Join
df = df1.merge(df2, how='outer',
left_index=True, right_index=True)
for site in range(1, 9):
        # Get data for this site (take a copy to avoid SettingWithCopyWarning)
        s_df = df[['CZ%02d_RESA' % site, 'CZ%02d_Orig' % site]].copy()
        s_df.dropna(how='any', inplace=True)
# Plot
s_df.plot(ax=axes[row, site-1], legend=False)
axes[row, site-1].set_title('CZ%02d %s' % (site, param))
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
This looks promising: there is only one colour visible on these plots, but two lines are being plotted, so I assume the results from RESA are overlapping exactly with those from Vladimir's spreadsheet.

4. Trends analysis

The final step for this notebook is to update my trends code and re-run the analysis. Note that **there are still some known issues with the data, so these results should not be taken as definitive**, but hopefully they are a step in the right direction.

4.1. Modify trends code

My previous [trends notebook](http://nbviewer.jupyter.org/github/JamesSample/icpw/blob/master/updated_toc_trends_analysis2.ipynb) left a number of loose ends, which need tidying up here:

 * My current code averages samples taken from 0 and 0.5 m depth. In his e-mail from 21/07/2016 at 14:54, Don recommended only using samples from 0.5 m. Changing this will require some careful coding, as many of the samples seem to have `depth = 0` as a kind of default, so if I select only those records where `depth = 0.5`, I'll end up missing a large chunk of the data. My algorithm needs to first select all the surface water data, and then choose the 0.5 m sample *only* in cases where there are duplicates (i.e. results for both 0.5 m and 0.0 m).
 * My original code used reference ratios for sea-salt corrections which I back-calculated from RESA2. In an e-mail received 16/08/2016 at 14:04 Heleen provided the true values used for the original analysis. I think these agree with my back-calculated values, but this needs checking.
 * My code does not currently calculate trends for the correct parameters. Based on Heleen's e-mail (see above), the main quantities of interest are:
   * ESO4 ($μeq/l$)
   * ESO4X ($μeq/l$)
   * ECl ($μeq/l$)
   * ESO4_ECl ($μeq/l$)
   * ECa_EMg ($μeq/l$)
   * ECaX_EMgX ($μeq/l$)
   * ENO3 ($μeq/l$)
   * TOC ($mgC/l$)
   * Al ($mg/l$)
   * ANC ($= Ca + Mg + K + Na + NH_4 - Cl - SO_4 - NO_3$, all in $μeq/l$)
   * ALK ($μeq/l$)
   * HPLUS ($μeq/l$)
 * Deal with duplicates. The [previous notebook](http://nbviewer.jupyter.org/github/JamesSample/icpw/blob/master/updated_toc_trends_analysis2.ipynb) highlighted some other duplicates for which I couldn't find an explanation (see section 3.2.3). Suzanne hasn't responded to my e-mails, so I'm not in a position to correct these issues at this stage. For now, **I propose to take the most recently uploaded of the duplicate values**, as these are usually the ones for which I can find a "paper-trail".
 * Choose a method for dealing with values at the detection limit. There are no hard-and-fast rules here and the choice is unlikely to dramatically influence trend results calculated using non-parametric statistics (i.e. Sen's slope calculated from annual medians). At present, my code simply substitutes the detection limit value, which is the easiest and most transparent approach. **I can change this if there are strong arguments for using an alternative method, though.**

4.1.1. Depths

Some more careful checking of the database shows that this is actually a very minor problem. Records with measurements from both 0 m and 0.5 m only occur for a few of the Norwegian sites back in the 1970s. For these measurements, the chemistry values are the same regardless of which depth is chosen, so the problem is actually negligible. The code below finds all of the duplicate depths associated with the ICPW trends sites. The issue is so limited that I don't think it needs further consideration.
###Code
# Specify projects of interest
proj_list = ['ICPW_TOCTRENDS_2015_CA_ATL',
'ICPW_TOCTRENDS_2015_CA_DO',
'ICPW_TOCTRENDS_2015_CA_ICPW',
'ICPW_TOCTRENDS_2015_CA_NF',
'ICPW_TOCTRENDS_2015_CA_QU',
'ICPW_TOCTRENDS_2015_CZ',
'ICPW_TOCTRENDS_2015_Cz2',
'ICPW_TOCTRENDS_2015_FI',
'ICPW_TOCTRENDS_2015_NO',
'ICPW_TOCTRENDS_2015_SE',
'ICPW_TOCTRENDS_2015_UK',
'ICPW_TOCTRENDS_2015_US_LTM',
'ICPWaters Ca']
sql = ('SELECT station_id, station_code '
'FROM resa2.stations '
'WHERE station_id IN (SELECT UNIQUE(station_id) '
'FROM resa2.projects_stations '
'WHERE project_id IN (SELECT project_id '
'FROM resa2.projects '
'WHERE project_name IN %s))'
% str(tuple(proj_list)))
stn_df = pd.read_sql(sql, engine)
sql = ("SELECT water_sample_id, station_id, sample_date "
"FROM resa2.water_samples "
"WHERE station_id IN %s "
"AND depth1 <= 1 "
"AND depth2 <= 1" % str(tuple(stn_df['station_id'].values)))
df = pd.read_sql(sql, engine)
print 'Number of duplicated records:', df.duplicated(subset=['station_id', 'sample_date']).sum()
df[df.duplicated(subset=['station_id', 'sample_date'], keep=False)]
###Output
Number of duplicated records: 12
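###Markdown
For reference, the "prefer 0.5 m" rule described above reduces to a couple of lines of pandas. This is a sketch only, and assumes `depth1` is also included in the sample query:
###Code
# Sketch of the depth-preference rule (assumes a 'depth1' column is selected):
# sort so 0.5 m samples come first, then keep one row per station-date pair
df_pref = (df.sort_values('depth1', ascending=False)
             .drop_duplicates(subset=['station_id', 'sample_date'], keep='first'))
###Output
_____no_output_____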
###Markdown
4.1.2. Reference ratios

These have been checked and the values in my code agree with those in Heleen's e-mail.

4.1.3. Additional parameters

I have modified the code to include ANC, calculated as

$$ANC = Ca + Mg + K + Na + NH_4 - Cl - SO_4 - NO_3$$

where all values are in $μeq/l$ (as per Heleen's e-mail - see above). However, note that not all sites have complete data for all these chemical parameters e.g. the Newfoundland sites do not report NH4. I believe NH4 is usually a fairly small component of ANC, so in my code I've decided to assume $NH_4 = 0$ unless it's explicitly specified. This means that I can still calculate ANC for the Newfoundland sites (which would otherwise all return "NoData"). **Is this reasonable? Are there any other chemical species that can safely be ignored when they're not reported? Check this with Heleen**.

Including alkalinity is not straightforward, as there are many different methods and some of them are not fully described in the database. To make use of the alkalinity data I need to convert everything to a common scale and units (preferably in $\mu eq/l$). This can be done for some of the methods using the metadata in RESA2, but for others this is not possible. Having queried this with Heleen, she has suggested ignoring alkalinity for now (see e-mail received 17/10/2016 at 13:19), but this is something to return to later, possibly with help from Øyvind Garmo regarding how to align the various methods. For future reference, the methods in question are listed here:

C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\Data_Summaries\alkalin_method_issues.xlsx

4.1.4. Duplicates

Where duplicates are reported which cannot be explained by different sampling depths (e.g. 0 m and 0.5 m, as above), my code now selects the value that has been **added to the database most recently**. There is relatively little justification for this decision: I haven't been able to get to the bottom of these issues and this step is really just a fudge (my code previously averaged these measurements, which is also dodgy). The main reason for choosing the most recent database addition is that I can generally find these values in the raw data, whereas the older measurements aren't so obvious. However, it's possible that this is more a reflection of my (still limited) knowledge of the data on the NIVA network, rather than because the more recent data is "more correct".

4.1.5. Detection limits

Heleen is happy to replace values below the detection limit with the detection limit itself, which is what my code currently does. See e-mail received 07/10/2016 at 15:18.

5. New trends analysis

This section tests the new code by applying it to the data for all years (i.e. the full dataset for each site).
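Before running the analysis, it is worth pinning down the ANC calculation from section 4.1.3 as code. The sketch below shows the intended logic only - the column names are assumed, all concentrations are in $μeq/l$, and missing NH4 is treated as zero, as discussed above:
###Code
# Sketch of the ANC calculation (column names are assumed; units ueq/l).
# NH4 is treated as zero where it is not reported, per section 4.1.3
def calc_anc(chem_df):
    nh4 = chem_df['ENH4'].fillna(0) if 'ENH4' in chem_df.columns else 0
    return (chem_df['ECa'] + chem_df['EMg'] + chem_df['EK'] + chem_df['ENa']
            + nh4 - chem_df['ECl'] - chem_df['ESO4'] - chem_df['ENO3'])
###Output
_____no_output_____
###Markdown
With these decisions made, the updated trends code can be imported and run for all years.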
###Code
# Import code for trends analysis
resa2_trends_path = (r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\TOC_Trends_Analysis_2015'
r'\Python\icpw\toc_trends_analysis.py')
resa2_trends = imp.load_source('toc_trends_analysis', resa2_trends_path)
###Output
_____no_output_____
###Markdown
The list of projects to consider has been previously agreed with Heleen (see section 3 of [this notebook](http://nbviewer.jupyter.org/github/JamesSample/icpw/blob/master/toc_trends_2015_data_cleaning.ipynb)).
###Code
# User input
# Specify projects of interest
proj_list = ['ICPW_TOCTRENDS_2015_CA_ATL',
'ICPW_TOCTRENDS_2015_CA_DO',
'ICPW_TOCTRENDS_2015_CA_ICPW',
'ICPW_TOCTRENDS_2015_CA_NF',
'ICPW_TOCTRENDS_2015_CA_QU',
'ICPW_TOCTRENDS_2015_CZ',
'ICPW_TOCTRENDS_2015_Cz2',
'ICPW_TOCTRENDS_2015_FI',
'ICPW_TOCTRENDS_2015_NO',
'ICPW_TOCTRENDS_2015_SE',
'ICPW_TOCTRENDS_2015_UK',
'ICPW_TOCTRENDS_2015_US_LTM',
'ICPWaters Ca']
# Output paths
plot_fold = (r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\TOC_Trends_Analysis_2015'
r'\Results\Trends_Plots')
res_csv = (r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\TOC_Trends_Analysis_2015'
r'\Results\res.csv')
dup_csv = (r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\TOC_Trends_Analysis_2015'
r'\Results\dup.csv')
nd_csv = (r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\TOC_Trends_Analysis_2015'
r'\Results\nd.csv')
# Run analysis
res_df, dup_df, nd_df = resa2_trends.run_trend_analysis(proj_list, engine,
st_yr=None, end_yr=None,
plot=True, fold=plot_fold)
# Delete mk_std_dev col as not relevant here
del res_df['mk_std_dev']
# Write output
res_df.to_csv(res_csv, index=False)
dup_df.to_csv(dup_csv, index=False)
nd_df.to_csv(nd_csv, index=False)
res_df.head(14).sort_values(by='par_id')
###Output
Extracting data from RESA2...
The database contains duplicate values for some station-date-parameter combinations.
Only the most recent values will be used, but you should check the repeated values are not errors.
The duplicated entries are returned in a separate dataframe.
Some stations have no relevant data in the period specified. Their IDs are returned in a separate dataframe.
Done.
Converting units and applying sea-salt correction...
Done.
Calculating statistics...
Data series for Al at site 101 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 102 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 103 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 104 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 107 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 109 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 112 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 115 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 118 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 119 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 120 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 121 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 122 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 123 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 128 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 132 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 134 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 135 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 144 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 146 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 147 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 150 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 156 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 158 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 161 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 162 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 163 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 166 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 168 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 170 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 173 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 176 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 179 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 180 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 181 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 182 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 183 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 185 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 192 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 193 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 196 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 12081 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for EH at site 23468 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 23546 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 36547 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 36560 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for EH at site 36733 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for EH at site 36739 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for EH at site 36750 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for EH at site 36753 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for EH at site 36793 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for EH at site 36797 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 37063 has fewer than 10 non-null values. Significance estimates may be unreliable.
Done.
Finished.
###Markdown
6. Data issues

The code seems to be working correctly, but I still have concerns about some of the values in RESA. Looking at the output (`res.csv`), the following issues are fairly obvious:

 * A number of the Swedish stations have ridiculously high EH+ (i.e. low pH). They can be identified with the following SQL:

        SELECT * FROM RESA2.STATIONS
        WHERE STATION_ID IN (36797, 36733, 36753, 36793, 36588, 36660, 36578,
                             36750, 36670, 36825, 36584, 36680, 36788, 36575,
                             36813, 36636, 36690, 36592, 36675, 36826, 36711,
                             36723, 36731, 36739);

   As far as I can tell, my trends code is working correctly, but the values in RESA2 for these sites must surely be wrong (pH is often unrealistically low).
 * Sites 37124 and 37129 also have very high EH+/low pH. A quick check suggests that zeros have been entered into RESA instead of NoData in a few cases - this needs correcting.
 * Overall, **the entire dataset would benefit from being checked by someone with a better feel for realistic acid chemistry values than me**.

6.1. Correct sites 37124 and 37129

I've removed the zeros for pH from these datasets, as they've obviously been entered in error.

6.2. Correct Swedish sites

Where have the very low pH values for the 24 Swedish sites come from? The raw data from the Focal Centre for these stations is here:

K:\Prosjekter\langtransporterte forurensninger\O-23300 - ICP-WATERS - HWI\Tilsendte data fra Focalsentere\Sweden\EKSTRA2015

These files contain sensible values for pH (usually around 7), so what's gone wrong? The filenames ending with the letters "ED" denote spreadsheets created by Tore for uploading into the database. He usually uses the ICPW template for this, but in this case he's done something different. Strangely, pH seems to be missing from the `Lakesfrom 1988 3y` sheet of the "ED" files and it looks as though something has gone wrong with the upload process. In fact, it looks suspiciously like the values for Pb (in ug/l) have been entered as pH by mistake.

Correcting this is fiddly, because I suspect the problem actually affects a large number of Swedish sites, not just the ones listed above. The difficulty is that these errors aren't obvious in cases where the Pb concentrations are high enough to be sensibly interpreted as pH.

As a start, I've copied the correct pH data from the raw data file over to:

C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\Call_for_Data_2016\Replies\sweden\correct_ph_error.xls

Let's see how this compares to what's in the database.
###Code
# Read data from template
raw_xls = (r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\Call_for_Data_2016'
r'\Replies\sweden\correct_ph_error.xls')
raw_df = pd.read_excel(raw_xls, sheetname='Data')
raw_df.head()
# Get unique list of site codes
stn_list = raw_df['Code'].unique()
# Get station_ids
sql = ('SELECT * FROM resa2.stations '
'WHERE station_code IN %s' % str(tuple(stn_list)))
stn_df = pd.read_sql(sql, engine)
# Decode special characters from `windows-1252` encoding to unicode
stn_df['station_name'] = stn_df['station_name'].str.decode('windows-1252')
stn_df.head()
# Get all samples from these sites
sql = ('SELECT * FROM resa2.water_samples '
'WHERE station_id IN %s' % str(tuple(stn_df['station_id'])))
samp_df = pd.read_sql(sql, engine)
samp_df.head()
# Get all the pH data for these sites
sql = ('SELECT * FROM resa2.water_chemistry_values2 '
'WHERE sample_id IN (SELECT water_sample_id '
'FROM resa2.water_samples '
'WHERE station_id IN %s) '
'AND method_id = 10268' % str(tuple(stn_df['station_id'])))
wc_df = pd.read_sql(sql, engine)
wc_df.head()
# Join samples
df = pd.merge(wc_df, samp_df, how='left',
left_on='sample_id',
right_on='water_sample_id')
# Join stations
df = pd.merge(df, stn_df, how='left',
left_on='station_id',
right_on='station_id')
# Join raw data
df = pd.merge(df, raw_df, how='left',
left_on=['station_code', 'sample_date'],
right_on=['Code', 'Date'])
# Extract columns of interest
df = df[['station_id', 'station_code', 'station_name', 'sample_date',
'sample_id', 'method_id', 'flag1', 'value', 'pH', 'Pb']]
df.head(10)
###Output
_____no_output_____
###Markdown
The `value` column in the above table shows pH measurements according to RESA2. The pH and Pb columns show the values in the raw data. It is clear that the database values agree with those in the raw data in many places (i.e. the database is correct), which only makes this issue stranger: I would expect all of this data to be uploaded in a single go, so it's surprising that the results are correct for some sites but not for others. I'm not sure what's going on here!

Nevertheless, there are 310 occasions where values for Pb have been entered as pH by mistake.
###Code
print 'Number of occasions where Pb entered instead of pH:', len(df.query('value != pH').dropna(subset=['pH',]))
df2 = df.query('value != pH').dropna(subset=['pH',])
df2.head()
###Output
Number of occasions where Pb entered instead of pH: 310
###Markdown
The next step is to loop over these 310 records, replacing the numbers in the `value` column with those in the `pH` column. Note that a few of the Pb entries were associated with "less than" flags, so these need clearing as well.
###Code
df2.query('flag1 == "<"')
# Loop over rows
for index, row in df2.iterrows():
# Get data
samp_id = row['sample_id']
ph = row['pH']
# Update chem table
sql = ("UPDATE resa2.water_chemistry_values2 "
"SET value = %s, "
"flag1 = NULL "
"WHERE sample_id = %s "
"AND method_id = 10268"
% (ph, samp_id))
result = conn.execute(sql)
# Check changes have taken effect
sql = ('SELECT * FROM resa2.water_chemistry_values2 '
'WHERE sample_id = 597377 '
'AND method_id = 10268')
df3 = pd.read_sql(sql, engine)
df3
###Output
_____no_output_____
###Markdown
Finally, re-run the trends code to make sure the EH+ values are now more sensible.
###Code
# Run analysis
res_df, dup_df, nd_df = resa2_trends.run_trend_analysis(proj_list, engine,
st_yr=None, end_yr=None,
plot=False, fold=None)
# Delete mk_std_dev col as not relevant here
del res_df['mk_std_dev']
res_df.sort_values(by='mean', ascending=False).head()
###Output
Extracting data from RESA2...
The database contains duplicate values for some station-date-parameter combinations.
Only the most recent values will be used, but you should check the repeated values are not errors.
The duplicated entries are returned in a separate dataframe.
Some stations have no relevant data in the period specified. Their IDs are returned in a separate dataframe.
Done.
Converting units and applying sea-salt correction...
Done.
Calculating statistics...
Data series for Al at site 101 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 102 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 103 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 104 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 107 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 109 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 112 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 115 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 118 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 119 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 120 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 121 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 122 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 123 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 128 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 132 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 134 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 135 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 144 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 146 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 147 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 150 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 156 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 158 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 161 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 162 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 163 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 166 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 168 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 170 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 173 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 176 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 179 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 180 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 181 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 182 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 183 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 185 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 192 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 193 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 196 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 12081 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for EH at site 23468 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 23546 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 36547 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 36560 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for EH at site 36733 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for EH at site 36739 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for EH at site 36750 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for EH at site 36753 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for EH at site 36793 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for EH at site 36797 has fewer than 10 non-null values. Significance estimates may be unreliable.
Data series for Al at site 37063 has fewer than 10 non-null values. Significance estimates may be unreliable.
Done.
Finished.
|
Zimnat_challenge/code.ipynb | ###Markdown
Functions
###Code
# Imports used throughout this notebook
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datacleaner import autoclean
from sklearn.model_selection import RandomizedSearchCV, train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import classification_report, roc_auc_score, log_loss
from sklearn.inspection import permutation_importance
from xgboost import XGBClassifier, plot_importance

def timer(start_time=None):
"""Timer to measure working time of the algorithm"""
if not start_time:
start_time = datetime.now()
return start_time
elif start_time:
thour, time_sec = divmod((datetime.now() - start_time).total_seconds(), 3600)
tmin, tsec = divmod(time_sec, 60)
print("Time taken: %i hours %i minutes %s seconds" % (thour, tmin, round(tsec, 2)))
def get_estimator(model, grid):
"""Function to select the best estimator using Random search CV"""
# Random cross validation for Hyperparameter tuning
x_random = RandomizedSearchCV(
estimator=model, param_distributions=grid, n_iter=5, n_jobs=-1, scoring='neg_log_loss', cv=5, verbose=2, random_state=33
)
    start_time = timer(None)  # timing starts from this point for "start_time"
    x_random.fit(X_train, y_train)  # note: uses the global X_train/y_train
    timer(start_time)  # timing ends here for "start_time"
return x_random.best_estimator_
def encode_LE(data, cols, verbose=True):
"""Function to Encode Data"""
for col in cols:
data[col], unique = pd.factorize(data[col], sort=True)
def age_group(data, col):
    """Function to create new feature age group"""
    # An if/elif/else chain guarantees exactly one label per row, so the
    # returned list always matches the length of the input column
    group = []
    for age in data[col]:
        if age <= 30:
            group.append('young')
        elif age <= 60:
            group.append('adult')
        else:
            group.append('elder')
    return group
def get_output(data, clf, name):
"""Function to make predictions and store it into a csv file"""
x = {
'ID X PCODE': data['ID X PCODE']
}
    # Prediction (note: X_true is the global test feature matrix; its rows
    # must be aligned with data['ID X PCODE'])
    y_pred = clf.predict(X_true)
x['Label'] = y_pred
output = pd.DataFrame(x)
output.reset_index(drop=True, inplace=True)
return output.to_csv(name, index=False)
###Output
_____no_output_____
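###Markdown
A quick sanity check of the helpers on a toy frame (illustrative only; the column names below are made up):
###Code
# Toy example exercising age_group and encode_LE
toy = pd.DataFrame({'age': [25, 45, 70], 'sex': ['M', 'F', 'M']})
toy['age_group'] = age_group(toy, 'age')
encode_LE(toy, ['sex'])
print(toy)
###Output
_____no_output_____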
###Markdown
A. Training Set

1. Load Data
###Code
print("*"*45, "Train set", "*"*45)
df = pd.read_csv(r"/home/praveen/Downloads/Data Science/Data projects/Insurance/file/Train.csv")
df.fillna(method='ffill', inplace=True)
df.sample(5).T
###Output
********************************************* Train set *********************************************
###Markdown
2. Restructure Data
###Code
# Products in the df
products = df[['P5DA', 'RIBP', '8NN1', '7POT', '66FJ', 'GYSR', 'SOP4', 'RVSZ', 'PYUQ', 'LJR9', 'N2MW', 'AHXO',
'BSTQ', 'FM3X', 'K6QO', 'QBOL', 'JWFN', 'JZ9D', 'J9JW', 'GHYX', 'ECY3']]
# Number of products
nop = products.shape[1]
pro_cols = products.columns.tolist()
# Age feature in years
df['Age'] = 2020 - df['birth_year']
# Age Group of customers
df['age_group'] = age_group(df, 'Age')
# Retention time in days
df['join_date'] = df['join_date'].apply(lambda x: datetime.strptime(str(x), "%d/%m/%Y"))
df['retention_time(days)'] = (datetime(2020,7,23) - df['join_date']).dt.days
# Retention time in years
df['retention_time(years)'] = df['retention_time(days)']/365
# Aligning features
df = df[['ID', 'join_date', 'sex', 'marital_status', 'birth_year', 'branch_code',
'occupation_code', 'occupation_category_code', 'Age', 'age_group', 'retention_time(days)',
'retention_time(years)',
'P5DA', 'RIBP', '8NN1', '7POT', '66FJ', 'GYSR','SOP4', 'RVSZ',
'PYUQ','LJR9', 'N2MW', 'AHXO', 'BSTQ', 'FM3X', 'K6QO', 'QBOL',
'JWFN', 'JZ9D', 'J9JW', 'GHYX', 'ECY3']]
###Output
_____no_output_____
###Markdown
Target Variable

Melting the wide product matrix into long format recasts the task as binary classification: each (customer, product) pair becomes one row, with `Label` indicating whether the customer holds that product.
###Code
# Melt the product columns to long format: one row per (customer, product) pair
products = df[['P5DA', 'RIBP', '8NN1', '7POT', '66FJ', 'GYSR', 'SOP4', 'RVSZ', 'PYUQ', 'LJR9', 'N2MW', 'AHXO',
'BSTQ', 'FM3X', 'K6QO', 'QBOL', 'JWFN', 'JZ9D', 'J9JW', 'GHYX', 'ECY3']]
df = df.melt(
id_vars = df.columns[:12], value_vars=products, var_name='PCODE', value_name='Label'
)
df['X'] = ' X '
# Combining Unique ID with code
df['ID X PCODE'] = df['ID'] + df['X'] + df['PCODE']
#Re-arrange columns
df = df[['ID', 'join_date', 'sex', 'marital_status', 'birth_year', 'branch_code', 'Age', 'age_group',
'retention_time(days)', 'retention_time(years)', 'occupation_code', 'occupation_category_code',
'PCODE', 'ID X PCODE', 'Label']]
df.head(5)
###Output
_____no_output_____
###Markdown
3. Base Model

3.1 Split Data
###Code
X = df.drop(['Label', 'ID', 'retention_time(years)', 'join_date', 'birth_year', 'ID X PCODE'], axis=1)
y = df['Label'].copy()
clean_X = autoclean(X)
X_train, X_val, y_train, y_val = train_test_split(clean_X, y, test_size=0.3, random_state=7)
###Output
_____no_output_____
###Markdown
3.2 Base Model Evaluation
###Code
# Classifier
base_clf = GradientBoostingClassifier().fit(X_train, y_train)
# Predictions
y_pred = base_clf.predict(X_val)
print(classification_report(y_val, y_pred))
x = {
'train score': round(base_clf.score(X_train, y_train), 2), # Training Accuracy
'test score': round(base_clf.score(X_val, y_val), 2), # Testing Accuracy
'ROC AUC score': round(roc_auc_score(y_val, y_pred), 2), # ROC AUC Score
'log loss': round(log_loss(y_val, y_pred), 2) # Log Loss Score
}
print(x)
###Output
precision recall f1-score support
0 0.97 0.98 0.98 163502
1 0.85 0.72 0.78 20030
accuracy 0.96 183532
macro avg 0.91 0.85 0.88 183532
weighted avg 0.95 0.96 0.95 183532
{'train score': 0.96, 'test score': 0.96, 'ROC AUC score': 0.85, 'log loss': 1.52}
###Markdown
4. Feature Significance
###Code
cols = X.columns.tolist()
encode_LE(X,['sex', 'marital_status', 'branch_code', 'occupation_category_code', 'PCODE', 'occupation_code',
'age_group'])
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.3, random_state=33)
xgb = XGBClassifier()
xgb.fit(X_train, y_train)
# perform xgb importance
xgb_fea_imp = pd.DataFrame(list(xgb.get_booster().get_fscore().items()),
columns=['feature','importance']).sort_values('importance', ascending=False)
# get xgb importance
xgb_sig = xgb.feature_importances_
# perform permutation importance
results = permutation_importance(xgb, X, y, scoring='neg_log_loss')
# get permutation importance
importance = results.importances_mean
# List of feature f score
print('Feature Importance using f score :\n', '', xgb_fea_imp)
print('-'*50)
# Plot feature f score
plot_importance(xgb)
plt.show()
print('-'*50)
# Summarize feature importance
print('Feature importance using xgboost :')
for i,v in enumerate(xgb_sig):
print('Feature: %0d, Score: %.5f' % (i,v))
print('-'*50)
# plot xgb feature importance
plt.figure(figsize=[15, 4])
plt.bar([x for x in cols], xgb_sig)
plt.xticks(rotation=90)
plt.show()
print('-'*50)
# Summarize feature importance
print('Feature importance using permutation :')
for i,v in enumerate(importance):
print('Feature: %0d, Score: %.5f' % (i,v))
print('-'*50)
# plot permutation feature importance
plt.figure(figsize=[15, 4])
plt.bar([x for x in cols], importance)
plt.xticks(rotation=90)
plt.show()
###Output
Feature Importance using f score :
feature importance
0 PCODE 1500
1 retention_time(days) 1176
4 occupation_code 718
3 branch_code 695
6 Age 641
2 marital_status 261
5 occupation_category_code 125
7 sex 90
8 age_group 21
--------------------------------------------------
###Markdown
Feature Selection

The following columns will be dropped:
- join_date
- birth_year
- retention_time(days)
- Age

Split Data
###Code
X_fs = df.drop(['Label', 'ID', 'ID X PCODE', 'join_date', 'birth_year', 'retention_time(days)', 'Age'], axis=1)
y_fs = df['Label'].copy()
encode_LE(X_fs,['sex', 'marital_status', 'branch_code', 'occupation_category_code', 'PCODE', 'occupation_code',
'age_group'])
# Train and Validation set
X_train, X_val, y_train, y_val = train_test_split(X_fs, y_fs, test_size=0.3, random_state=33)
###Output
_____no_output_____
###Markdown
5. Hyperparameter Tuning Grid
###Code
# Number of trees in the model
max_iters = [int(x) for x in np.arange(1000, 2000, 200)]
# Learning rate in XGBClassifier
learning_rate = [round(x, 2) for x in np.arange(0.05, 0.3, 0.05)]
# Number of trees in the model
n_estimators = [int(x) for x in np.arange(100, 300, 50)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(9, 35, num=11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# Create the random grid
gb_grid = {'learning_rate': learning_rate,
'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_split': [2, 3, 4, 5, 6, 7, 8],
'min_samples_leaf': list(np.arange(1,7)),
'max_leaf_nodes': list(np.arange(2,11,2))}
print("Grid for Hyperparameter tuning :\n", gb_grid)
print('-'*100)
###Output
Grid for Hyperparameter tuning :
{'learning_rate': [0.05, 0.1, 0.15, 0.2, 0.25], 'n_estimators': [100, 150, 200, 250], 'max_depth': [9, 11, 14, 16, 19, 22, 24, 27, 29, 32, 35, None], 'min_samples_split': [2, 3, 4, 5, 6, 7, 8], 'min_samples_leaf': [1, 2, 3, 4, 5, 6], 'max_leaf_nodes': [2, 4, 6, 8, 10]}
----------------------------------------------------------------------------------------------------
###Markdown
Tuned Model Evaluation
###Code
gb_imb = get_estimator(GradientBoostingClassifier(), gb_grid)
print('Best Estimator :\n', gb_imb)
print('-'*100)
# Fit model on train set
gb_imb.fit(X_train, y_train)
# Predictions
y_pred = gb_imb.predict(X_val)
print(classification_report(y_val, y_pred))
x = {
'train score': round(gb_imb.score(X_train, y_train), 2), # Training Accuracy
'test score': round(gb_imb.score(X_val, y_val), 2), # Testing Accuracy
'ROC AUC Score': round(roc_auc_score(y_val, y_pred), 2), # ROC AUC Score
'log loss': round(log_loss(y_val, y_pred), 2) # Log Loss Score
}
print(x)
###Output
precision recall f1-score support
0 0.97 0.99 0.98 163557
1 0.87 0.75 0.81 19975
accuracy 0.96 183532
macro avg 0.92 0.87 0.89 183532
weighted avg 0.96 0.96 0.96 183532
{'train score': 0.96, 'test score': 0.96, 'ROC AUC Score': 0.87, 'log loss': 1.35}
###Markdown
B. Test Set 1. Load Dataset
###Code
### B. Testing set
print("*"*45, "Test set", "*"*45)
test = pd.read_csv(r"/home/praveen/Downloads/Data Science/Data projects/Insurance/file/Test.csv")
# Filling NaN values
test.fillna(method='ffill', inplace=True)
###Output
********************************************* Test set *********************************************
###Markdown
2. Restructure Data
###Code
# Products in the test
products = test[['P5DA', 'RIBP', '8NN1', '7POT', '66FJ', 'GYSR', 'SOP4', 'RVSZ', 'PYUQ', 'LJR9', 'N2MW', 'AHXO',
'BSTQ', 'FM3X', 'K6QO', 'QBOL', 'JWFN', 'JZ9D', 'J9JW', 'GHYX', 'ECY3']]
# Age feature in years
test['Age'] = 2020 - test['birth_year']
# Age Group of customers
test['age_group'] = age_group(test, 'Age')
# Retention time in days
test['join_date'] = test['join_date'].apply(lambda x: datetime.strptime(str(x), "%d/%m/%Y"))
test['retention_time(days)'] = (datetime(2020,7,23) - test['join_date']).dt.days
# Retention time in years
test['retention_time(years)'] = test['retention_time(days)']/365
# Aligning features
test = test[['ID', 'join_date', 'sex', 'marital_status', 'birth_year', 'branch_code',
'occupation_code', 'occupation_category_code', 'Age', 'age_group', 'retention_time(days)',
'retention_time(years)',
'P5DA', 'RIBP', '8NN1', '7POT', '66FJ', 'GYSR','SOP4', 'RVSZ',
'PYUQ','LJR9', 'N2MW', 'AHXO', 'BSTQ', 'FM3X', 'K6QO', 'QBOL',
'JWFN', 'JZ9D', 'J9JW', 'GHYX', 'ECY3']]
###Output
_____no_output_____
###Markdown
Target Variable
###Code
# Melt the product columns into long format to create the Label column
products = test[['P5DA', 'RIBP', '8NN1', '7POT', '66FJ', 'GYSR', 'SOP4', 'RVSZ', 'PYUQ', 'LJR9', 'N2MW', 'AHXO',
'BSTQ', 'FM3X', 'K6QO', 'QBOL', 'JWFN', 'JZ9D', 'J9JW', 'GHYX', 'ECY3']]
test = test.melt(
id_vars = test.columns[:12], value_vars=products, var_name='PCODE', value_name='Label'
)
test['X'] = ' X '
# Combining Unique ID with code
test['ID X PCODE'] = test['ID'] + test['X'] + test['PCODE']
#Re-arrange columns
test = test[['ID', 'join_date', 'sex', 'marital_status', 'birth_year', 'branch_code', 'Age', 'age_group',
'retention_time(days)', 'retention_time(years)', 'occupation_code', 'occupation_category_code',
'PCODE', 'ID X PCODE', 'Label']]
###Output
_____no_output_____
###Markdown
Feature Engineering
###Code
X_true = test.drop(['Label', 'ID', 'ID X PCODE', 'join_date', 'birth_year', 'retention_time(days)', 'Age'], axis=1)
encode_LE(X_true,['sex', 'marital_status', 'branch_code', 'occupation_category_code', 'PCODE', 'occupation_code',
'age_group'])
###Output
_____no_output_____
###Markdown
Output
###Code
get_output(test, gb_imb, 'gb_imb_output.csv')
###Output
_____no_output_____ |
deep-learning/multi-frameworks/notebooks/Keras_TF_RNN.ipynb | ###Markdown
High-level RNN Keras (TF) Example
###Code
import os
import sys
import numpy as np
os.environ['KERAS_BACKEND'] = "tensorflow"
import keras as K
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Embedding, GRU, CuDNNGRU
from common.params_lstm import *
from common.utils import *
# Force one-gpu
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
print("OS: ", sys.platform)
print("Python: ", sys.version)
print("Keras: ", K.__version__)
print("Numpy: ", np.__version__)
print("Tensorflow: ", tf.__version__)
print(K.backend.backend())
print(K.backend.image_data_format())
print("GPU: ", get_gpu_name())
print(get_cuda_version())
print("CuDNN Version ", get_cudnn_version())
def create_symbol(CUDNN=True, maxf=MAXFEATURES, edim=EMBEDSIZE, nhid=NUMHIDDEN, maxl=MAXLEN):
model = Sequential()
model.add(Embedding(maxf, edim, input_length=maxl))
# Only return last output
if not CUDNN:
model.add(GRU(nhid, return_sequences=False, return_state=False))
else:
model.add(CuDNNGRU(nhid, return_sequences=False, return_state=False))
model.add(Dense(2, activation='softmax'))
return model
def init_model(m, lr=LR, b1=BETA_1, b2=BETA_2, eps=EPS):
m.compile(
loss = "categorical_crossentropy",
optimizer = K.optimizers.Adam(lr, b1, b2, eps),
metrics = ['accuracy'])
return m
%%time
# Data into format for library
x_train, x_test, y_train, y_test = imdb_for_library(seq_len=MAXLEN, max_features=MAXFEATURES, one_hot=True)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
print(x_train.dtype, x_test.dtype, y_train.dtype, y_test.dtype)
%%time
# Load symbol
sym = create_symbol()
%%time
# Initialise model
model = init_model(sym)
model.summary()
%%time
# Main training loop: 26s
model.fit(x_train,
y_train,
batch_size=BATCHSIZE,
epochs=EPOCHS,
verbose=1)
%%time
# Main evaluation loop: 3s
y_guess = model.predict(x_test, batch_size=BATCHSIZE)
y_guess = np.argmax(y_guess, axis=-1)
y_truth = np.argmax(y_test, axis=-1)
print("Accuracy: ", sum(y_guess == y_truth)/len(y_guess))
###Output
Accuracy: 0.85496
|
hw10/hw10.ipynb | ###Markdown
Stat 133 Homework 10Xinyang Geng
###Code
library(DataComputing)
library(XML)
###Output
_____no_output_____
###Markdown
1. We load the file earthquakes.csv into R
###Code
data = read.csv(file="earthquakes.csv") %>%
  filter(Magnitude >= 4)
long = data$Longitude
lat = data$Latitude
head(data)
###Output
_____no_output_____
###Markdown
2. Create XML document
###Code
Doc = newXMLDoc()
Root = newXMLNode("kml",namespaceDefinitions = "http://www.opengis.net/kml/2.2", doc = Doc)
Docmt = newXMLNode("Document", parent = Root)
Name = newXMLNode("Name", "Earthquakes", parent = Docmt)
Description = newXMLNode("Description", "4+ Earthquakes, 1966-present", parent = Docmt)
###Output
_____no_output_____
###Markdown
3. Adding nodes and Timestamp
###Code
n = nrow(data)
Dtime = as.character(data$DateTime)
Dtimefix = gsub("/", "-", Dtime)
time = gsub(" ","T",Dtimefix)
for (i in 1:n)
{
M = newXMLNode("Placemark", parent = Docmt)
T = newXMLNode("Point", parent = M)
coord = paste0(long[i], ",", lat[i])
newXMLNode("coordinates", coord, parent = T)
S = newXMLNode("TimeStamp",parent=M)
newXMLNode("when", time[i],"+08:00", parent=S)
}
saveXML(Doc, "earthquakes6.kml")
###Output
_____no_output_____
###Markdown
Redo the demo on the InstaCart data: 1. Use the 'interactive' function instead of 'interact'. 2. Add a Button so that, after the options are chosen, the figure is only redrawn when the Button is clicked. Practice: apply interactive widgets to EDA of the flight dataset.
###Code
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
import ipywidgets as widgets
print(widgets.__version__)
df = pd.read_csv('aisle_deparment_counts.csv')
df.head(3)
df.info()
from IPython.display import display, clear_output
df = df.sort_values('counts', ascending=False)
df.reset_index(drop=True, inplace=True)
df.head()
###Output
_____no_output_____
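###Markdown
A quick note on the task above (a minimal sketch): `widgets.interact` builds the UI and wires the callback in one call, while `widgets.interactive` returns a widget container that you can display yourself and combine with other widgets, such as a Button.
###Code
# Minimal illustration with a hypothetical callback f (not part of the solution below):
def f(n):
    print(n * 2)

w = widgets.interactive(f, n=(0, 10))  # a container holding the slider and output
# display(w)  # render it in the notebook when ready
###Output
_____no_output_____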
###Markdown
20 largest aisles and the rest
###Code
N = 20
df1 = df[['counts', 'aisle']][0:N]
other_count = df['counts'][N:].sum()
print(other_count)
other = pd.DataFrame({'counts': [other_count], 'aisle': ['others']})
df1 = df1.append(other, ignore_index=True)
df1.tail(5)
###Output
2802620
###Markdown
Bar plot
###Code
wOutput = widgets.Output(layout={'border': '1px solid white'})
wIntSlider = widgets.IntSlider(description="Aisles", value=10, min=3, max=20, step=1)
wButton = widgets.Button(description="Plot", button_style='info', tooltip="Click here", icon= 'check')
# display(wIntSlider, wOutput)
def aislePlot(N):
df2 = df[['counts', 'aisle']][0:N]
other = pd.DataFrame({'counts': [df['counts'][N:].sum()], 'aisle': ['others']})
df2 = df2.append(other, ignore_index=True)
df2.plot.bar(y='counts', color='orange', alpha=0.7, figsize=(8,3))
plt.xticks(df2.index,df2['aisle'], rotation=-60, ha='left')
plt.title("The largest aisles by orders")
plt.show()
plt.close('all')
wHBox = widgets.HBox([wIntSlider, wButton])
display(wHBox)
wAislePlot = widgets.interactive(aislePlot, N=wIntSlider)
def wAction(click):
with wOutput:
wOutput.clear_output()
global wIntSlider
display(wIntSlider)
# print(wIntSlider.value)
aislePlot(N=wIntSlider.value)
# display(wAislePlot)
wButton.on_click(wAction)
display(wOutput)
# wVBox = widgets.VBox([wHBox, wOutput])
# display(wVBox)
display(wAislePlot)
###Output
_____no_output_____ |
PVPY Demo.ipynb | ###Markdown
Spectra. Plot a couple of spectra. How about the AM1.5G and a blackbody with T=5800 K, the default value.
###Code
am15g = pvpy.PowerSpectrum(spectra="AM1.5G")
bb5800 = pvpy.PowerSpectrum(spectra="BlackBody", bbtemp=5800)
# The spectrum objects hold their spectrum as an Nx2 numpy array that you can call with the dot operator,
# the __getitem__() method (aka square brackets[]),
# or more formally via the get_spectrum() method
plt.plot(am15g[0], am15g[1], bb5800[0], bb5800[1])
plt.ylabel(r"$W m^{-2} s^{-1}$")
plt.xlabel("Wavelength (nm)")
print(am15g([400]))
###Output
[ 1.1141]
###Markdown
pvpy allows you to easily plot photon or photocurrent spectra too. Either create the spectrum directly, or convert an existing spectrum to the desired type. Spectrum objects are: PowerSpectrum (W/m^2), PhotonSpectrum (photons / (m^2 s)), PhotocurrentSpectrum (A/m^2)
###Code
# create a photon spectrum directly
am15g = pvpy.PhotonSpectrum(spectra="AM1.5G")
# or convert the previous power spectrum
bb5800.to_PhotonSpectrum()
# The spectrum objects hold their spectrum as an Nx2 numpy array that you can call with the dot operator, or
# more formally via the get_spectrum() method
plt.plot(am15g[0], am15g[1], bb5800[0], bb5800[1])
plt.ylabel(r"photons $m^{-2} s^{-1}$")
plt.xlabel("Wavelength (nm)")
###Output
_____no_output_____
###Markdown
pvpy was made with theoretical photocurrent from an EQE spectrum in mind. How much photocurrent is in the AM1.5G spectrum below 1200nm? Feed the jsc() function a unity Nx2 EQE spectrum. The result is given in the common units of mA/cm^2.
###Code
wavelengths = np.arange(400, 1200)
EQE_spectrum = np.ones(wavelengths.shape)
EQE_spectrum = np.vstack((wavelengths, EQE_spectrum))
max_photocurrent0 = pvpy.jsc(EQE_spectrum)
# For a cell with non-perfect EQE
EQE_spectrum[1] = np.linspace(1, 0, len(wavelengths))
max_photocurrent1 = pvpy.jsc(EQE_spectrum)
print("Perfect EQE gives %0.2f mA/cm^2 and a linearly decreasing EQE give %0.2f mA/cm^2." % (max_photocurrent0, max_photocurrent1))
###Output
Perfect EQE gives 45.06 mA/cm^2 and a linearly decreasing EQE give 24.38 mA/cm^2.
###Markdown
Detailed Balance Solar Cells pvpy includes detailed balance analysis for solar cells. Create a solar cell object, assign it properties such as bandgap, radiative (LED) efficiency, temperature, ideality, absorptivity, or tilt from sun. Future releases aim to include a double diode model, with series and shunt resistance. Create an illumination spectrum and illuminate the cell, then show the JV curve for the cell in the dark and in the light
###Code
# SolarCell defaults are shown below. Assign other properties with the dot-operator
mysolarcell = pvpy.SolarCell(bandgap=1.1, celltemp=300, tilt=0, back_reflector=True)
mysolarcell.LED_eff = .01 # Silicon has poor LED efficiency
voltages = np.linspace(-.1, .725, 250)
#the cell outputs in SI units, A/m^2, use 0.1 to convert to mA/cm^2
dark_JV = mysolarcell.get_current(voltages) * .1
plt.plot(voltages, dark_JV)
# create an illumination spectrum. The default black body spectrum is a sun with T=5800K
mysolarspectrum = pvpy.PhotonSpectrum(spectra="AM1.5G")
# illuminate the cell
mysolarcell.set_illumination(mysolarspectrum)
light_JV = mysolarcell.get_current(voltages) * .1
plt.plot(voltages, light_JV)
plt.ylabel(r"Current Density $mA/cm^{2}$")
plt.xlabel("Voltage (V)")
print("The incident power on my Silicon cell is %0.1f W/m^2 and it is %0.1f %% efficient." % (mysolarcell.incident_power, 100*mysolarcell.get_efficiency()))
###Output
The incident power on my Silicon cell is 1000.4 W/m^2 and it is 27.1 % efficient.
###Markdown
Recreate the famous efficiency versus bandgap curves for the 4 default illumination spectra. You can always define your own custom spectrum by passing a (2,N) numpy array to the ``spectra`` keyword
###Code
bandgaps = np.linspace(.35,4,125)
spectras = ["AM1.5G", "AM1.5D", "AM0Etr", "BlackBody",]
for spectra in spectras:
def effofbandgap(bandgap):
cell = pvpy.SolarCell(bandgap=bandgap)
spectrum = pvpy.PhotocurrentSpectrum(spectra=spectra)
cell.set_illumination(spectrum)
return cell.get_efficiency() *100
    efficiencies = [effofbandgap(bandgap) for bandgap in bandgaps]
    plt.plot(bandgaps, efficiencies)
plt.ylabel(r"Single Junction Efficiency")
plt.xlabel("Bandgap Energy (eV)")
plt.legend(spectras)
###Output
_____no_output_____ |
Stock_Algorithms/Basic_Machine_Learning_Predicts.ipynb | ###Markdown
Simple Linear Regression for stock using scikit-learn
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import seaborn as sns
%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
import yfinance as yf
yf.pdr_override()
stock = 'AAPL'
start = '2016-01-01'
end = '2018-01-01'
data = yf.download(stock, start, end)
data.head()
df = data.reset_index()
df.head()
# drop the target ('Adj Close') from the features to avoid leakage;
# keep Open, High, Low, Close, Volume to match the prediction cell below
X = df.drop(['Date', 'Adj Close'], axis=1)
y = df['Adj Close']
from sklearn.model_selection import train_test_split
# Split X and y into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.25, random_state=0)
from sklearn.linear_model import LinearRegression
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
intercept = regression_model.intercept_
print("The intercept for our model is {}".format(intercept))
regression_model.score(X_test, y_test)
from sklearn.metrics import mean_squared_error
y_predict = regression_model.predict(X_test)
regression_model_mse = mean_squared_error(y_predict, y_test)
regression_model_mse
math.sqrt(regression_model_mse)
# input the latest Open, High, Low, Close, Volume
# predicts the next day price
regression_model.predict([[167.81, 171.75, 165.19, 166.48, 37232900]])
###Output
_____no_output_____ |
c7_classification_performance_measures/08_Confusion_Matrix_in_Multiclass_Classification.ipynb | ###Markdown
Confusion Matrix in Multiclass Classification
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
digits = datasets.load_digits()
X = digits.data
y = digits.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8, random_state=666)
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
log_reg.score(X_test, y_test)
y_predict = log_reg.predict(X_test)
###Output
_____no_output_____
###Markdown
Computing precision for a multiclass problem
###Code
from sklearn.metrics import precision_score
precision_score(y_test, y_predict)
###Output
_____no_output_____
###Markdown
The default parameter average='binary' only handles binary classification problems
###Code
# average=None returns the per-class precision scores
precision_score(y_test, y_predict, average=None)
precision_score(y_test, y_predict, average='micro')
###Output
_____no_output_____
###Markdown
The confusion matrix naturally supports multiclass problems
###Code
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_predict)
###Output
_____no_output_____
###Markdown
An intuitive way to see where the model makes the most mistakes
###Code
cfm = confusion_matrix(y_test, y_predict)
plt.matshow(cfm, cmap=plt.cm.gray)  # render the matrix as a grayscale image
plt.show()
###Output
_____no_output_____
###Markdown
The brightest cells are where the sample counts are largest; the diagonal corresponds to correctly classified samples. However, our focus is not on the correct predictions, but on where the model gets it wrong.
###Code
# number of samples in each row
row_sums = np.sum(cfm, axis=1)
# divide so that each cell becomes a proportion of its row
err_matrix = cfm / row_sums
# the diagonal values are clearly the largest, but we do not care about them
# (they are all correct predictions), so we set the diagonal to 0
np.fill_diagonal(err_matrix, 0)
err_matrix
plt.matshow(err_matrix, cmap=plt.cm.gray)  # render the matrix as a grayscale image
plt.show()
###Output
_____no_output_____ |
variable_exploration/mk/.ipynb_checkpoints/pipeline-checkpoint.ipynb | ###Markdown
Pipeline is the boss
###Code
# Create a pipeline that standardizes the data then creates a model
import os
from datetime import datetime
import numpy as np
import pandas as pd
#read data, create listings dataframe
path = '../../data/new-york-city-airbnb-open-data/'
listings_csv = os.path.join(path,'listings.csv')
listings = pd.read_csv(listings_csv)
def less_than_50_percent(column):
    total_row = listings.shape[0]
    isnull_count = listings[column].isna().sum()
    if isnull_count/total_row > .5:
        return True
    return False
columns = list(listings)
remove_columns_0 = []
for column in columns:
remove_column_y_n = less_than_50_percent(column)
if remove_column_y_n:
remove_columns_0.append(column)
print(remove_columns_0)
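# Drop the columns flagged above; the original cell only printed the list
listings = listings.drop(columns=remove_columns_0)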
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
numeric_features = listings.select_dtypes(include=['int64', 'float64']).columns
# note: the original dropped 'Loan_Status', a leftover from a loan-dataset template
# that does not exist in the Airbnb listings data
categorical_features = listings.select_dtypes(include=['object']).columns
from sklearn.compose import ColumnTransformer
preprocessor = ColumnTransformer(
transformers=[
# ('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)])
from sklearn.ensemble import RandomForestClassifier

rf = Pipeline(steps=[('preprocessor', preprocessor),
                     ('classifier', RandomForestClassifier())])
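# Usage sketch (the target column is an assumption; this notebook never defines one):
# X = listings.drop(columns=['price'])
# y = listings['price']
# rf.fit(X, y)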
###Output
_____no_output_____ |
tutorial/3_2_Functions.ipynb | ###Markdown
3.2 Coding Concepts: Functions. A function is a block of code that can be reused elsewhere in your notebook. Each function has a name, and we execute the function by *calling* it, using the function name. Functions are the building blocks of computer programming because they allow us to organize our code into meaningful pieces. See [Functions](https://python.swaroopch.com/functions.html) for more information. Function definitions. A function is defined using the `def` statement. It requires both a function name, which is followed by parentheses `()` and a colon `:`, as well as a block of code indented below. Here is a simple function that prints `"Hello_80L"`.
###Code
# define a function that prints "hello world!"
def hello_world():
print("Hello_80l!")
###Output
_____no_output_____
###Markdown
Now we call the function by writing the function name ***and*** parentheses!
###Code
# call the function
hello_world()
###Output
Hello_80l!
###Markdown
Return statements Functions can return values. This is written using the `return` statement and the value you wish to return. When you call the function, you get the value it returns. Here we define a function that returns the value `99`. Note that the `return` statement is generally the last line of the function, because once the interpreter executes a `return` statement it leaves the function and returns to the program that called it.
###Code
# define a funtion that returns the value 99
def a_return_function():
return 99
###Output
_____no_output_____
###Markdown
Call the function.
###Code
# call the function
a_return_function()
###Output
_____no_output_____
###Markdown
We can even use that returned value to do arithmetic!
###Code
# call the function and do arithmetic
1 + a_return_function()
###Output
_____no_output_____
###Markdown
Both in one. Functions can do many things, including both printing and returning a value. Here we define a function that does both.
###Code
# define a funtion that returns the value 99
def my_first_function():
# print
print("Hello_80L!")
# return
return 99
###Output
_____no_output_____
###Markdown
And call it.
###Code
my_first_function()
###Output
Hello_80L!
###Markdown
Arguments. Functions may take one or more *arguments*. Arguments are defined by argument names within the parentheses of the function definition. Arguments allow us to pass information from outside the function into the function for use within our code block. Arguments are similar to variables because they associate a name with a value, but they are different because we don't define them using a variable declaration statement.
###Code
# define a function with arguments
def function_with_arguments(arg1, arg2):
# print the first argument
print(arg1)
# print the second argument
print(arg2)
# return the product
return arg1 * arg2
###Output
_____no_output_____
###Markdown
When we call a function that has arguments, we pass values to the function by placing them inside parentheses. Each value is passed to the corresponding argument name.
###Code
# call the function
print(function_with_arguments(3, 76))
###Output
3
76
228
###Markdown
AMB as a function. Here's the AMB function that lives inside the `mai` course package. All I've done is take the giant `while` loop from the previous tutorial and put it inside the body of a function. I added *arguments* for each of the AMB parameters and also a `return` statement to return the lists of pitches and durations generated by AMB.
###Code
import random

def amb(pitch_center=40, pitch_range=6, pulse=120, rhythm=0.0, detune=0.0, repeat=0.5, memory=5, length=24):
# start with empty lists for both pitch and duration
my_pitches = []
my_durs = []
# loop until we have enough notes
while len(my_durs) < length:
# do we look back?
if random.random() <= repeat and len(my_pitches) >= memory:
            # repeat the note from `memory` steps back
new_pitch = my_pitches[-memory]
new_dur = my_durs[-memory]
# if we don't look back
else:
# choose pitch
new_pitch = random.randint(pitch_center - pitch_range, pitch_center + pitch_range)
# microtonal pitch adjustment
new_pitch += random.uniform(-detune, detune)
# choose duration
new_dur = (60.0 / pulse) * random.uniform(1-rhythm, 1+rhythm)
# append to the melody
my_pitches += [new_pitch]
my_durs += [new_dur]
return my_pitches, my_durs
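
# Example usage (a quick sketch; the parameter values below are illustrative):
example_pitches, example_durs = amb(pitch_center=60, pitch_range=4, length=8)
print(example_pitches)
print(example_durs)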
###Output
_____no_output_____ |
week4_approx_rl/homework_tf.ipynb | ###Markdown
Deep Q-Network implementationThis notebook shamelessly demands you to implement a DQN - an approximate q-learning algorithm with experience replay and target networks - and see if it works any better this way.
###Code
#XVFB will be launched if you run on a server
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY"))==0:
!bash ../xvfb start
%env DISPLAY=:1
###Output
_____no_output_____
###Markdown
__Frameworks__ - we'll accept this homework in any deep learning framework. This particular notebook was designed for tensorflow, but you will find it easy to adapt it to almost any python-based deep learning framework.
###Code
import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Let's play some old videogames. This time we're gonna apply approximate q-learning to an atari game called Breakout. It's not the hardest thing out there, but it's definitely way more complex than anything we tried before. Processing game image: raw atari images are large, 210x160x3 by default. However, we don't need that level of detail in order to learn them. We can thus save a lot of time by preprocessing the game image, including: * Resizing to a smaller shape, 64 x 64 * Converting to grayscale * Cropping irrelevant image parts (top & bottom)
###Code
from gym.core import ObservationWrapper
from gym.spaces import Box
from scipy.misc import imresize
class PreprocessAtari(ObservationWrapper):
def __init__(self, env):
"""A gym wrapper that crops, scales image into the desired shapes and optionally grayscales it."""
ObservationWrapper.__init__(self,env)
self.img_size = (64, 64)
self.observation_space = Box(0.0, 1.0, self.img_size)
def _observation(self, img):
"""what happens to each observation"""
# Here's what you need to do:
# * crop image, remove irrelevant parts
# * resize image to self.img_size
# (use imresize imported above or any library you want,
# e.g. opencv, skimage, PIL, keras)
# * cast image to grayscale
# * convert image pixels to (0,1) range, float32 type
<Your code here>
return <...>
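
# For reference, one possible preprocessing, written as a standalone function
# (a sketch, not necessarily the intended solution; the Breakout crop bounds
# below are assumptions):
def preprocess_observation_sketch(img, img_size=(64, 64)):
    img = img[34:-16, :, :]               # crop the score bar (top) and bottom border
    img = imresize(img, img_size)         # resize to 64x64
    img = img.mean(-1, keepdims=True)     # grayscale via channel mean
    return img.astype('float32') / 255.   # scale pixels to the (0, 1) range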
import gym
#spawn game instance for tests
env = gym.make("BreakoutDeterministic-v0") #create raw env
env = PreprocessAtari(env)
observation_shape = env.observation_space.shape
n_actions = env.action_space.n
obs = env.reset()
#test observation
assert obs.ndim == 3, "observation must be [batch, time, channels] even if there's just one channel"
assert obs.shape == observation_shape
assert obs.dtype == 'float32'
assert len(np.unique(obs))>2, "your image must not be binary"
assert 0 <= np.min(obs) and np.max(obs) <=1, "convert image pixels to (0,1) range"
print "Formal tests seem fine. Here's an example of what you'll get."
plt.title("what your network gonna see")
plt.imshow(obs,interpolation='none',cmap='gray');
###Output
_____no_output_____
###Markdown
Frame buffer. Our agent can only process one observation at a time, so we gotta make sure it contains enough information to find optimal actions. For instance, the agent has to react to moving objects, so it must be able to measure an object's velocity. To do so, we introduce a buffer that stores the 4 last images. This time everything is pre-implemented for you.
###Code
from framebuffer import FrameBuffer
def make_env():
env = gym.make("BreakoutDeterministic-v4")
env = PreprocessAtari(env)
env = FrameBuffer(env, n_frames=4, dim_order='tensorflow')
return env
env = make_env()
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
for _ in range(50):
obs, _, _, _ = env.step(env.action_space.sample())
plt.title("Game image")
plt.imshow(env.render("rgb_array"))
plt.show()
plt.title("Agent observation (4 frames left to right)")
plt.imshow(obs.transpose([0,2,1]).reshape([state_dim[0],-1]));
###Output
_____no_output_____
###Markdown
Building a network. We now need to build a neural network that can map images to state q-values. This network will be called on every agent's step, so it better not be resnet-152 unless you have an array of GPUs. Instead, you can use strided convolutions with a small number of features to save time and memory. You can build any architecture you want, but for reference, here's something that will more or less work: 
###Code
import tensorflow as tf
tf.reset_default_graph()
sess = tf.InteractiveSession()
from keras.layers import Conv2D, Dense, Flatten
class DQNAgent:
def __init__(self, name, state_shape, n_actions, epsilon=0, reuse=False):
"""A simple DQN agent"""
with tf.variable_scope(name, reuse=reuse):
< Define your network body here. Please make sure you don't use any layers created elsewhere >
# prepare a graph for agent step
self.state_t = tf.placeholder('float32', [None,] + list(state_shape))
self.qvalues_t = self.get_symbolic_qvalues(self.state_t)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
self.epsilon = epsilon
def get_symbolic_qvalues(self, state_t):
"""takes agent's observation, returns qvalues. Both are tf Tensors"""
< apply your network layers here >
qvalues = < symbolic tensor for q-values >
assert tf.is_numeric_tensor(qvalues) and qvalues.shape.ndims == 2, \
"please return 2d tf tensor of qvalues [you got %s]" % repr(qvalues)
assert int(qvalues.shape[1]) == n_actions
return qvalues
def get_qvalues(self, state_t):
"""Same as symbolic step except it operates on numpy arrays"""
sess = tf.get_default_session()
return sess.run(self.qvalues_t, {self.state_t: state_t})
def sample_actions(self, qvalues):
"""pick actions given qvalues. Uses epsilon-greedy exploration strategy. """
epsilon = self.epsilon
batch_size, n_actions = qvalues.shape
random_actions = np.random.choice(n_actions, size=batch_size)
best_actions = qvalues.argmax(axis=-1)
should_explore = np.random.choice([0, 1], batch_size, p = [1-epsilon, epsilon])
return np.where(should_explore, random_actions, best_actions)
agent = DQNAgent("dqn_agent", state_dim, n_actions, epsilon=0.5)
sess.run(tf.global_variables_initializer())
###Output
_____no_output_____
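###Markdown
For reference, here is a sketch of one possible network body (an assumption following the architecture pictured above, built with the Keras layers imported in the previous cell):
###Code
def make_qnetwork_sketch(state_shape, n_actions):
    """One possible DQN body: three strided convolutions and a dense head (a sketch)."""
    from keras.models import Sequential
    return Sequential([
        Conv2D(16, (3, 3), strides=2, activation='relu', input_shape=state_shape),
        Conv2D(32, (3, 3), strides=2, activation='relu'),
        Conv2D(64, (3, 3), strides=2, activation='relu'),
        Flatten(),
        Dense(256, activation='relu'),
        Dense(n_actions)  # linear q-value output, one per action
    ])
###Output
_____no_output_____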
###Markdown
Now let's try out our agent to see if it raises any errors.
###Code
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
""" Plays n_games full games. If greedy, picks actions as argmax(qvalues). Returns mean reward. """
rewards = []
for _ in range(n_games):
s = env.reset()
reward = 0
for _ in range(t_max):
qvalues = agent.get_qvalues([s])
action = qvalues.argmax(axis=-1)[0] if greedy else agent.sample_actions(qvalues)[0]
s, r, done, _ = env.step(action)
reward += r
if done: break
rewards.append(reward)
return np.mean(rewards)
evaluate(env, agent, n_games=1)
###Output
_____no_output_____
###Markdown
Experience replay. For this assignment, we provide you with an experience replay buffer. If you implemented an experience replay buffer in last week's assignment, you can copy-paste it here __to get 2 bonus points__. The interface is fairly simple:* `exp_replay.add(obs, act, rw, next_obs, done)` - saves (s,a,r,s',done) tuple into the buffer* `exp_replay.sample(batch_size)` - returns observations, actions, rewards, next_observations and is_done for `batch_size` random samples.* `len(exp_replay)` - returns number of elements stored in replay buffer.
###Code
from replay_buffer import ReplayBuffer
exp_replay = ReplayBuffer(10)
for _ in range(30):
exp_replay.add(env.reset(), env.action_space.sample(), 1.0, env.reset(), done=False)
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(5)
assert len(exp_replay) == 10, "experience replay size should be 10 because that's what maximum capacity is"
def play_and_record(agent, env, exp_replay, n_steps=1):
"""
Play the game for exactly n steps, record every (s,a,r,s', done) to replay buffer.
Whenever game ends, add record with done=True and reset the game.
It is guaranteed that env has done=False when passed to this function.
:returns: return sum of rewards over time
"""
# Play the game for n_steps as per instructions above
<YOUR CODE>
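
    # A minimal sketch of one possible loop (note: the FrameBuffer wrapper
    # exposes its current observation via env.framebuffer):
    #
    #     rewards = 0.0
    #     s = env.framebuffer
    #     for _ in range(n_steps):
    #         a = agent.sample_actions(agent.get_qvalues([s]))[0]
    #         next_s, r, done, _ = env.step(a)
    #         exp_replay.add(s, a, r, next_s, done)
    #         rewards += r
    #         s = env.reset() if done else next_s
    #     return rewards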
# testing your code. This may take a minute...
exp_replay = ReplayBuffer(20000)
play_and_record(agent, env, exp_replay, n_steps=10000)
# if you're using your own experience replay buffer, some of those tests may need correction.
# just make sure you know what your code does
assert len(exp_replay) == 10000, "play_and_record should have added exactly 10000 steps, "\
"but instead added %i"%len(exp_replay)
is_dones = list(zip(*exp_replay._storage))[-1]
assert 0 < np.mean(is_dones) < 0.1, "Please make sure you restart the game whenever it is 'done' and record the is_done correctly into the buffer."\
"Got %f is_done rate over %i steps. [If you think it's your tough luck, just re-run the test]"%(np.mean(is_dones), len(exp_replay))
for _ in range(100):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(10)
assert obs_batch.shape == next_obs_batch.shape == (10,) + state_dim
assert act_batch.shape == (10,), "actions batch should have shape (10,) but is instead %s"%str(act_batch.shape)
assert reward_batch.shape == (10,), "rewards batch should have shape (10,) but is instead %s"%str(reward_batch.shape)
assert is_done_batch.shape == (10,), "is_done batch should have shape (10,) but is instead %s"%str(is_done_batch.shape)
assert [int(i) in (0,1) for i in is_dones], "is_done should be strictly True or False"
assert [0 <= a <= n_actions for a in act_batch], "actions should be within [0, n_actions]"
print("Well done!")
###Output
_____no_output_____
###Markdown
Target networks. We also employ the so-called "target network" - a copy of the neural network weights to be used for reference Q-values. The network itself is an exact copy of the agent network, but its parameters are not trained. Instead, they are copied over from the agent's actual network every so often. $$ Q_{reference}(s,a) = r + \gamma \cdot \max _{a'} Q_{target}(s',a') $$
###Code
target_network = DQNAgent("target_network", state_dim, n_actions)
def load_weigths_into_target_network(agent, target_network):
""" assign target_network.weights variables to their respective agent.weights values. """
assigns = []
for w_agent, w_target in zip(agent.weights, target_network.weights):
assigns.append(tf.assign(w_target, w_agent, validate_shape=True))
tf.get_default_session().run(assigns)
load_weigths_into_target_network(agent, target_network)
# check that it works
sess.run([tf.assert_equal(w, w_target) for w, w_target in zip(agent.weights, target_network.weights)]);
print("It works!")
###Output
_____no_output_____
###Markdown
Learning with... Q-learning. Here we write a function similar to `agent.update` from tabular q-learning.
###Code
# placeholders that will be fed with exp_replay.sample(batch_size)
obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
actions_ph = tf.placeholder(tf.int32, shape=[None])
rewards_ph = tf.placeholder(tf.float32, shape=[None])
next_obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
is_done_ph = tf.placeholder(tf.float32, shape=[None])
is_not_done = 1 - is_done_ph
gamma = 0.99
###Output
_____no_output_____
###Markdown
Take q-values for actions agent just took
###Code
current_qvalues = agent.get_symbolic_qvalues(obs_ph)
current_action_qvalues = tf.reduce_sum(tf.one_hot(actions_ph, n_actions) * current_qvalues, axis=1)
###Output
_____no_output_____
###Markdown
Compute Q-learning TD error:$$ L = { 1 \over N} \sum_i [ Q_{\theta}(s,a) - Q_{reference}(s,a) ] ^2 $$With Q-reference defined as$$ Q_{reference}(s,a) = r(s,a) + \gamma \cdot max_{a'} Q_{target}(s', a') $$Where* $Q_{target}(s',a')$ denotes q-value of next state and next action predicted by __target_network__* $s, a, r, s'$ are current state, action, reward and next state respectively* $\gamma$ is a discount factor defined two cells above.
###Code
next_qvalues_target = ### YOUR CODE: compute q-values for NEXT states with target network
next_state_values_target = ### YOUR CODE: compute state values by taking max over next_qvalues_target for all actions
reference_qvalues = ### YOUR CODE: compute Q_reference(s,a) as per formula above
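
# A sketch of one possibility (using the target_network defined above):
#     next_qvalues_target = target_network.get_symbolic_qvalues(next_obs_ph)
#     next_state_values_target = tf.reduce_max(next_qvalues_target, axis=-1)
#     reference_qvalues = rewards_ph + gamma * next_state_values_target * is_not_done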
# Define loss function for sgd.
td_loss = (current_action_qvalues - reference_qvalues) ** 2
td_loss = tf.reduce_mean(td_loss)
train_step = tf.train.AdamOptimizer(1e-3).minimize(td_loss, var_list=agent.weights)
sess.run(tf.global_variables_initializer())
for chk_grad in tf.gradients(reference_qvalues, agent.weights):
error_msg = "Reference q-values should have no gradient w.r.t. agent weights. Make sure you used target_network qvalues! "
error_msg += "If you know what you're doing, ignore this assert."
assert chk_grad is None or np.allclose(sess.run(chk_grad), sess.run(chk_grad * 0)), error_msg
assert tf.gradients(reference_qvalues, is_not_done)[0] is not None, "make sure you used is_not_done"
assert tf.gradients(reference_qvalues, rewards_ph)[0] is not None, "make sure you used rewards"
assert tf.gradients(reference_qvalues, next_obs_ph)[0] is not None, "make sure you used next states"
assert tf.gradients(reference_qvalues, obs_ph)[0] is None, "reference qvalues shouldn't depend on current observation!" # ignore if you're certain it's ok
print("Splendid!")
###Output
_____no_output_____
###Markdown
Main loop. It's time to put everything together and see if it learns anything.
###Code
from tqdm import trange
from IPython.display import clear_output
import matplotlib.pyplot as plt
from pandas import DataFrame
moving_average = lambda x, span=100, **kw: DataFrame({'x': np.asarray(x)}).x.ewm(span=span, **kw).mean().values
%matplotlib inline
mean_rw_history = []
td_loss_history = []
exp_replay = ReplayBuffer(10**5)
play_and_record(agent, env, exp_replay, n_steps=10000)
def sample_batch(exp_replay, batch_size):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(batch_size)
return {
obs_ph:obs_batch, actions_ph:act_batch, rewards_ph:reward_batch,
next_obs_ph:next_obs_batch, is_done_ph:is_done_batch
}
for i in trange(10**5):
# play
play_and_record(agent, env, exp_replay, 10)
# train
_, loss_t = sess.run([train_step, td_loss], sample_batch(exp_replay, batch_size=64))
td_loss_history.append(loss_t)
# adjust agent parameters
if i % 500 == 0:
load_weigths_into_target_network(agent, target_network)
agent.epsilon = max(agent.epsilon * 0.99, 0.01)
mean_rw_history.append(evaluate(make_env(), agent, n_games=3))
if i % 100 == 0:
clear_output(True)
print("buffer size = %i, epsilon = %.5f" % (len(exp_replay), agent.epsilon))
plt.subplot(1,2,1)
plt.title("mean reward per game")
plt.plot(mean_rw_history)
plt.grid()
assert not np.isnan(loss_t)
plt.figure(figsize=[12, 4])
plt.subplot(1,2,2)
plt.title("TD loss history (moving average)")
plt.plot(moving_average(np.array(td_loss_history), span=100, min_periods=100))
plt.grid()
plt.show()
assert np.mean(mean_rw_history[-10:]) > 10.
print("That's good enough for tutorial.")
###Output
_____no_output_____
###Markdown
__How to interpret plots:__ This ain't supervised learning, so don't expect anything to improve monotonically. * __TD loss__ is the MSE between the agent's current Q-values and target Q-values. It may slowly increase or decrease; that's ok. The "not ok" behavior includes going NaN or staying at exactly zero before the agent has perfect performance.* __mean reward__ is the expected sum of r(s,a) the agent gets over the full game session. It will oscillate, but on average it should get higher over time (after a few thousand iterations...). * In a basic q-learning implementation it takes 5-10k steps to "warm up" the agent before it starts to get better.* __buffer size__ - this one is simple. It should go up and cap at max size.* __epsilon__ - the agent's willingness to explore. If you see that the agent is already at 0.01 epsilon before its average reward is above 0, __it means you need to increase epsilon__. Set it back to some 0.2 - 0.5 and decrease the pace at which it goes down.* Also please ignore the first 100-200 steps of each plot - they're just oscillations because of the way the moving average works. At first your agent will lose quickly. Then it will learn to suck less and at least hit the ball a few times before it loses. Finally it will learn to actually score points. __Training will take time.__ A lot of it actually. An optimistic estimate is that it starts winning (average reward > 10) after 10k steps. But hey, look on the bright side of things: Video
###Code
agent.epsilon=0 # Don't forget to reset epsilon back to previous value if you want to go on training
#record sessions
import gym.wrappers
env_monitor = gym.wrappers.Monitor(make_env(),directory="videos",force=True)
sessions = [evaluate(env_monitor, agent, n_games=1) for _ in range(100)]
env_monitor.close()
#show video
from IPython.display import HTML
import os
video_names = list(filter(lambda s:s.endswith(".mp4"),os.listdir("./videos/")))
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format("./videos/"+video_names[-1])) #this may or may not be _last_ video. Try other indices
###Output
_____no_output_____
###Markdown
Deep Q-Network implementationThis notebook shamelessly demands you to implement a DQN - an approximate q-learning algorithm with experience replay and target networks - and see if it works any better this way.
###Code
#XVFB will be launched if you run on a server
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY"))==0:
!bash ../xvfb start
%env DISPLAY=:1
###Output
_____no_output_____
###Markdown
__Frameworks__ - we'll accept this homework in any deep learning framework. This particular notebook was designed for tensorflow, but you will find it easy to adapt it to almost any python-based deep learning framework.
###Code
import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Let's play some old videogames. This time we're gonna apply approximate q-learning to an atari game called Breakout. It's not the hardest thing out there, but it's definitely way more complex than anything we tried before. Processing game image: raw atari images are large, 210x160x3 by default. However, we don't need that level of detail in order to learn them. We can thus save a lot of time by preprocessing the game image, including: * Resizing to a smaller shape, 64 x 64 * Converting to grayscale * Cropping irrelevant image parts (top & bottom)
###Code
from gym.core import ObservationWrapper
from gym.spaces import Box
from scipy.misc import imresize
class PreprocessAtari(ObservationWrapper):
def __init__(self, env):
"""A gym wrapper that crops, scales image into the desired shapes and optionally grayscales it."""
ObservationWrapper.__init__(self,env)
self.img_size = (64, 64)
self.observation_space = Box(0.0, 1.0, self.img_size)
def _observation(self, img):
"""what happens to each observation"""
# Here's what you need to do:
# * crop image, remove irrelevant parts
# * resize image to self.img_size
# (use imresize imported above or any library you want,
# e.g. opencv, skimage, PIL, keras)
# * cast image to grayscale
# * convert image pixels to (0,1) range, float32 type
<Your code here>
return <...>
import gym
#spawn game instance for tests
env = gym.make("BreakoutDeterministic-v0") #create raw env
env = PreprocessAtari(env)
observation_shape = env.observation_space.shape
n_actions = env.action_space.n
obs = env.reset()
#test observation
assert obs.ndim == 3, "observation must be [batch, time, channels] even if there's just one channel"
assert obs.shape == observation_shape
assert obs.dtype == 'float32'
assert len(np.unique(obs))>2, "your image must not be binary"
assert 0 <= np.min(obs) and np.max(obs) <=1, "convert image pixels to (0,1) range"
print "Formal tests seem fine. Here's an example of what you'll get."
plt.title("what your network gonna see")
plt.imshow(obs,interpolation='none',cmap='gray');
###Output
_____no_output_____
###Markdown
Frame buffer. Our agent can only process one observation at a time, so we gotta make sure it contains enough information to find optimal actions. For instance, the agent has to react to moving objects, so it must be able to measure an object's velocity. To do so, we introduce a buffer that stores the 4 last images. This time everything is pre-implemented for you.
###Code
from framebuffer import FrameBuffer
def make_env():
env = gym.make("BreakoutDeterministic-v4")
env = PreprocessAtari(env)
env = FrameBuffer(env, n_frames=4, dim_order='tensorflow')
return env
env = make_env()
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
for _ in range(50):
obs, _, _, _ = env.step(env.action_space.sample())
plt.title("Game image")
plt.imshow(env.render("rgb_array"))
plt.show()
plt.title("Agent observation (4 frames left to right)")
plt.imshow(obs.transpose([0,2,1]).reshape([state_dim[0],-1]));
###Output
_____no_output_____
###Markdown
Building a network. We now need to build a neural network that can map images to state q-values. This network will be called on every agent's step, so it better not be resnet-152 unless you have an array of GPUs. Instead, you can use strided convolutions with a small number of features to save time and memory. You can build any architecture you want, but for reference, here's something that will more or less work: 
###Code
import tensorflow as tf
tf.reset_default_graph()
sess = tf.InteractiveSession()
from keras.layers import Conv2D, Dense, Flatten
class DQNAgent:
def __init__(self, name, state_shape, n_actions, epsilon=0, reuse=False):
"""A simple DQN agent"""
with tf.variable_scope(name, reuse=reuse):
< Define your network body here. Please make sure you don't use any layers created elsewhere >
# prepare a graph for agent step
self.state_t = tf.placeholder('float32', [None,] + list(state_shape))
self.qvalues_t = self.get_symbolic_qvalues(self.state_t)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
self.epsilon = epsilon
def get_symbolic_qvalues(self, state_t):
"""takes agent's observation, returns qvalues. Both are tf Tensors"""
< apply your network layers here >
qvalues = < symbolic tensor for q-values >
assert tf.is_numeric_tensor(qvalues) and qvalues.shape.ndims == 2, \
"please return 2d tf tensor of qvalues [you got %s]" % repr(qvalues)
assert int(qvalues.shape[1]) == n_actions
return qvalues
def get_qvalues(self, state_t):
"""Same as symbolic step except it operates on numpy arrays"""
sess = tf.get_default_session()
return sess.run(self.qvalues_t, {self.state_t: state_t})
def sample_actions(self, qvalues):
"""pick actions given qvalues. Uses epsilon-greedy exploration strategy. """
epsilon = self.epsilon
batch_size, n_actions = qvalues.shape
random_actions = np.random.choice(n_actions, size=batch_size)
best_actions = qvalues.argmax(axis=-1)
should_explore = np.random.choice([0, 1], batch_size, p = [1-epsilon, epsilon])
return np.where(should_explore, random_actions, best_actions)
agent = DQNAgent("dqn_agent", state_dim, n_actions, epsilon=0.5)
sess.run(tf.global_variables_initializer())
###Output
_____no_output_____
###Markdown
Now let's try out our agent to see if it raises any errors.
###Code
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
""" Plays n_games full games. If greedy, picks actions as argmax(qvalues). Returns mean reward. """
rewards = []
for _ in range(n_games):
s = env.reset()
reward = 0
for _ in range(t_max):
qvalues = agent.get_qvalues([s])
action = qvalues.argmax(axis=-1)[0] if greedy else agent.sample_actions(qvalues)[0]
s, r, done, _ = env.step(action)
reward += r
if done: break
rewards.append(reward)
return np.mean(rewards)
evaluate(env, agent, n_games=1)
###Output
_____no_output_____
###Markdown
Experience replay. For this assignment, we provide you with an experience replay buffer. If you implemented an experience replay buffer in last week's assignment, you can copy-paste it here __to get 2 bonus points__. The interface is fairly simple:* `exp_replay.add(obs, act, rw, next_obs, done)` - saves (s,a,r,s',done) tuple into the buffer* `exp_replay.sample(batch_size)` - returns observations, actions, rewards, next_observations and is_done for `batch_size` random samples.* `len(exp_replay)` - returns number of elements stored in replay buffer.
###Code
from replay_buffer import ReplayBuffer
exp_replay = ReplayBuffer(10)
for _ in range(30):
exp_replay.add(env.reset(), env.action_space.sample(), 1.0, env.reset(), done=False)
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(5)
assert len(exp_replay) == 10, "experience replay size should be 10 because that's what maximum capacity is"
def play_and_record(agent, env, exp_replay, n_steps=1):
"""
Play the game for exactly n steps, record every (s,a,r,s', done) to replay buffer.
Whenever game ends, add record with done=True and reset the game.
It is guaranteed that env has done=False when passed to this function.
PLEASE DO NOT RESET ENV UNLESS IT IS "DONE"
:returns: return sum of rewards over time
"""
# initial state
s = env.framebuffer
# Play the game for n_steps as per instructions above
<YOUR CODE>
# testing your code. This may take a minute...
exp_replay = ReplayBuffer(20000)
play_and_record(agent, env, exp_replay, n_steps=10000)
# if you're using your own experience replay buffer, some of those tests may need correction.
# just make sure you know what your code does
assert len(exp_replay) == 10000, "play_and_record should have added exactly 10000 steps, "\
"but instead added %i"%len(exp_replay)
is_dones = list(zip(*exp_replay._storage))[-1]
assert 0 < np.mean(is_dones) < 0.1, "Please make sure you restart the game whenever it is 'done' and record the is_done correctly into the buffer."\
"Got %f is_done rate over %i steps. [If you think it's your tough luck, just re-run the test]"%(np.mean(is_dones), len(exp_replay))
for _ in range(100):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(10)
assert obs_batch.shape == next_obs_batch.shape == (10,) + state_dim
assert act_batch.shape == (10,), "actions batch should have shape (10,) but is instead %s"%str(act_batch.shape)
assert reward_batch.shape == (10,), "rewards batch should have shape (10,) but is instead %s"%str(reward_batch.shape)
assert is_done_batch.shape == (10,), "is_done batch should have shape (10,) but is instead %s"%str(is_done_batch.shape)
assert [int(i) in (0,1) for i in is_dones], "is_done should be strictly True or False"
assert [0 <= a <= n_actions for a in act_batch], "actions should be within [0, n_actions]"
print("Well done!")
###Output
_____no_output_____
###Markdown
Target networks. We also employ the so-called "target network" - a copy of the neural network weights to be used for reference Q-values. The network itself is an exact copy of the agent network, but its parameters are not trained. Instead, they are copied over from the agent's actual network every so often. $$ Q_{reference}(s,a) = r + \gamma \cdot \max _{a'} Q_{target}(s',a') $$
###Code
target_network = DQNAgent("target_network", state_dim, n_actions)
def load_weigths_into_target_network(agent, target_network):
""" assign target_network.weights variables to their respective agent.weights values. """
assigns = []
for w_agent, w_target in zip(agent.weights, target_network.weights):
assigns.append(tf.assign(w_target, w_agent, validate_shape=True))
# tf.get_default_session().run(assigns)
return assigns
# create the tf copy graph only once.
copy_step=load_weigths_into_target_network(agent, target_network)
sess.run(copy_step)
# check that it works
sess.run([tf.assert_equal(w, w_target) for w, w_target in zip(agent.weights, target_network.weights)]);
print("It works!")
###Output
_____no_output_____
###Markdown
Learning with... Q-learning. Here we write a function similar to `agent.update` from tabular q-learning.
###Code
# placeholders that will be fed with exp_replay.sample(batch_size)
obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
actions_ph = tf.placeholder(tf.int32, shape=[None])
rewards_ph = tf.placeholder(tf.float32, shape=[None])
next_obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
is_done_ph = tf.placeholder(tf.float32, shape=[None])
is_not_done = 1 - is_done_ph
gamma = 0.99
###Output
_____no_output_____
###Markdown
Take q-values for actions agent just took
###Code
current_qvalues = agent.get_symbolic_qvalues(obs_ph)
current_action_qvalues = tf.reduce_sum(tf.one_hot(actions_ph, n_actions) * current_qvalues, axis=1)
###Output
_____no_output_____
###Markdown
Compute Q-learning TD error:$$ L = { 1 \over N} \sum_i [ Q_{\theta}(s,a) - Q_{reference}(s,a) ] ^2 $$With Q-reference defined as$$ Q_{reference}(s,a) = r(s,a) + \gamma \cdot max_{a'} Q_{target}(s', a') $$Where* $Q_{target}(s',a')$ denotes q-value of next state and next action predicted by __target_network__* $s, a, r, s'$ are current state, action, reward and next state respectively* $\gamma$ is a discount factor defined two cells above.
###Code
next_qvalues_target = ### YOUR CODE: compute q-values for NEXT states with target network
next_state_values_target = ### YOUR CODE: compute state values by taking max over next_qvalues_target for all actions
reference_qvalues = ### YOUR CODE: compute Q_reference(s,a) as per formula above
# Define loss function for sgd.
td_loss = (current_action_qvalues - reference_qvalues) ** 2
td_loss = tf.reduce_mean(td_loss)
train_step = tf.train.AdamOptimizer(1e-3).minimize(td_loss, var_list=agent.weights)
sess.run(tf.global_variables_initializer())
for chk_grad in tf.gradients(reference_qvalues, agent.weights):
error_msg = "Reference q-values should have no gradient w.r.t. agent weights. Make sure you used target_network qvalues! "
error_msg += "If you know what you're doing, ignore this assert."
assert chk_grad is None or np.allclose(sess.run(chk_grad), sess.run(chk_grad * 0)), error_msg
assert tf.gradients(reference_qvalues, is_not_done)[0] is not None, "make sure you used is_not_done"
assert tf.gradients(reference_qvalues, rewards_ph)[0] is not None, "make sure you used rewards"
assert tf.gradients(reference_qvalues, next_obs_ph)[0] is not None, "make sure you used next states"
assert tf.gradients(reference_qvalues, obs_ph)[0] is None, "reference qvalues shouldn't depend on current observation!" # ignore if you're certain it's ok
print("Splendid!")
###Output
_____no_output_____
###Markdown
Main loop. It's time to put everything together and see if it learns anything.
###Code
from tqdm import trange
from IPython.display import clear_output
import matplotlib.pyplot as plt
from pandas import DataFrame
moving_average = lambda x, span=100, **kw: DataFrame({'x':np.asarray(x)}).x.ewm(span=span, **kw).mean().values
%matplotlib inline
mean_rw_history = []
td_loss_history = []
exp_replay = ReplayBuffer(10**5)
play_and_record(agent, env, exp_replay, n_steps=10000)
def sample_batch(exp_replay, batch_size):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(batch_size)
return {
obs_ph:obs_batch, actions_ph:act_batch, rewards_ph:reward_batch,
next_obs_ph:next_obs_batch, is_done_ph:is_done_batch
}
for i in trange(10**5):
# play
play_and_record(agent, env, exp_replay, 10)
# train
_, loss_t = sess.run([train_step, td_loss], sample_batch(exp_replay, batch_size=64))
td_loss_history.append(loss_t)
# adjust agent parameters
if i % 500 == 0:
#load_weigths_into_target_network(agent, target_network)
#calling 'load_weights_into_target_network' repeatedly cause creating tf copy operator
#again and again, which bloat memory consumption along training step
#create'copy_step' once
sess.run(copy_step)
agent.epsilon = max(agent.epsilon * 0.99, 0.01)
mean_rw_history.append(evaluate(make_env(), agent, n_games=3))
if i % 100 == 0:
clear_output(True)
print("buffer size = %i, epsilon = %.5f" % (len(exp_replay), agent.epsilon))
plt.subplot(1,2,1)
plt.title("mean reward per game")
plt.plot(mean_rw_history)
plt.grid()
assert not np.isnan(loss_t)
plt.figure(figsize=[12, 4])
plt.subplot(1,2,2)
plt.title("TD loss history (moving average)")
plt.plot(moving_average(np.array(td_loss_history), span=100, min_periods=100))
plt.grid()
plt.show()
assert np.mean(mean_rw_history[-10:]) > 10.
print("That's good enough for tutorial.")
###Output
_____no_output_____
###Markdown
__How to interpret plots:__ This ain't supervised learning, so don't expect anything to improve monotonically. * __TD loss__ is the MSE between the agent's current Q-values and target Q-values. It may slowly increase or decrease; that's ok. The "not ok" behavior includes going NaN or staying at exactly zero before the agent has perfect performance.* __mean reward__ is the expected sum of r(s,a) the agent gets over the full game session. It will oscillate, but on average it should get higher over time (after a few thousand iterations...). * In a basic q-learning implementation it takes 5-10k steps to "warm up" the agent before it starts to get better.* __buffer size__ - this one is simple. It should go up and cap at max size.* __epsilon__ - the agent's willingness to explore. If you see that the agent is already at 0.01 epsilon before its average reward is above 0, __it means you need to increase epsilon__. Set it back to some 0.2 - 0.5 and decrease the pace at which it goes down.* Also please ignore the first 100-200 steps of each plot - they're just oscillations because of the way the moving average works. At first your agent will lose quickly. Then it will learn to suck less and at least hit the ball a few times before it loses. Finally it will learn to actually score points. __Training will take time.__ A lot of it actually. An optimistic estimate is that it starts winning (average reward > 10) after 10k steps. But hey, look on the bright side of things: Video
###Code
agent.epsilon=0 # Don't forget to reset epsilon back to previous value if you want to go on training
#record sessions
import gym.wrappers
env_monitor = gym.wrappers.Monitor(make_env(),directory="videos",force=True)
sessions = [evaluate(env_monitor, agent, n_games=1) for _ in range(100)]
env_monitor.close()
#show video
from IPython.display import HTML
import os
video_names = list(filter(lambda s:s.endswith(".mp4"),os.listdir("./videos/")))
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format("./videos/"+video_names[-1])) #this may or may not be _last_ video. Try other indices
###Output
_____no_output_____
###Markdown
Deep Q-Network implementationThis notebook shamelessly demands you to implement a DQN - an approximate q-learning algorithm with experience replay and target networks - and see if it works any better this way.
###Code
#XVFB will be launched if you run on a server
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY"))==0:
!bash ../xvfb start
%env DISPLAY=:1
###Output
_____no_output_____
###Markdown
__Frameworks__ - we'll accept this homework in any deep learning framework. This particular notebook was designed for tensorflow, but you will find it easy to adapt it to almost any python-based deep learning framework.
###Code
import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Let's play some old videogames. This time we're gonna apply approximate q-learning to an atari game called Breakout. It's not the hardest thing out there, but it's definitely way more complex than anything we tried before. Processing game image: raw atari images are large, 210x160x3 by default. However, we don't need that level of detail in order to learn them. We can thus save a lot of time by preprocessing the game image, including: * Resizing to a smaller shape, 64 x 64 * Converting to grayscale * Cropping irrelevant image parts (top & bottom)
###Code
from gym.core import ObservationWrapper
from gym.spaces import Box
from scipy.misc import imresize
class PreprocessAtari(ObservationWrapper):
def __init__(self, env):
"""A gym wrapper that crops, scales image into the desired shapes and optionally grayscales it."""
ObservationWrapper.__init__(self,env)
self.img_size = (64, 64)
        self.observation_space = Box(0.0, 1.0, (self.img_size[0], self.img_size[1], 1))  # H x W x 1: grayscale with an explicit channel axis
def _observation(self, img):
"""what happens to each observation"""
# Here's what you need to do:
# * crop image, remove irrelevant parts
# * resize image to self.img_size
# (use imresize imported above or any library you want,
# e.g. opencv, skimage, PIL, keras)
# * cast image to grayscale
# * convert image pixels to (0,1) range, float32 type
        # one possible implementation:
        img = img[50:-5, 5:-5]                    # crop scores (top) and a thin border
        img = imresize(img, self.img_size)        # downsample to 64 x 64 (uint8)
        img = img.mean(-1, keepdims=True) / 255.  # grayscale, scale pixels to (0,1)
        return np.float32(img)                    # float32, shape (64, 64, 1)
import gym
#spawn game instance for tests
env = gym.make("BreakoutDeterministic-v0") #create raw env
env = PreprocessAtari(env)
observation_shape = env.observation_space.shape
n_actions = env.action_space.n
obs = env.reset()
#test observation
assert obs.ndim == 3, "observation must be [height, width, channels] even if there's just one channel"
assert obs.shape == observation_shape
assert obs.dtype == 'float32'
assert len(np.unique(obs))>2, "your image must not be binary"
assert 0 <= np.min(obs) and np.max(obs) <=1, "convert image pixels to (0,1) range"
print("Formal tests seem fine. Here's an example of what you'll get.")
plt.title("what your network gonna see")
plt.imshow(obs[:, :, 0], interpolation='none', cmap='gray');
###Output
_____no_output_____
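###Markdown
The grayscale cast above is just a weighted average over the color channels; if you want it explicit, the standard ITU-R 601 luma weights are a common choice (the helper name `to_gray` is ours, not part of the assignment):
###Code
def to_gray(img_rgb):
    """manual grayscale with the standard luma weights (an alternative to a plain channel mean)"""
    weights = np.array([0.299, 0.587, 0.114], dtype='float32')
    return (img_rgb @ weights)[..., None]  # keep an explicit channel axis
###Output
_____no_output_____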
###Markdown
Frame bufferOur agent can only process one observation at a time, so we gotta make sure it contains enough information to find optimal actions. For instance, the agent has to react to moving objects, so it must be able to measure the object's velocity.To do so, we introduce a buffer that stores the last 4 images. This time everything is pre-implemented for you.
###Code
from framebuffer import FrameBuffer
def make_env():
env = gym.make("BreakoutDeterministic-v4")
env = PreprocessAtari(env)
env = FrameBuffer(env, n_frames=4, dim_order='tensorflow')
return env
env = make_env()
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
for _ in range(50):
obs, _, _, _ = env.step(env.action_space.sample())
plt.title("Game image")
plt.imshow(env.render("rgb_array"))
plt.show()
plt.title("Agent observation (4 frames left to right)")
plt.imshow(obs.transpose([0,2,1]).reshape([state_dim[0],-1]));
###Output
_____no_output_____
###Markdown
Building a networkWe now need to build a neural network that can map images to state q-values. This network will be called on every agent step, so it had better not be ResNet-152 unless you have an array of GPUs. Instead, you can use strided convolutions with a small number of features to save time and memory.You can build any architecture you want, but for reference, here's something that will more or less work: 
###Code
import tensorflow as tf
tf.reset_default_graph()
sess = tf.InteractiveSession()
import keras
from keras.layers import Conv2D, Dense, Flatten, InputLayer
class DQNAgent:
def __init__(self, name, state_shape, n_actions, epsilon=0, reuse=False):
"""A simple DQN agent"""
with tf.variable_scope(name, reuse=reuse):
            # one reasonable choice of network body: three strided convolutions + a dense head
            self.network = keras.models.Sequential()
            self.network.add(InputLayer(state_shape))
            self.network.add(Conv2D(16, (3, 3), strides=2, activation='relu'))
            self.network.add(Conv2D(32, (3, 3), strides=2, activation='relu'))
            self.network.add(Conv2D(64, (3, 3), strides=2, activation='relu'))
            self.network.add(Flatten())
            self.network.add(Dense(256, activation='relu'))
            self.network.add(Dense(n_actions))
# prepare a graph for agent step
self.state_t = tf.placeholder('float32', [None,] + list(state_shape))
self.qvalues_t = self.get_symbolic_qvalues(self.state_t)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
self.epsilon = epsilon
def get_symbolic_qvalues(self, state_t):
"""takes agent's observation, returns qvalues. Both are tf Tensors"""
        qvalues = self.network(state_t)
assert tf.is_numeric_tensor(qvalues) and qvalues.shape.ndims == 2, \
"please return 2d tf tensor of qvalues [you got %s]" % repr(qvalues)
assert int(qvalues.shape[1]) == n_actions
return qvalues
def get_qvalues(self, state_t):
"""Same as symbolic step except it operates on numpy arrays"""
sess = tf.get_default_session()
return sess.run(self.qvalues_t, {self.state_t: state_t})
def sample_actions(self, qvalues):
"""pick actions given qvalues. Uses epsilon-greedy exploration strategy. """
epsilon = self.epsilon
batch_size, n_actions = qvalues.shape
random_actions = np.random.choice(n_actions, size=batch_size)
best_actions = qvalues.argmax(axis=-1)
should_explore = np.random.choice([0, 1], batch_size, p = [1-epsilon, epsilon])
return np.where(should_explore, random_actions, best_actions)
agent = DQNAgent("dqn_agent", state_dim, n_actions, epsilon=0.5)
sess.run(tf.global_variables_initializer())
###Output
_____no_output_____
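###Markdown
Before training, it's worth sanity-checking how big the network actually is; a quick count over the agent's trainable variables:
###Code
n_params = sum(int(np.prod(w.shape.as_list())) for w in agent.weights)
print("trainable parameters: %i" % n_params)
###Output
_____no_output_____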
###Markdown
Now let's try out our agent to see if it raises any errors.
###Code
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
""" Plays n_games full games. If greedy, picks actions as argmax(qvalues). Returns mean reward. """
rewards = []
for _ in range(n_games):
s = env.reset()
reward = 0
for _ in range(t_max):
qvalues = agent.get_qvalues([s])
action = qvalues.argmax(axis=-1)[0] if greedy else agent.sample_actions(qvalues)[0]
s, r, done, _ = env.step(action)
reward += r
if done: break
rewards.append(reward)
return np.mean(rewards)
evaluate(env, agent, n_games=1)
###Output
_____no_output_____
###Markdown
Experience replayFor this assignment, we provide you with an experience replay buffer. If you implemented an experience replay buffer in last week's assignment, you can copy-paste it here __to get 2 bonus points__. The interface is fairly simple:* `exp_replay.add(obs, act, rw, next_obs, done)` - saves an (s,a,r,s',done) tuple into the buffer* `exp_replay.sample(batch_size)` - returns observations, actions, rewards, next_observations and is_done for `batch_size` random samples* `len(exp_replay)` - returns the number of elements stored in the replay buffer.
###Code
from replay_buffer import ReplayBuffer
exp_replay = ReplayBuffer(10)
for _ in range(30):
exp_replay.add(env.reset(), env.action_space.sample(), 1.0, env.reset(), done=False)
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(5)
assert len(exp_replay) == 10, "experience replay size should be 10 because that's what maximum capacity is"
def play_and_record(agent, env, exp_replay, n_steps=1):
"""
Play the game for exactly n steps, record every (s,a,r,s', done) to replay buffer.
Whenever game ends, add record with done=True and reset the game.
It is guaranteed that env has done=False when passed to this function.
PLEASE DO NOT RESET ENV UNLESS IT IS "DONE"
:returns: return sum of rewards over time
"""
# initial state
s = env.framebuffer
# Play the game for n_steps as per instructions above
    total_reward = 0.0
    for _ in range(n_steps):
        qvalues = agent.get_qvalues([s])
        action = agent.sample_actions(qvalues)[0]
        next_s, r, done, _ = env.step(action)
        exp_replay.add(s, action, r, next_s, done)
        total_reward += r
        s = next_s
        if done:
            s = env.reset()
    return total_reward
# testing your code. This may take a minute...
exp_replay = ReplayBuffer(20000)
play_and_record(agent, env, exp_replay, n_steps=10000)
# if you're using your own experience replay buffer, some of those tests may need correction.
# just make sure you know what your code does
assert len(exp_replay) == 10000, "play_and_record should have added exactly 10000 steps, "\
"but instead added %i"%len(exp_replay)
is_dones = list(zip(*exp_replay._storage))[-1]
assert 0 < np.mean(is_dones) < 0.1, "Please make sure you restart the game whenever it is 'done' and record the is_done correctly into the buffer."\
"Got %f is_done rate over %i steps. [If you think it's your tough luck, just re-run the test]"%(np.mean(is_dones), len(exp_replay))
for _ in range(100):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(10)
assert obs_batch.shape == next_obs_batch.shape == (10,) + state_dim
assert act_batch.shape == (10,), "actions batch should have shape (10,) but is instead %s"%str(act_batch.shape)
assert reward_batch.shape == (10,), "rewards batch should have shape (10,) but is instead %s"%str(reward_batch.shape)
assert is_done_batch.shape == (10,), "is_done batch should have shape (10,) but is instead %s"%str(is_done_batch.shape)
assert all(int(i) in (0, 1) for i in is_dones), "is_done should be strictly True or False"
assert all(0 <= a <= n_actions for a in act_batch), "actions should be within [0, n_actions]"
print("Well done!")
###Output
_____no_output_____
###Markdown
Target networksWe also employ the so-called "target network" - a copy of the neural network weights to be used for reference Q-values:The network itself is an exact copy of the agent network, but its parameters are not trained. Instead, they are copied over from the agent's actual network every so often.$$ Q_{reference}(s,a) = r + \gamma \cdot \max _{a'} Q_{target}(s',a') $$
###Code
target_network = DQNAgent("target_network", state_dim, n_actions)
def load_weigths_into_target_network(agent, target_network):
""" assign target_network.weights variables to their respective agent.weights values. """
assigns = []
for w_agent, w_target in zip(agent.weights, target_network.weights):
assigns.append(tf.assign(w_target, w_agent, validate_shape=True))
tf.get_default_session().run(assigns)
load_weigths_into_target_network(agent, target_network)
# check that it works
sess.run([tf.assert_equal(w, w_target) for w, w_target in zip(agent.weights, target_network.weights)]);
print("It works!")
###Output
_____no_output_____
###Markdown
Learning with... Q-learningHere we write a function similar to `agent.update` from tabular q-learning.
###Code
# placeholders that will be fed with exp_replay.sample(batch_size)
obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
actions_ph = tf.placeholder(tf.int32, shape=[None])
rewards_ph = tf.placeholder(tf.float32, shape=[None])
next_obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
is_done_ph = tf.placeholder(tf.float32, shape=[None])
is_not_done = 1 - is_done_ph
gamma = 0.99
###Output
_____no_output_____
###Markdown
Deep Q-Network implementationThis notebook shamelessly demands that you implement a DQN - an approximate q-learning algorithm with experience replay and target networks - and see whether it works any better this way.
###Code
#XVFB will be launched if you run on a server
import os
#if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY"))==0:
# !bash ../xvfb start
# %env DISPLAY=:1
###Output
_____no_output_____
###Markdown
__Frameworks__ - we'll accept this homework in any deep learning framework. This particular notebook was designed for tensorflow, but you will find it easy to adapt it to almost any python-based deep learning framework.
###Code
import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Let's play some old video gamesThis time we're gonna apply approximate q-learning to an Atari game called Breakout. It's not the hardest thing out there, but it's definitely way more complex than anything we tried before. Processing game image Raw Atari images are large, 210x160x3 by default. However, we don't need that level of detail in order to learn from them.We can thus save a lot of time by preprocessing the game image, including* Resizing to a smaller shape, 64 x 64* Converting to grayscale* Cropping irrelevant image parts (top & bottom)
###Code
from skimage.transform import resize
from skimage.color import rgb2gray, gray2rgb
from skimage import img_as_float
from gym.core import ObservationWrapper
from gym.spaces import Box
class PreprocessAtari(ObservationWrapper):
def __init__(self, env):
"""A gym wrapper that crops, scales image into the desired shapes and optionally grayscales it."""
ObservationWrapper.__init__(self,env)
self.img_size = (64, 64)
self.observation_space = Box(0.0, 1.0, (self.img_size[0], self.img_size[1], 1))
def _observation(self, img):
"""what happens to each observation"""
# Here's what you need to do:
# * crop image, remove irrelevant parts
# * resize image to self.img_size
# (use imresize imported above or any library you want,
# e.g. opencv, skimage, PIL, keras)
# * cast image to grayscale
# * convert image pixels to (0,1) range, float32 type
#<Your code here>
img = img[50:-5,5:-5]
img = resize(img, self.img_size)
img = rgb2gray(img)
img = np.expand_dims(img,-1)
img = img_as_float(img)
img = np.float32(img)
#print(img.shape)
return img
import gym
#spawn game instance for tests
env = gym.make("BreakoutDeterministic-v0") #create raw env
env = PreprocessAtari(env)
observation_shape = env.observation_space.shape
n_actions = env.action_space.n
obs = env.reset()
#test observation
assert obs.ndim == 3, "observation must be [height, width, channels] even if there's just one channel"
assert obs.shape == observation_shape
assert obs.dtype == 'float32'
assert len(np.unique(obs))>2, "your image must not be binary"
assert 0 <= np.min(obs) and np.max(obs) <=1, "convert image pixels to (0,1) range"
print("Formal tests seem fine. Here's an example of what you'll get.")
plt.title("what your network gonna see")
plt.imshow(obs[:,:,0],interpolation='none',cmap='gray');
###Output
[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.[0m
[33mWARN: <class '__main__.PreprocessAtari'> doesn't implement 'observation' method. Maybe it implements deprecated '_observation' method.[0m
Formal tests seem fine. Here's an example of what you'll get.
###Markdown
Frame bufferOur agent can only process one observation at a time, so we gotta make sure it contains enough information to find optimal actions. For instance, the agent has to react to moving objects, so it must be able to measure the object's velocity.To do so, we introduce a buffer that stores the last 4 images. This time everything is pre-implemented for you.
###Code
from framebuffer import FrameBuffer
def make_env():
env = gym.make("BreakoutDeterministic-v4")
env = PreprocessAtari(env)
env = FrameBuffer(env, n_frames=4, dim_order='tensorflow')
return env
env = make_env()
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
for _ in range(50):
obs, _, _, _ = env.step(env.action_space.sample())
plt.title("Game image")
plt.imshow(env.render("rgb_array"))
plt.show()
plt.title("Agent observation (4 frames left to right)")
plt.imshow(obs.transpose([0,2,1]).reshape([state_dim[0],-1]));
###Output
/home/sheshank/anaconda3/lib/python3.6/site-packages/skimage/transform/_warps.py:84: UserWarning: The default mode, 'constant', will be changed to 'reflect' in skimage 0.15.
warn("The default mode, 'constant', will be changed to 'reflect' in "
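###Markdown
For intuition, the buffer behaves roughly like a rolling window over the channel axis; a conceptual sketch of one update (an assumption about framebuffer's internals, for illustration only):
###Code
def push_frame(stacked, new_frame):
    """drop the oldest frame, append the newest one
    stacked:   (64, 64, 4) float32 - the 4 most recent frames
    new_frame: (64, 64, 1) float32 - output of PreprocessAtari"""
    return np.concatenate([stacked[..., 1:], new_frame], axis=-1)
###Output
_____no_output_____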
###Markdown
Building a networkWe now need to build a neural network that can map images to state q-values. This network will be called on every agent step, so it had better not be ResNet-152 unless you have an array of GPUs. Instead, you can use strided convolutions with a small number of features to save time and memory.You can build any architecture you want, but for reference, here's something that will more or less work: 
###Code
import tensorflow as tf
tf.reset_default_graph()
sess = tf.InteractiveSession()
import keras.layers as L
import keras
class DQNAgent:
def __init__(self, name, state_shape, n_actions, epsilon=0, reuse=False):
"""A simple DQN agent"""
with tf.variable_scope(name, reuse=reuse):
#< Define your network body here. Please make sure you don't use any layers created elsewhere >
self.network = keras.models.Sequential()
self.network.add(L.InputLayer(state_shape))
self.network.add(L.Conv2D(16, (3,3), strides=2, activation='relu'))
self.network.add(L.Conv2D(32, (3,3), strides=2, activation='relu'))
self.network.add(L.Conv2D(64, (3,3), strides=2, activation='relu'))
self.network.add(L.Flatten())
self.network.add(L.Dense(256, activation='relu'))
self.network.add(L.Dense(n_actions))
# prepare a graph for agent step
self.state_t = tf.placeholder('float32', [None,] + list(state_shape))
self.qvalues_t = self.get_symbolic_qvalues(self.state_t)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
self.epsilon = epsilon
def get_symbolic_qvalues(self, state_t):
"""takes agent's observation, returns qvalues. Both are tf Tensors"""
qvalues = self.network(state_t) #< apply your network layers here >
#qvalues = < symbolic tensor for q-values >
assert tf.is_numeric_tensor(qvalues) and qvalues.shape.ndims == 2, \
"please return 2d tf tensor of qvalues [you got %s]" % repr(qvalues)
assert int(qvalues.shape[1]) == n_actions
return qvalues
def get_qvalues(self, state_t):
"""Same as symbolic step except it operates on numpy arrays"""
sess = tf.get_default_session()
return sess.run(self.qvalues_t, {self.state_t: state_t})
def sample_actions(self, qvalues):
"""pick actions given qvalues. Uses epsilon-greedy exploration strategy. """
epsilon = self.epsilon
batch_size, n_actions = qvalues.shape
random_actions = np.random.choice(n_actions, size=batch_size)
best_actions = qvalues.argmax(axis=-1)
should_explore = np.random.choice([0, 1], batch_size, p = [1-epsilon, epsilon])
return np.where(should_explore, random_actions, best_actions)
agent = DQNAgent("dqn_agent", state_dim, n_actions, epsilon=0.5)
sess.run(tf.global_variables_initializer())
###Output
_____no_output_____
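###Markdown
`sample_actions` works on plain numpy arrays, so the epsilon-greedy behaviour is easy to eyeball without running a session:
###Code
fake_qvalues = np.random.randn(5, n_actions)
print(agent.sample_actions(fake_qvalues))  # 5 actions; roughly half the rows are random at epsilon=0.5
###Output
_____no_output_____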
###Markdown
Now let's try out our agent to see if it raises any errors.
###Code
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
""" Plays n_games full games. If greedy, picks actions as argmax(qvalues). Returns mean reward. """
rewards = []
for _ in range(n_games):
s = env.reset()
reward = 0
for _ in range(t_max):
qvalues = agent.get_qvalues([s])
action = qvalues.argmax(axis=-1)[0] if greedy else agent.sample_actions(qvalues)[0]
s, r, done, _ = env.step(action)
reward += r
if done: break
rewards.append(reward)
return np.mean(rewards)
evaluate(env, agent, n_games=1)
###Output
/home/sheshank/anaconda3/lib/python3.6/site-packages/skimage/transform/_warps.py:84: UserWarning: The default mode, 'constant', will be changed to 'reflect' in skimage 0.15.
warn("The default mode, 'constant', will be changed to 'reflect' in "
###Markdown
Experience replayFor this assignment, we provide you with an experience replay buffer. If you implemented an experience replay buffer in last week's assignment, you can copy-paste it here __to get 2 bonus points__. The interface is fairly simple:* `exp_replay.add(obs, act, rw, next_obs, done)` - saves an (s,a,r,s',done) tuple into the buffer* `exp_replay.sample(batch_size)` - returns observations, actions, rewards, next_observations and is_done for `batch_size` random samples* `len(exp_replay)` - returns the number of elements stored in the replay buffer.
###Code
from replay_buffer import ReplayBuffer
exp_replay = ReplayBuffer(10)
for _ in range(30):
exp_replay.add(env.reset(), env.action_space.sample(), 1.0, env.reset(), done=False)
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(5)
assert len(exp_replay) == 10, "experience replay size should be 10 because that's what maximum capacity is"
def play_and_record(agent, env, exp_replay, n_steps=1):
"""
Play the game for exactly n steps, record every (s,a,r,s', done) to replay buffer.
Whenever game ends, add record with done=True and reset the game.
:returns: return sum of rewards over time
Note: please do not env.reset() unless env is done.
It is guaranteed that env has done=False when passed to this function.
"""
# State at the beginning of rollout
s = env.framebuffer
# Play the game for n_steps as per instructions above
# <YOUR CODE>
total_reward = 0.0
for _ in range(n_steps):
qvalues = agent.get_qvalues([s])
action = agent.sample_actions(qvalues)[0]
#print(action)
next_s, r, done, _ = env.step(action)
#print(done)
exp_replay.add(s, action, r, next_s, done)
total_reward +=r
s = next_s
if done:
s = env.reset()
return total_reward
# testing your code. This may take a minute...
exp_replay = ReplayBuffer(20000)
play_and_record(agent, env, exp_replay, n_steps=10000)
# if you're using your own experience replay buffer, some of those tests may need correction.
# just make sure you know what your code does
assert len(exp_replay) == 10000, "play_and_record should have added exactly 10000 steps, "\
"but instead added %i"%len(exp_replay)
is_dones = list(zip(*exp_replay._storage))[-1]
assert 0 < np.mean(is_dones) < 0.1, "Please make sure you restart the game whenever it is 'done' and record the is_done correctly into the buffer."\
"Got %f is_done rate over %i steps. [If you think it's your tough luck, just re-run the test]"%(np.mean(is_dones), len(exp_replay))
for _ in range(100):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(10)
assert obs_batch.shape == next_obs_batch.shape == (10,) + state_dim
assert act_batch.shape == (10,), "actions batch should have shape (10,) but is instead %s"%str(act_batch.shape)
assert reward_batch.shape == (10,), "rewards batch should have shape (10,) but is instead %s"%str(reward_batch.shape)
assert is_done_batch.shape == (10,), "is_done batch should have shape (10,) but is instead %s"%str(is_done_batch.shape)
assert all(int(i) in (0, 1) for i in is_dones), "is_done should be strictly True or False"
assert all(0 <= a <= n_actions for a in act_batch), "actions should be within [0, n_actions]"
print("Well done!")
###Output
/home/sheshank/anaconda3/lib/python3.6/site-packages/skimage/transform/_warps.py:84: UserWarning: The default mode, 'constant', will be changed to 'reflect' in skimage 0.15.
warn("The default mode, 'constant', will be changed to 'reflect' in "
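###Markdown
One practical note: the replay buffer dominates memory here. A rough footprint estimate, assuming both s and s' are stored as float32 frame stacks (which is what play_and_record feeds in):
###Code
bytes_per_obs = 64 * 64 * 4 * 4           # H * W * n_frames * sizeof(float32)
bytes_per_transition = 2 * bytes_per_obs  # both s and s' are stored
print("~%.1f GB for a 10**5-transition buffer" % (10**5 * bytes_per_transition / 1e9))
###Output
_____no_output_____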
###Markdown
Target networksWe also employ the so-called "target network" - a copy of the neural network weights to be used for reference Q-values:The network itself is an exact copy of the agent network, but its parameters are not trained. Instead, they are copied over from the agent's actual network every so often.$$ Q_{reference}(s,a) = r + \gamma \cdot \max _{a'} Q_{target}(s',a') $$
###Code
target_network = DQNAgent("target_network", state_dim, n_actions)
def load_weigths_into_target_network(agent, target_network):
""" assign target_network.weights variables to their respective agent.weights values. """
assigns = []
for w_agent, w_target in zip(agent.weights, target_network.weights):
assigns.append(tf.assign(w_target, w_agent, validate_shape=True))
tf.get_default_session().run(assigns)
load_weigths_into_target_network(agent, target_network)
# check that it works
sess.run([tf.assert_equal(w, w_target) for w, w_target in zip(agent.weights, target_network.weights)]);
print("It works!")
###Output
It works!
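###Markdown
For reference, some DQN variants replace the periodic hard copy with a soft ("Polyak") update applied every step; a sketch (tau is an illustrative value, not part of this assignment):
###Code
tau = 0.01  # fraction of the agent's weights mixed into the target per update
soft_assigns = [tf.assign(w_t, tau * w_a + (1 - tau) * w_t)
                for w_a, w_t in zip(agent.weights, target_network.weights)]
# sess.run(soft_assigns)  # would replace the periodic hard copy in the training loop
###Output
_____no_output_____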
###Markdown
Learning with... Q-learningHere we write a function similar to `agent.update` from tabular q-learning.
###Code
# placeholders that will be fed with exp_replay.sample(batch_size)
obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
actions_ph = tf.placeholder(tf.int32, shape=[None])
rewards_ph = tf.placeholder(tf.float32, shape=[None])
next_obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
is_done_ph = tf.placeholder(tf.float32, shape=[None])
is_not_done = 1 - is_done_ph
gamma = 0.99
###Output
_____no_output_____
###Markdown
Take q-values for the actions the agent just took
###Code
current_qvalues = agent.get_symbolic_qvalues(obs_ph)
current_action_qvalues = tf.reduce_sum(tf.one_hot(actions_ph, n_actions) * current_qvalues, axis=1)
###Output
_____no_output_____
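###Markdown
The one-hot trick above simply picks Q(s, a) out of each row of the q-value matrix; the same thing in plain numpy, on made-up numbers:
###Code
q = np.array([[1., 2.], [3., 4.]])
a = np.array([1, 0])
print((np.eye(2)[a] * q).sum(axis=1))  # [2., 3.] - same as q[np.arange(2), a]
###Output
_____no_output_____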
###Markdown
Compute Q-learning TD error:$$ L = { 1 \over N} \sum_i [ Q_{\theta}(s,a) - Q_{reference}(s,a) ] ^2 $$With Q-reference defined as$$ Q_{reference}(s,a) = r(s,a) + \gamma \cdot \max_{a'} Q_{target}(s', a') $$Where* $Q_{target}(s',a')$ denotes the q-value of the next state and next action predicted by the __target_network__* $s, a, r, s'$ are the current state, action, reward and next state respectively* $\gamma$ is a discount factor defined two cells above.
###Code
# compute q-values for NEXT states with target network
next_qvalues_target = target_network.get_symbolic_qvalues(next_obs_ph) #<YOUR CODE>
# compute state values by taking max over next_qvalues_target for all actions
next_state_values_target = tf.reduce_max(next_qvalues_target, axis=1) #<YOUR CODE>
# compute Q_reference(s,a) as per formula above.
reference_qvalues = rewards_ph + gamma * next_state_values_target * is_not_done #<YOUR CODE>
# Define loss function for sgd.
td_loss = (current_action_qvalues - tf.stop_gradient(reference_qvalues)) ** 2
td_loss = tf.reduce_mean(td_loss)
train_step = tf.train.AdamOptimizer(1e-4).minimize(td_loss, var_list=agent.weights)
sess.run(tf.global_variables_initializer())
for chk_grad in tf.gradients(reference_qvalues, agent.weights):
error_msg = "Reference q-values should have no gradient w.r.t. agent weights. Make sure you used target_network qvalues! "
error_msg += "If you know what you're doing, ignore this assert."
assert chk_grad is None or np.allclose(sess.run(chk_grad), sess.run(chk_grad * 0)), error_msg
assert tf.gradients(reference_qvalues, is_not_done)[0] is not None, "make sure you used is_not_done"
assert tf.gradients(reference_qvalues, rewards_ph)[0] is not None, "make sure you used rewards"
assert tf.gradients(reference_qvalues, next_obs_ph)[0] is not None, "make sure you used next states"
assert tf.gradients(reference_qvalues, obs_ph)[0] is None, "reference qvalues shouldn't depend on current observation!" # ignore if you're certain it's ok
print("Splendid!")
###Output
Splendid!
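###Markdown
A quick numeric check of the Q_reference formula on a made-up two-transition batch, mainly to see how is_not_done masks out the bootstrapped term for terminal states:
###Code
r = np.array([1.0, 0.0])           # rewards
max_q_next = np.array([5.0, 3.0])  # max over a' of Q_target(s', a')
not_done = np.array([1.0, 0.0])    # the second transition ends the episode
print(r + 0.99 * max_q_next * not_done)  # [5.95, 0.0] - a terminal state keeps only its reward
###Output
_____no_output_____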
###Markdown
Main loopIt's time to put everything together and see if it learns anything.
###Code
from tqdm import trange
from IPython.display import clear_output
import matplotlib.pyplot as plt
from pandas import DataFrame
# pandas removed pd.ewma; DataFrame.ewm is the modern equivalent
moving_average = lambda x, span=100, **kw: DataFrame(
    {'x': np.asarray(x)}).x.ewm(span=span, **kw).mean().values
%matplotlib inline
mean_rw_history = []
td_loss_history = []
exp_replay = ReplayBuffer(10**5)
play_and_record(agent, env, exp_replay, n_steps=10000)
def sample_batch(exp_replay, batch_size):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(batch_size)
return {
obs_ph:obs_batch, actions_ph:act_batch, rewards_ph:reward_batch,
next_obs_ph:next_obs_batch, is_done_ph:is_done_batch
}
for i in trange(10**5):
# play
play_and_record(agent, env, exp_replay, 10)
# train
_, loss_t = sess.run([train_step, td_loss], sample_batch(exp_replay, batch_size=64))
td_loss_history.append(loss_t)
# adjust agent parameters
if i % 500 == 0:
load_weigths_into_target_network(agent, target_network)
agent.epsilon = max(agent.epsilon * 0.99, 0.01)
mean_rw_history.append(evaluate(make_env(), agent, n_games=3))
if i % 100 == 0:
clear_output(True)
print("buffer size = %i, epsilon = %.5f" % (len(exp_replay), agent.epsilon))
plt.subplot(1,2,1)
plt.title("mean reward per game")
plt.plot(mean_rw_history)
plt.grid()
assert not np.isnan(loss_t)
plt.figure(figsize=[12, 4])
plt.subplot(1,2,2)
plt.title("TD loss history (moving average)")
        plt.plot(moving_average(np.array(td_loss_history), span=100, min_periods=100))
plt.grid()
plt.show()
assert np.mean(mean_rw_history[-10:]) > 10.
print("That's good enough for tutorial.")
###Output
That's good enough for tutorial.
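###Markdown
If you want to keep the trained weights, a plain TF1 saver over the agent's variables does the job (the checkpoint path is an arbitrary choice):
###Code
saver = tf.train.Saver(var_list=agent.weights)
# saver.save(sess, "checkpoints/dqn_agent.ckpt")     # persist after training
# saver.restore(sess, "checkpoints/dqn_agent.ckpt")  # reload into a fresh session
###Output
_____no_output_____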
###Markdown
__How to interpret plots:__ This ain't supervised learning, so don't expect anything to improve monotonically. * __TD loss__ is the MSE between the agent's current Q-values and the target Q-values. It may slowly increase or decrease; that's fine. The "not ok" behaviors are going NaN or staying at exactly zero before the agent has perfect performance.* __mean reward__ is the expected sum of r(s,a) the agent gets over a full game session. It will oscillate, but on average it should get higher over time (after a few thousand iterations...). * In a basic q-learning implementation it takes 5-10k steps to "warm up" the agent before it starts to get better.* __buffer size__ - this one is simple. It should go up and cap at max size.* __epsilon__ - the agent's willingness to explore. If the agent is already at 0.01 epsilon before its average reward is above 0, __you need to increase epsilon__. Set it back to some 0.2 - 0.5 and decrease the pace at which it goes down.* Also, please ignore the first 100-200 steps of each plot - they're just oscillations caused by the way the moving average works.At first your agent will lose quickly. Then it will learn to suck less and at least hit the ball a few times before it loses. Finally it will learn to actually score points.__Training will take time.__ A lot of it, actually. An optimistic estimate is that it starts winning (average reward > 10) after about 10k steps. But hey, look on the bright side of things: Video
###Code
agent.epsilon=0 # Don't forget to reset epsilon back to previous value if you want to go on training
#record sessions
import gym.wrappers
env_monitor = gym.wrappers.Monitor(make_env(),directory="videos",force=True)
sessions = [evaluate(env_monitor, agent, n_games=1) for _ in range(100)]
env_monitor.close()
#show video
from IPython.display import HTML
import os
video_names = list(filter(lambda s:s.endswith(".mp4"),os.listdir("./videos/")))
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format("./videos/"+video_names[-1])) #this may or may not be _last_ video. Try other indices
###Output
_____no_output_____
###Markdown
Deep Q-Network implementationThis notebook shamelessly demands that you implement a DQN - an approximate q-learning algorithm with experience replay and target networks - and see whether it works any better this way.
###Code
# XVFB will be launched if you run on a server
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
    %env DISPLAY=:1
###Output
_____no_output_____
###Markdown
__Frameworks__ - we'll accept this homework in any deep learning framework. This particular notebook was designed for tensorflow, but you will find it easy to adapt it to almost any python-based deep learning framework.
###Code
import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Let's play some old video gamesThis time we're gonna apply approximate q-learning to an Atari game called Breakout. It's not the hardest thing out there, but it's definitely way more complex than anything we tried before. Processing game image Raw Atari images are large, 210x160x3 by default. However, we don't need that level of detail in order to learn from them.We can thus save a lot of time by preprocessing the game image, including* Resizing to a smaller shape, 64 x 64* Converting to grayscale* Cropping irrelevant image parts (top & bottom)
###Code
from gym.core import ObservationWrapper
from gym.spaces import Box
from scipy.misc import imresize
class PreprocessAtari(ObservationWrapper):
def __init__(self, env):
"""A gym wrapper that crops, scales image into the desired shapes and optionally grayscales it."""
ObservationWrapper.__init__(self, env)
self.img_size = (64, 64)
        self.observation_space = Box(0.0, 1.0, (self.img_size[0], self.img_size[1], 1))  # H x W x 1: grayscale with an explicit channel axis
def _observation(self, img):
"""what happens to each observation"""
# Here's what you need to do:
# * crop image, remove irrelevant parts
# * resize image to self.img_size
# (use imresize imported above or any library you want,
# e.g. opencv, skimage, PIL, keras)
# * cast image to grayscale
# * convert image pixels to (0,1) range, float32 type
        # one possible implementation:
        img = img[50:-5, 5:-5]                    # crop scores (top) and a thin border
        img = imresize(img, self.img_size)        # downsample to 64 x 64 (uint8)
        img = img.mean(-1, keepdims=True) / 255.  # grayscale, scale pixels to (0,1)
        return np.float32(img)                    # float32, shape (64, 64, 1)
import gym
# spawn game instance for tests
env = gym.make("BreakoutDeterministic-v0") # create raw env
env = PreprocessAtari(env)
observation_shape = env.observation_space.shape
n_actions = env.action_space.n
obs = env.reset()
# test observation
assert obs.ndim == 3, "observation must be [height, width, channels] even if there's just one channel"
assert obs.shape == observation_shape
assert obs.dtype == 'float32'
assert len(np.unique(obs)) > 2, "your image must not be binary"
assert 0 <= np.min(obs) and np.max(
obs) <= 1, "convert image pixels to (0,1) range"
print("Formal tests seem fine. Here's an example of what you'll get.")
plt.title("what your network gonna see")
plt.imshow(obs[:, :, 0], interpolation='none', cmap='gray')
###Output
_____no_output_____
###Markdown
Frame bufferOur agent can only process one observation at a time, so we gotta make sure it contains enough information to find optimal actions. For instance, the agent has to react to moving objects, so it must be able to measure the object's velocity.To do so, we introduce a buffer that stores the last 4 images. This time everything is pre-implemented for you.
###Code
from framebuffer import FrameBuffer
def make_env():
env = gym.make("BreakoutDeterministic-v4")
env = PreprocessAtari(env)
env = FrameBuffer(env, n_frames=4, dim_order='tensorflow')
return env
env = make_env()
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
for _ in range(50):
obs, _, _, _ = env.step(env.action_space.sample())
plt.title("Game image")
plt.imshow(env.render("rgb_array"))
plt.show()
plt.title("Agent observation (4 frames left to right)")
plt.imshow(obs.transpose([0, 2, 1]).reshape([state_dim[0], -1]))
###Output
_____no_output_____
###Markdown
Building a networkWe now need to build a neural network that can map images to state q-values. This network will be called on every agent step, so it had better not be ResNet-152 unless you have an array of GPUs. Instead, you can use strided convolutions with a small number of features to save time and memory.You can build any architecture you want, but for reference, here's something that will more or less work: 
###Code
import tensorflow as tf
tf.reset_default_graph()
sess = tf.InteractiveSession()
import keras
from keras.layers import Conv2D, Dense, Flatten, InputLayer
class DQNAgent:
def __init__(self, name, state_shape, n_actions, epsilon=0, reuse=False):
"""A simple DQN agent"""
with tf.variable_scope(name, reuse=reuse):
            # one reasonable choice of network body: three strided convolutions + a dense head
            self.network = keras.models.Sequential()
            self.network.add(InputLayer(state_shape))
            self.network.add(Conv2D(16, (3, 3), strides=2, activation='relu'))
            self.network.add(Conv2D(32, (3, 3), strides=2, activation='relu'))
            self.network.add(Conv2D(64, (3, 3), strides=2, activation='relu'))
            self.network.add(Flatten())
            self.network.add(Dense(256, activation='relu'))
            self.network.add(Dense(n_actions))
# prepare a graph for agent step
self.state_t = tf.placeholder(
'float32', [None, ] + list(state_shape))
self.qvalues_t = self.get_symbolic_qvalues(self.state_t)
self.weights = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
self.epsilon = epsilon
def get_symbolic_qvalues(self, state_t):
"""takes agent's observation, returns qvalues. Both are tf Tensors"""
        qvalues = self.network(state_t)
assert tf.is_numeric_tensor(qvalues) and qvalues.shape.ndims == 2, \
"please return 2d tf tensor of qvalues [you got %s]" % repr(
qvalues)
assert int(qvalues.shape[1]) == n_actions
return qvalues
def get_qvalues(self, state_t):
"""Same as symbolic step except it operates on numpy arrays"""
sess = tf.get_default_session()
return sess.run(self.qvalues_t, {self.state_t: state_t})
def sample_actions(self, qvalues):
"""pick actions given qvalues. Uses epsilon-greedy exploration strategy. """
epsilon = self.epsilon
batch_size, n_actions = qvalues.shape
random_actions = np.random.choice(n_actions, size=batch_size)
best_actions = qvalues.argmax(axis=-1)
should_explore = np.random.choice(
[0, 1], batch_size, p=[1-epsilon, epsilon])
return np.where(should_explore, random_actions, best_actions)
agent = DQNAgent("dqn_agent", state_dim, n_actions, epsilon=0.5)
sess.run(tf.global_variables_initializer())
###Output
_____no_output_____
###Markdown
Now let's try out our agent to see if it raises any errors.
###Code
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
""" Plays n_games full games. If greedy, picks actions as argmax(qvalues). Returns mean reward. """
rewards = []
for _ in range(n_games):
s = env.reset()
reward = 0
for _ in range(t_max):
qvalues = agent.get_qvalues([s])
action = qvalues.argmax(
axis=-1)[0] if greedy else agent.sample_actions(qvalues)[0]
s, r, done, _ = env.step(action)
reward += r
if done:
break
rewards.append(reward)
return np.mean(rewards)
evaluate(env, agent, n_games=1)
###Output
_____no_output_____
###Markdown
Experience replayFor this assignment, we provide you with an experience replay buffer. If you implemented an experience replay buffer in last week's assignment, you can copy-paste it here __to get 2 bonus points__. The interface is fairly simple:* `exp_replay.add(obs, act, rw, next_obs, done)` - saves an (s,a,r,s',done) tuple into the buffer* `exp_replay.sample(batch_size)` - returns observations, actions, rewards, next_observations and is_done for `batch_size` random samples* `len(exp_replay)` - returns the number of elements stored in the replay buffer.
###Code
from replay_buffer import ReplayBuffer
exp_replay = ReplayBuffer(10)
for _ in range(30):
exp_replay.add(env.reset(), env.action_space.sample(),
1.0, env.reset(), done=False)
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(
5)
assert len(exp_replay) == 10, "experience replay size should be 10 because that's what maximum capacity is"
def play_and_record(agent, env, exp_replay, n_steps=1):
"""
Play the game for exactly n steps, record every (s,a,r,s', done) to replay buffer.
Whenever game ends, add record with done=True and reset the game.
It is guaranteed that env has done=False when passed to this function.
PLEASE DO NOT RESET ENV UNLESS IT IS "DONE"
:returns: return sum of rewards over time
"""
# initial state
s = env.framebuffer
# Play the game for n_steps as per instructions above
    total_reward = 0.0
    for _ in range(n_steps):
        qvalues = agent.get_qvalues([s])
        action = agent.sample_actions(qvalues)[0]
        next_s, r, done, _ = env.step(action)
        exp_replay.add(s, action, r, next_s, done)
        total_reward += r
        s = next_s
        if done:
            s = env.reset()
    return total_reward
# testing your code. This may take a minute...
exp_replay = ReplayBuffer(20000)
play_and_record(agent, env, exp_replay, n_steps=10000)
# if you're using your own experience replay buffer, some of those tests may need correction.
# just make sure you know what your code does
assert len(exp_replay) == 10000, "play_and_record should have added exactly 10000 steps, "\
"but instead added %i" % len(exp_replay)
is_dones = list(zip(*exp_replay._storage))[-1]
assert 0 < np.mean(is_dones) < 0.1, "Please make sure you restart the game whenever it is 'done' and record the is_done correctly into the buffer."\
"Got %f is_done rate over %i steps. [If you think it's your tough luck, just re-run the test]" % (
np.mean(is_dones), len(exp_replay))
for _ in range(100):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(
10)
assert obs_batch.shape == next_obs_batch.shape == (10,) + state_dim
assert act_batch.shape == (
10,), "actions batch should have shape (10,) but is instead %s" % str(act_batch.shape)
assert reward_batch.shape == (
10,), "rewards batch should have shape (10,) but is instead %s" % str(reward_batch.shape)
assert is_done_batch.shape == (
10,), "is_done batch should have shape (10,) but is instead %s" % str(is_done_batch.shape)
assert all(int(i) in (0, 1)
           for i in is_dones), "is_done should be strictly True or False"
assert all(0 <= a <= n_actions
           for a in act_batch), "actions should be within [0, n_actions]"
print("Well done!")
###Output
_____no_output_____
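###Markdown
A common Atari trick (used in the original DQN paper, though not required here) is clipping rewards to {-1, 0, +1} so q-values stay on a similar scale across games; a sketch assuming gym's RewardWrapper API:
###Code
from gym.core import RewardWrapper

class ClipReward(RewardWrapper):
    def reward(self, r):
        return np.sign(r)  # squash every reward into {-1, 0, +1}

# env = ClipReward(make_env())  # would wrap the environment if you wanted clipping
###Output
_____no_output_____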
###Markdown
Target networksWe also employ the so-called "target network" - a copy of the neural network weights to be used for reference Q-values:The network itself is an exact copy of the agent network, but its parameters are not trained. Instead, they are copied over from the agent's actual network every so often.$$ Q_{reference}(s,a) = r + \gamma \cdot \max _{a'} Q_{target}(s',a') $$
###Code
target_network = DQNAgent("target_network", state_dim, n_actions)
def load_weigths_into_target_network(agent, target_network):
""" assign target_network.weights variables to their respective agent.weights values. """
assigns = []
for w_agent, w_target in zip(agent.weights, target_network.weights):
assigns.append(tf.assign(w_target, w_agent, validate_shape=True))
# tf.get_default_session().run(assigns)
return assigns
# create the tf copy graph only once.
copy_step = load_weigths_into_target_network(agent, target_network)
sess.run(copy_step)
# check that it works
sess.run([tf.assert_equal(w, w_target)
for w, w_target in zip(agent.weights, target_network.weights)])
print("It works!")
###Output
_____no_output_____
###Markdown
Learning with... Q-learningHere we write a function similar to `agent.update` from tabular q-learning.
###Code
# placeholders that will be fed with exp_replay.sample(batch_size)
obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
actions_ph = tf.placeholder(tf.int32, shape=[None])
rewards_ph = tf.placeholder(tf.float32, shape=[None])
next_obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
is_done_ph = tf.placeholder(tf.float32, shape=[None])
is_not_done = 1 - is_done_ph
gamma = 0.99
###Output
_____no_output_____
###Markdown
Take q-values for the actions the agent just took
###Code
current_qvalues = agent.get_symbolic_qvalues(obs_ph)
current_action_qvalues = tf.reduce_sum(tf.one_hot(
actions_ph, n_actions) * current_qvalues, axis=1)
###Output
_____no_output_____
###Markdown
Compute Q-learning TD error:$$ L = { 1 \over N} \sum_i [ Q_{\theta}(s,a) - Q_{reference}(s,a) ] ^2 $$With Q-reference defined as$$ Q_{reference}(s,a) = r(s,a) + \gamma \cdot \max_{a'} Q_{target}(s', a') $$Where* $Q_{target}(s',a')$ denotes the q-value of the next state and next action predicted by the __target_network__* $s, a, r, s'$ are the current state, action, reward and next state respectively* $\gamma$ is a discount factor defined two cells above.
###Code
# compute q-values for NEXT states with target network
next_qvalues_target = target_network.get_symbolic_qvalues(next_obs_ph)
# compute state values by taking max over next_qvalues_target for all actions
next_state_values_target = tf.reduce_max(next_qvalues_target, axis=1)
# compute Q_reference(s,a) as per formula above
reference_qvalues = rewards_ph + gamma * next_state_values_target * is_not_done
# Define loss function for sgd.
td_loss = (current_action_qvalues - reference_qvalues) ** 2
td_loss = tf.reduce_mean(td_loss)
train_step = tf.train.AdamOptimizer(
1e-3).minimize(td_loss, var_list=agent.weights)
sess.run(tf.global_variables_initializer())
for chk_grad in tf.gradients(reference_qvalues, agent.weights):
error_msg = "Reference q-values should have no gradient w.r.t. agent weights. Make sure you used target_network qvalues! "
error_msg += "If you know what you're doing, ignore this assert."
assert chk_grad is None or np.allclose(
sess.run(chk_grad), sess.run(chk_grad * 0)), error_msg
assert tf.gradients(reference_qvalues, is_not_done)[
0] is not None, "make sure you used is_not_done"
assert tf.gradients(reference_qvalues, rewards_ph)[
0] is not None, "make sure you used rewards"
assert tf.gradients(reference_qvalues, next_obs_ph)[
0] is not None, "make sure you used next states"
assert tf.gradients(reference_qvalues, obs_ph)[
0] is None, "reference qvalues shouldn't depend on current observation!" # ignore if you're certain it's ok
print("Splendid!")
###Output
_____no_output_____
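###Markdown
A popular refinement at this point is double DQN (not required for this assignment): the agent network chooses the next action while the target network evaluates it, which reduces the overestimation bias of the plain max. A sketch built from the graph pieces defined above:
###Code
next_qvalues_agent = agent.get_symbolic_qvalues(next_obs_ph)  # choose actions with the agent...
best_next_actions = tf.argmax(next_qvalues_agent, axis=1)
next_state_values_ddqn = tf.reduce_sum(                       # ...but evaluate them with the target
    tf.one_hot(best_next_actions, n_actions) * next_qvalues_target, axis=1)
# reference_qvalues = rewards_ph + gamma * next_state_values_ddqn * is_not_done
###Output
_____no_output_____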
###Markdown
Main loopIt's time to put everything together and see if it learns anything.
###Code
from tqdm import trange
from IPython.display import clear_output
import matplotlib.pyplot as plt
from pandas import DataFrame
moving_average = lambda x, span=100, **kw: DataFrame(
{'x': np.asarray(x)}).x.ewm(span=span, **kw).mean().values
%matplotlib inline
mean_rw_history = []
td_loss_history = []
exp_replay = ReplayBuffer(10**5)
play_and_record(agent, env, exp_replay, n_steps=10000)
def sample_batch(exp_replay, batch_size):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(
batch_size)
return {
obs_ph: obs_batch, actions_ph: act_batch, rewards_ph: reward_batch,
next_obs_ph: next_obs_batch, is_done_ph: is_done_batch
}
for i in trange(10**5):
# play
play_and_record(agent, env, exp_replay, 10)
# train
_, loss_t = sess.run([train_step, td_loss],
sample_batch(exp_replay, batch_size=64))
td_loss_history.append(loss_t)
# adjust agent parameters
if i % 500 == 0:
        # load_weigths_into_target_network(agent, target_network)
        # calling 'load_weigths_into_target_network' repeatedly would create new tf copy ops
        # every time, bloating memory consumption as training goes on,
        # so 'copy_step' is created once above and reused here
sess.run(copy_step)
agent.epsilon = max(agent.epsilon * 0.99, 0.01)
mean_rw_history.append(evaluate(make_env(), agent, n_games=3))
if i % 100 == 0:
clear_output(True)
print("buffer size = %i, epsilon = %.5f" %
(len(exp_replay), agent.epsilon))
plt.subplot(1, 2, 1)
plt.title("mean reward per game")
plt.plot(mean_rw_history)
plt.grid()
assert not np.isnan(loss_t)
plt.figure(figsize=[12, 4])
plt.subplot(1, 2, 2)
plt.title("TD loss history (moving average)")
plt.plot(moving_average(
np.array(td_loss_history), span=100, min_periods=100))
plt.grid()
plt.show()
assert np.mean(mean_rw_history[-10:]) > 10.
print("That's good enough for tutorial.")
###Output
_____no_output_____
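###Markdown
If the agent stops exploring too early (epsilon already near 0.01 while the mean reward is still low), a linear schedule is easier to control than multiplicative decay; a sketch with illustrative numbers:
###Code
def linear_epsilon(step, start=0.5, end=0.01, decay_steps=30000):
    """anneal epsilon linearly from start down to end over decay_steps iterations"""
    frac = min(step / float(decay_steps), 1.0)
    return start + frac * (end - start)

# inside the main loop: agent.epsilon = linear_epsilon(i)
###Output
_____no_output_____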
###Markdown
__How to interpret plots:__ This ain't supervised learning, so don't expect anything to improve monotonically. * __TD loss__ is the MSE between the agent's current Q-values and the target Q-values. It may slowly increase or decrease; that's fine. The "not ok" behaviors are going NaN or staying at exactly zero before the agent has perfect performance.* __mean reward__ is the expected sum of r(s,a) the agent gets over a full game session. It will oscillate, but on average it should get higher over time (after a few thousand iterations...). * In a basic q-learning implementation it takes 5-10k steps to "warm up" the agent before it starts to get better.* __buffer size__ - this one is simple. It should go up and cap at max size.* __epsilon__ - the agent's willingness to explore. If the agent is already at 0.01 epsilon before its average reward is above 0, __you need to increase epsilon__. Set it back to some 0.2 - 0.5 and decrease the pace at which it goes down.* Also, please ignore the first 100-200 steps of each plot - they're just oscillations caused by the way the moving average works.At first your agent will lose quickly. Then it will learn to suck less and at least hit the ball a few times before it loses. Finally it will learn to actually score points.__Training will take time.__ A lot of it, actually. An optimistic estimate is that it starts winning (average reward > 10) after about 10k steps. But hey, look on the bright side of things: Video
###Code
# Don't forget to reset epsilon back to previous value if you want to go on training
agent.epsilon = 0
# record sessions
import gym.wrappers
env_monitor = gym.wrappers.Monitor(make_env(), directory="videos", force=True)
sessions = [evaluate(env_monitor, agent, n_games=1) for _ in range(100)]
env_monitor.close()
# show video
from IPython.display import HTML
import os
video_names = list(
filter(lambda s: s.endswith(".mp4"), os.listdir("./videos/")))
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format("./videos/"+video_names[-1])) # this may or may not be _last_ video. Try other indices
###Output
_____no_output_____
###Markdown
Deep Q-Network implementationThis notebook shamelessly demands that you implement a DQN - an approximate q-learning algorithm with experience replay and target networks - and see whether it works any better this way.
###Code
#XVFB will be launched if you run on a server
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
%env DISPLAY=:1
###Output
_____no_output_____
###Markdown
__Frameworks__ - we'll accept this homework in any deep learning framework. This particular notebook was designed for tensorflow, but you will find it easy to adapt it to almost any python-based deep learning framework.
###Code
import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Let's play some old video gamesThis time we're gonna apply approximate q-learning to an Atari game called Breakout. It's not the hardest thing out there, but it's definitely way more complex than anything we tried before. Processing game image Raw Atari images are large, 210x160x3 by default. However, we don't need that level of detail in order to learn from them.We can thus save a lot of time by preprocessing the game image, including* Resizing to a smaller shape, 64 x 64* Converting to grayscale* Cropping irrelevant image parts (top & bottom)
###Code
from gym.core import ObservationWrapper
from gym.spaces import Box
from scipy.misc import imresize
class PreprocessAtari(ObservationWrapper):
def __init__(self, env):
"""A gym wrapper that crops, scales image into the desired shapes and optionally grayscales it."""
ObservationWrapper.__init__(self,env)
self.img_size = (64, 64)
        self.observation_space = Box(0.0, 1.0, (self.img_size[0], self.img_size[1], 1))  # H x W x 1: grayscale with an explicit channel axis
def _observation(self, img):
"""what happens to each observation"""
# Here's what you need to do:
# * crop image, remove irrelevant parts
# * resize image to self.img_size
# (use imresize imported above or any library you want,
# e.g. opencv, skimage, PIL, keras)
# * cast image to grayscale
# * convert image pixels to (0,1) range, float32 type
        # one possible implementation:
        img = img[50:-5, 5:-5]                    # crop scores (top) and a thin border
        img = imresize(img, self.img_size)        # downsample to 64 x 64 (uint8)
        img = img.mean(-1, keepdims=True) / 255.  # grayscale, scale pixels to (0,1)
        return np.float32(img)                    # float32, shape (64, 64, 1)
import gym
#spawn game instance for tests
env = gym.make("BreakoutDeterministic-v0") #create raw env
env = PreprocessAtari(env)
observation_shape = env.observation_space.shape
n_actions = env.action_space.n
obs = env.reset()
#test observation
assert obs.ndim == 3, "observation must be [height, width, channels] even if there's just one channel"
assert obs.shape == observation_shape
assert obs.dtype == 'float32'
assert len(np.unique(obs))>2, "your image must not be binary"
assert 0 <= np.min(obs) and np.max(obs) <=1, "convert image pixels to (0,1) range"
print("Formal tests seem fine. Here's an example of what you'll get.")
plt.title("what your network gonna see")
plt.imshow(obs[:, :, 0], interpolation='none', cmap='gray');
###Output
_____no_output_____
###Markdown
Frame bufferOur agent can only process one observation at a time, so we gotta make sure it contains enough information to find optimal actions. For instance, the agent has to react to moving objects, so it must be able to measure the object's velocity.To do so, we introduce a buffer that stores the last 4 images. This time everything is pre-implemented for you.
###Code
from framebuffer import FrameBuffer
def make_env():
env = gym.make("BreakoutDeterministic-v4")
env = PreprocessAtari(env)
env = FrameBuffer(env, n_frames=4, dim_order='tensorflow')
return env
env = make_env()
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
for _ in range(50):
obs, _, _, _ = env.step(env.action_space.sample())
plt.title("Game image")
plt.imshow(env.render("rgb_array"))
plt.show()
plt.title("Agent observation (4 frames left to right)")
plt.imshow(obs.transpose([0,2,1]).reshape([state_dim[0],-1]));
###Output
_____no_output_____
###Markdown
Building a networkWe now need to build a neural network that can map images to state q-values. This network will be called on every agent step, so it had better not be ResNet-152 unless you have an array of GPUs. Instead, you can use strided convolutions with a small number of features to save time and memory.You can build any architecture you want, but for reference, here's something that will more or less work: 
###Code
import tensorflow as tf
tf.reset_default_graph()
sess = tf.InteractiveSession()
import keras
from keras.layers import Conv2D, Dense, Flatten, InputLayer
class DQNAgent:
def __init__(self, name, state_shape, n_actions, epsilon=0, reuse=False):
"""A simple DQN agent"""
with tf.variable_scope(name, reuse=reuse):
            # one reasonable choice of network body: three strided convolutions + a dense head
            self.network = keras.models.Sequential()
            self.network.add(InputLayer(state_shape))
            self.network.add(Conv2D(16, (3, 3), strides=2, activation='relu'))
            self.network.add(Conv2D(32, (3, 3), strides=2, activation='relu'))
            self.network.add(Conv2D(64, (3, 3), strides=2, activation='relu'))
            self.network.add(Flatten())
            self.network.add(Dense(256, activation='relu'))
            self.network.add(Dense(n_actions))
# prepare a graph for agent step
self.state_t = tf.placeholder('float32', [None,] + list(state_shape))
self.qvalues_t = self.get_symbolic_qvalues(self.state_t)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
self.epsilon = epsilon
def get_symbolic_qvalues(self, state_t):
"""takes agent's observation, returns qvalues. Both are tf Tensors"""
        qvalues = self.network(state_t)
assert tf.is_numeric_tensor(qvalues) and qvalues.shape.ndims == 2, \
"please return 2d tf tensor of qvalues [you got %s]" % repr(qvalues)
assert int(qvalues.shape[1]) == n_actions
return qvalues
def get_qvalues(self, state_t):
"""Same as symbolic step except it operates on numpy arrays"""
sess = tf.get_default_session()
return sess.run(self.qvalues_t, {self.state_t: state_t})
def sample_actions(self, qvalues):
"""pick actions given qvalues. Uses epsilon-greedy exploration strategy. """
epsilon = self.epsilon
batch_size, n_actions = qvalues.shape
random_actions = np.random.choice(n_actions, size=batch_size)
best_actions = qvalues.argmax(axis=-1)
should_explore = np.random.choice([0, 1], batch_size, p = [1-epsilon, epsilon])
return np.where(should_explore, random_actions, best_actions)
agent = DQNAgent("dqn_agent", state_dim, n_actions, epsilon=0.5)
sess.run(tf.global_variables_initializer())
###Output
_____no_output_____
###Markdown
Now let's try out our agent to see if it raises any errors.
###Code
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
""" Plays n_games full games. If greedy, picks actions as argmax(qvalues). Returns mean reward. """
rewards = []
for _ in range(n_games):
s = env.reset()
reward = 0
for _ in range(t_max):
qvalues = agent.get_qvalues([s])
action = qvalues.argmax(axis=-1)[0] if greedy else agent.sample_actions(qvalues)[0]
s, r, done, _ = env.step(action)
reward += r
if done: break
rewards.append(reward)
return np.mean(rewards)
evaluate(env, agent, n_games=1)
###Output
_____no_output_____
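###Markdown
`evaluate` also makes it easy to compare the greedy policy against the exploratory one on the same weights (a quick check, not part of the assignment):
###Code
print("greedy:    ", evaluate(env, agent, n_games=3, greedy=True))
print("eps-greedy:", evaluate(env, agent, n_games=3, greedy=False))
###Output
_____no_output_____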
###Markdown
Experience replayFor this assignment, we provide you with an experience replay buffer. If you implemented an experience replay buffer in last week's assignment, you can copy-paste it here __to get 2 bonus points__. The interface is fairly simple:* `exp_replay.add(obs, act, rw, next_obs, done)` - saves an (s,a,r,s',done) tuple into the buffer* `exp_replay.sample(batch_size)` - returns observations, actions, rewards, next_observations and is_done for `batch_size` random samples* `len(exp_replay)` - returns the number of elements stored in the replay buffer.
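For reference, a minimal buffer with exactly this interface fits in a few lines (a sketch, not the provided `replay_buffer` module):
###Code
import random
from collections import deque

class SimpleReplayBuffer:
    def __init__(self, size):
        self._storage = deque(maxlen=size)  # old transitions fall off the front
    def __len__(self):
        return len(self._storage)
    def add(self, obs, act, rw, next_obs, done):
        self._storage.append((obs, act, rw, next_obs, done))
    def sample(self, batch_size):
        batch = random.sample(list(self._storage), batch_size)
        return tuple(np.array(x) for x in zip(*batch))
###Output
_____no_output_____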
###Code
from replay_buffer import ReplayBuffer
exp_replay = ReplayBuffer(10)
for _ in range(30):
exp_replay.add(env.reset(), env.action_space.sample(), 1.0, env.reset(), done=False)
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(5)
assert len(exp_replay) == 10, "experience replay size should be 10 because that's what maximum capacity is"
def play_and_record(agent, env, exp_replay, n_steps=1):
"""
Play the game for exactly n steps, record every (s,a,r,s', done) to replay buffer.
Whenever game ends, add record with done=True and reset the game.
It is guaranteed that env has done=False when passed to this function.
:returns: return sum of rewards over time
"""
# Play the game for n_steps as per instructions above
    # initial state
    s = env.framebuffer
    total_reward = 0.0
    for _ in range(n_steps):
        qvalues = agent.get_qvalues([s])
        action = agent.sample_actions(qvalues)[0]
        next_s, r, done, _ = env.step(action)
        exp_replay.add(s, action, r, next_s, done)
        total_reward += r
        s = next_s
        if done:
            s = env.reset()
    return total_reward
# testing your code. This may take a minute...
exp_replay = ReplayBuffer(20000)
play_and_record(agent, env, exp_replay, n_steps=10000)
# if you're using your own experience replay buffer, some of those tests may need correction.
# just make sure you know what your code does
assert len(exp_replay) == 10000, "play_and_record should have added exactly 10000 steps, "\
"but instead added %i"%len(exp_replay)
is_dones = list(zip(*exp_replay._storage))[-1]
assert 0 < np.mean(is_dones) < 0.1, "Please make sure you restart the game whenever it is 'done' and record the is_done correctly into the buffer."\
"Got %f is_done rate over %i steps. [If you think it's your tough luck, just re-run the test]"%(np.mean(is_dones), len(exp_replay))
for _ in range(100):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(10)
assert obs_batch.shape == next_obs_batch.shape == (10,) + state_dim
assert act_batch.shape == (10,), "actions batch should have shape (10,) but is instead %s"%str(act_batch.shape)
assert reward_batch.shape == (10,), "rewards batch should have shape (10,) but is instead %s"%str(reward_batch.shape)
assert is_done_batch.shape == (10,), "is_done batch should have shape (10,) but is instead %s"%str(is_done_batch.shape)
assert all(int(i) in (0, 1) for i in is_dones), "is_done should be strictly True or False"
assert all(0 <= a <= n_actions for a in act_batch), "actions should be within [0, n_actions]"
print("Well done!")
###Output
_____no_output_____
###Markdown
Target networksWe also employ the so-called "target network" - a copy of the neural network weights to be used for reference Q-values:The network itself is an exact copy of the agent network, but its parameters are not trained. Instead, they are copied over from the agent's actual network every so often.$$ Q_{reference}(s,a) = r + \gamma \cdot \max _{a'} Q_{target}(s',a') $$
###Code
target_network = DQNAgent("target_network", state_dim, n_actions)
def load_weigths_into_target_network(agent, target_network):
""" assign target_network.weights variables to their respective agent.weights values. """
assigns = []
for w_agent, w_target in zip(agent.weights, target_network.weights):
assigns.append(tf.assign(w_target, w_agent, validate_shape=True))
tf.get_default_session().run(assigns)
load_weigths_into_target_network(agent, target_network)
# check that it works
sess.run([tf.assert_equal(w, w_target) for w, w_target in zip(agent.weights, target_network.weights)]);
print("It works!")
###Output
_____no_output_____
###Markdown
Learning with... Q-learningHere we write a function similar to `agent.update` from tabular q-learning.
###Code
# placeholders that will be fed with exp_replay.sample(batch_size)
obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
actions_ph = tf.placeholder(tf.int32, shape=[None])
rewards_ph = tf.placeholder(tf.float32, shape=[None])
next_obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
is_done_ph = tf.placeholder(tf.float32, shape=[None])
is_not_done = 1 - is_done_ph
gamma = 0.99
###Output
_____no_output_____
###Markdown
Take q-values for the actions the agent just took
###Code
current_qvalues = agent.get_symbolic_qvalues(obs_ph)
current_action_qvalues = tf.reduce_sum(tf.one_hot(actions_ph, n_actions) * current_qvalues, axis=1)
###Output
_____no_output_____
###Markdown
Compute the Q-learning TD error:
$$ L = { 1 \over N} \sum_i [ Q_{\theta}(s,a) - Q_{reference}(s,a) ] ^2 $$
with the Q-reference defined as
$$ Q_{reference}(s,a) = r(s,a) + \gamma \cdot \max_{a'} Q_{target}(s', a') $$
where
* $Q_{target}(s',a')$ denotes the q-value of the next state and next action predicted by the __target_network__
* $s, a, r, s'$ are the current state, action, reward and next state respectively
* $\gamma$ is the discount factor defined two cells above.
###Code
next_qvalues_target = ### YOUR CODE: compute q-values for NEXT states with target network
next_state_values_target = ### YOUR CODE: compute state values by taking max over next_qvalues_target for all actions
reference_qvalues = ### YOUR CODE: compute Q_reference(s,a) as per formula above
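# A hedged sketch of the three placeholders above. The is_not_done mask makes
# terminal transitions bootstrap to zero, which the asserts below check:
# next_qvalues_target = target_network.get_symbolic_qvalues(next_obs_ph)
# next_state_values_target = tf.reduce_max(next_qvalues_target, axis=1)
# reference_qvalues = rewards_ph + gamma * is_not_done * next_state_values_target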
# Define loss function for sgd.
td_loss = (current_action_qvalues - reference_qvalues) ** 2
td_loss = tf.reduce_mean(td_loss)
train_step = tf.train.AdamOptimizer(1e-3).minimize(td_loss, var_list=agent.weights)
sess.run(tf.global_variables_initializer())
for chk_grad in tf.gradients(reference_qvalues, agent.weights):
error_msg = "Reference q-values should have no gradient w.r.t. agent weights. Make sure you used target_network qvalues! "
error_msg += "If you know what you're doing, ignore this assert."
assert chk_grad is None or np.allclose(sess.run(chk_grad), sess.run(chk_grad * 0)), error_msg
assert tf.gradients(reference_qvalues, is_not_done)[0] is not None, "make sure you used is_not_done"
assert tf.gradients(reference_qvalues, rewards_ph)[0] is not None, "make sure you used rewards"
assert tf.gradients(reference_qvalues, next_obs_ph)[0] is not None, "make sure you used next states"
assert tf.gradients(reference_qvalues, obs_ph)[0] is None, "reference qvalues shouldn't depend on current observation!" # ignore if you're certain it's ok
print("Splendid!")
###Output
_____no_output_____
###Markdown
Main loop
It's time to put everything together and see if it learns anything.
###Code
from tqdm import trange
from IPython.display import clear_output
import matplotlib.pyplot as plt
from pandas import DataFrame
moving_average = lambda x, span=100, **kw: DataFrame({'x': np.asarray(x)}).x.ewm(span=span, **kw).mean().values
%matplotlib inline
mean_rw_history = []
td_loss_history = []
exp_replay = ReplayBuffer(10**5)
play_and_record(agent, env, exp_replay, n_steps=10000)
def sample_batch(exp_replay, batch_size):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(batch_size)
return {
obs_ph:obs_batch, actions_ph:act_batch, rewards_ph:reward_batch,
next_obs_ph:next_obs_batch, is_done_ph:is_done_batch
}
for i in trange(10**5):
# play
play_and_record(agent, env, exp_replay, 10)
# train
_, loss_t = sess.run([train_step, td_loss], sample_batch(exp_replay, batch_size=64))
td_loss_history.append(loss_t)
# adjust agent parameters
if i % 500 == 0:
load_weigths_into_target_network(agent, target_network)
agent.epsilon = max(agent.epsilon * 0.99, 0.01)
mean_rw_history.append(evaluate(make_env(), agent, n_games=3))
if i % 100 == 0:
clear_output(True)
print("buffer size = %i, epsilon = %.5f" % (len(exp_replay), agent.epsilon))
plt.subplot(1,2,1)
plt.title("mean reward per game")
plt.plot(mean_rw_history)
plt.grid()
assert not np.isnan(loss_t)
plt.figure(figsize=[12, 4])
plt.subplot(1,2,2)
plt.title("TD loss history (moving average)")
plt.plot(moving_average(np.array(td_loss_history), span=100, min_periods=100))
plt.grid()
plt.show()
assert np.mean(mean_rw_history[-10:]) > 10.
print("That's good enough for tutorial.")
###Output
_____no_output_____
###Markdown
__How to interpret plots:__
This ain't supervised learning, so don't expect anything to improve monotonously.
* __TD loss__ is the MSE between the agent's current Q-values and the target Q-values. It may slowly increase or decrease, that's ok. The "not ok" behavior includes going NaN or staying at exactly zero before the agent has perfect performance.
* __mean reward__ is the expected sum of r(s,a) the agent gets over the full game session. It will oscillate, but on average it should get higher over time (after a few thousand iterations...).
  * In a basic q-learning implementation it takes 5-10k steps to "warm up" the agent before it starts to get better.
* __buffer size__ - this one is simple. It should go up and cap at max size.
* __epsilon__ - the agent's willingness to explore. If you see that the agent is already at 0.01 epsilon before its average reward is above 0 - __it means you need to increase epsilon__. Set it back to some 0.2 - 0.5 and decrease the pace at which it goes down.
* Also please ignore the first 100-200 steps of each plot - they're just oscillations caused by the way the moving average works.
At first your agent will lose quickly. Then it will learn to suck less and at least hit the ball a few times before it loses. Finally it will learn to actually score points.
__Training will take time.__ A lot of it, actually. An optimistic estimate is to say it's gonna start winning (average reward > 10) after 10k steps. But hey, look on the bright side of things:
Video
###Code
agent.epsilon=0 # Don't forget to reset epsilon back to previous value if you want to go on training
#record sessions
import gym.wrappers
env_monitor = gym.wrappers.Monitor(make_env(),directory="videos",force=True)
sessions = [evaluate(env_monitor, agent, n_games=1) for _ in range(100)]
env_monitor.close()
#show video
from IPython.display import HTML
import os
video_names = list(filter(lambda s:s.endswith(".mp4"),os.listdir("./videos/")))
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format("./videos/"+video_names[-1])) #this may or may not be _last_ video. Try other indices
###Output
_____no_output_____
###Markdown
Deep Q-Network implementation
This notebook shamelessly demands that you implement a DQN - an approximate q-learning algorithm with experience replay and target networks - and see if it works any better this way.
###Code
# XVFB will be launched if you run on a server
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
%env DISPLAY=:1
###Output
_____no_output_____
###Markdown
__Frameworks__ - we'll accept this homework in any deep learning framework. This particular notebook was designed for tensorflow, but you will find it easy to adapt it to almost any python-based deep learning framework.
###Code
import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Let's play some old videogames
This time we're gonna apply approximate q-learning to an atari game called Breakout. It's not the hardest thing out there, but it's definitely way more complex than anything we tried before.
Processing game image
Raw atari images are large, 210x160x3 by default. However, we don't need that level of detail in order to learn from them. We can thus save a lot of time by preprocessing the game image, including:
* Resizing to a smaller shape, 64 x 64
* Converting to grayscale
* Cropping irrelevant image parts (top & bottom)
###Code
from gym.core import ObservationWrapper
from gym.spaces import Box
from scipy.misc import imresize
import cv2
class PreprocessAtari(ObservationWrapper):
def __init__(self, env):
"""A gym wrapper that crops, scales image into the desired shapes and optionally grayscales it."""
ObservationWrapper.__init__(self, env)
self.img_size = (1, 64, 64)
self.observation_space = Box(0.0, 1.0, self.img_size)
def _observation(self, img):
"""what happens to each observation"""
# Here's what you need to do:
# * crop image, remove irrelevant parts
# * resize image to self.img_size
# (use imresize imported above or any library you want,
# e.g. opencv, skimage, PIL, keras)
# * cast image to grayscale
# * convert image pixels to (0,1) range, float32 type
img = cv2.resize(img, self.img_size[1:], interpolation=cv2.INTER_AREA)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
rows, cols = img.shape
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 90, 1)
img = cv2.warpAffine(img, M, (cols, rows))
img = (img / 255).astype('float32')[:, :, np.newaxis]
return img.transpose()
import gym
# spawn game instance for tests
env = gym.make("BreakoutDeterministic-v0") # create raw env
env = PreprocessAtari(env)
observation_shape = env.observation_space.shape
n_actions = env.action_space.n
obs = env.reset()
print(obs.shape)
# test observation
assert obs.ndim == 3, "observation must be [batch, time, channels] even if there's just one channel"
assert obs.shape == observation_shape
assert obs.dtype == 'float32'
assert len(np.unique(obs)) > 2, "your image must not be binary"
assert 0 <= np.min(obs) and np.max(
obs) <= 1, "convert image pixels to (0,1) range"
print("Formal tests seem fine. Here's an example of what you'll get.")
plt.title("what your network gonna see")
plt.imshow(obs[0], interpolation='none', cmap='gray')
###Output
(1, 64, 64)
Formal tests seem fine. Here's an example of what you'll get.
###Markdown
Frame buffer
Our agent can only process one observation at a time, so we gotta make sure it contains enough information to find optimal actions. For instance, the agent has to react to moving objects, so it must be able to measure an object's velocity. To do so, we introduce a buffer that stores the last 4 images. This time everything is pre-implemented for you.
###Code
from framebuffer import FrameBuffer
def make_env():
env = gym.make("BreakoutDeterministic-v4")
env = PreprocessAtari(env)
env = FrameBuffer(env, n_frames=4, dim_order='tensorflow')
return env
env = make_env()
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
for _ in range(50):
obs, _, _, _ = env.step(env.action_space.sample())
plt.title("Game image")
plt.imshow(env.render("rgb_array"))
plt.show()
plt.title("Agent observation (4 frames left to right)")
plt.imshow(obs.transpose([0, 2, 1]).reshape([state_dim[0], -1]))
plt.show()
###Output
_____no_output_____
###Markdown
Building a network
We now need to build a neural network that can map images to state q-values. This network will be called on every agent step, so it had better not be resnet-152 unless you have an array of GPUs. Instead, you can use strided convolutions with a small number of features to save time and memory.
You can build any architecture you want, but for reference, here's something that will more or less work: 
###Code
import tensorflow as tf
tf.reset_default_graph()
sess = tf.InteractiveSession()
from keras.layers import Conv2D, Dense, Flatten
from keras.models import Sequential
class DQNAgent:
def __init__(self, name, state_shape, n_actions, epsilon=0, reuse=False):
"""A simple DQN agent"""
with tf.variable_scope(name, reuse=reuse):
self.model = Sequential()
self.model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
self.model.add(Conv2D(32, (3, 3), activation='relu'))
self.model.add(Conv2D(64, (3, 3), activation='relu'))
self.model.add(Flatten())
self.model.add(Dense(256, activation='relu'))  # Keras Dense takes only the output size, not (in, out)
self.model.add(Dense(n_actions))
# prepare a graph for agent step
self.state_t = tf.placeholder('float32', [None, ] + list(state_shape))
self.qvalues_t = self.get_symbolic_qvalues(self.state_t)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
self.epsilon = epsilon
def get_symbolic_qvalues(self, state_t):
"""takes agent's observation, returns qvalues. Both are tf Tensors"""
qvalues = self.model(state_t)  # apply the Keras model defined in __init__
assert tf.is_numeric_tensor(qvalues) and qvalues.shape.ndims == 2, \
"please return 2d tf tensor of qvalues [you got %s]" % repr(
qvalues)
assert int(qvalues.shape[1]) == n_actions
return qvalues
def get_qvalues(self, state_t):
"""Same as symbolic step except it operates on numpy arrays"""
sess = tf.get_default_session()
return sess.run(self.qvalues_t, {self.state_t: state_t})
def sample_actions(self, qvalues):
"""pick actions given qvalues. Uses epsilon-greedy exploration strategy. """
epsilon = self.epsilon
batch_size, n_actions = qvalues.shape
random_actions = np.random.choice(n_actions, size=batch_size)
best_actions = qvalues.argmax(axis=-1)
should_explore = np.random.choice(
[0, 1], batch_size, p=[1-epsilon, epsilon])
return np.where(should_explore, random_actions, best_actions)
agent = DQNAgent("dqn_agent", state_dim, n_actions, epsilon=0.5)
sess.run(tf.global_variables_initializer())
###Output
_____no_output_____
###Markdown
Now let's try out our agent to see if it raises any errors.
###Code
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
""" Plays n_games full games. If greedy, picks actions as argmax(qvalues). Returns mean reward. """
rewards = []
for _ in range(n_games):
s = env.reset()
reward = 0
for _ in range(t_max):
qvalues = agent.get_qvalues([s])
action = qvalues.argmax(
axis=-1)[0] if greedy else agent.sample_actions(qvalues)[0]
s, r, done, _ = env.step(action)
reward += r
if done:
break
rewards.append(reward)
return np.mean(rewards)
evaluate(env, agent, n_games=1)
###Output
_____no_output_____
###Markdown
Experience replay
For this assignment, we provide you with an experience replay buffer. If you implemented an experience replay buffer in last week's assignment, you can copy-paste it here __to get 2 bonus points__. The interface is fairly simple:
* `exp_replay.add(obs, act, rw, next_obs, done)` - saves the (s,a,r,s',done) tuple into the buffer
* `exp_replay.sample(batch_size)` - returns observations, actions, rewards, next_observations and is_done for `batch_size` random samples.
* `len(exp_replay)` - returns the number of elements stored in the replay buffer.
###Code
from replay_buffer import ReplayBuffer
exp_replay = ReplayBuffer(10)
for _ in range(30):
exp_replay.add(env.reset(), env.action_space.sample(),
1.0, env.reset(), done=False)
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(
5)
assert len(exp_replay) == 10, "experience replay size should be 10 because that's what maximum capacity is"
def play_and_record(agent, env, exp_replay, n_steps=1):
"""
Play the game for exactly n steps, record every (s,a,r,s', done) to replay buffer.
Whenever game ends, add record with done=True and reset the game.
It is guaranteed that env has done=False when passed to this function.
PLEASE DO NOT RESET ENV UNLESS IT IS "DONE"
:returns: return sum of rewards over time
"""
# initial state
s = env.framebuffer
# Play the game for n_steps as per instructions above
<YOUR CODE >
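# A hedged sketch of one valid fill-in, kept as comments so the placeholder
# stays in place (`s` was taken from env.framebuffer above, and we reset the
# env whenever an episode is done):
# total_reward = 0.0
# for _ in range(n_steps):
#     action = agent.sample_actions(agent.get_qvalues([s]))[0]
#     next_s, r, done, _ = env.step(action)
#     exp_replay.add(s, action, r, next_s, done)
#     total_reward += r
#     s = env.reset() if done else next_s
# return total_reward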
# testing your code. This may take a minute...
exp_replay = ReplayBuffer(20000)
play_and_record(agent, env, exp_replay, n_steps=10000)
# if you're using your own experience replay buffer, some of those tests may need correction.
# just make sure you know what your code does
assert len(exp_replay) == 10000, "play_and_record should have added exactly 10000 steps, "\
"but instead added %i" % len(exp_replay)
is_dones = list(zip(*exp_replay._storage))[-1]
assert 0 < np.mean(is_dones) < 0.1, "Please make sure you restart the game whenever it is 'done' and record the is_done correctly into the buffer."\
"Got %f is_done rate over %i steps. [If you think it's your tough luck, just re-run the test]" % (
np.mean(is_dones), len(exp_replay))
for _ in range(100):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(
10)
assert obs_batch.shape == next_obs_batch.shape == (10,) + state_dim
assert act_batch.shape == (
10,), "actions batch should have shape (10,) but is instead %s" % str(act_batch.shape)
assert reward_batch.shape == (
10,), "rewards batch should have shape (10,) but is instead %s" % str(reward_batch.shape)
assert is_done_batch.shape == (
10,), "is_done batch should have shape (10,) but is instead %s" % str(is_done_batch.shape)
assert all(int(i) in (0, 1) for i in is_dones), "is_done should be strictly True or False"
assert all(0 <= a <= n_actions for a in act_batch), "actions should be within [0, n_actions]"
print("Well done!")
###Output
_____no_output_____
###Markdown
Target networks
We also employ the so-called "target network" - a copy of the neural network weights used for reference Q-values. The network itself is an exact copy of the agent network, but its parameters are not trained. Instead, they are copied over from the agent's actual network every so often.
$$ Q_{reference}(s,a) = r + \gamma \cdot \max _{a'} Q_{target}(s',a') $$
###Code
target_network = DQNAgent("target_network", state_dim, n_actions)
def load_weigths_into_target_network(agent, target_network):
""" assign target_network.weights variables to their respective agent.weights values. """
assigns = []
for w_agent, w_target in zip(agent.weights, target_network.weights):
assigns.append(tf.assign(w_target, w_agent, validate_shape=True))
# tf.get_default_session().run(assigns)
return assigns
# create the tf copy graph only once.
copy_step = load_weigths_into_target_network(agent, target_network)
sess.run(copy_step)
# check that it works
sess.run([tf.assert_equal(w, w_target)
for w, w_target in zip(agent.weights, target_network.weights)])
print("It works!")
###Output
_____no_output_____
###Markdown
Learning with... Q-learning
Here we write a function similar to `agent.update` from tabular q-learning.
###Code
# placeholders that will be fed with exp_replay.sample(batch_size)
obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
actions_ph = tf.placeholder(tf.int32, shape=[None])
rewards_ph = tf.placeholder(tf.float32, shape=[None])
next_obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
is_done_ph = tf.placeholder(tf.float32, shape=[None])
is_not_done = 1 - is_done_ph
gamma = 0.99
###Output
_____no_output_____
###Markdown
Take q-values for the actions the agent just took
###Code
current_qvalues = agent.get_symbolic_qvalues(obs_ph)
current_action_qvalues = tf.reduce_sum(tf.one_hot(
actions_ph, n_actions) * current_qvalues, axis=1)
###Output
_____no_output_____
###Markdown
Compute the Q-learning TD error:
$$ L = { 1 \over N} \sum_i [ Q_{\theta}(s,a) - Q_{reference}(s,a) ] ^2 $$
with the Q-reference defined as
$$ Q_{reference}(s,a) = r(s,a) + \gamma \cdot \max_{a'} Q_{target}(s', a') $$
where
* $Q_{target}(s',a')$ denotes the q-value of the next state and next action predicted by the __target_network__
* $s, a, r, s'$ are the current state, action, reward and next state respectively
* $\gamma$ is the discount factor defined two cells above.
###Code
next_qvalues_target =  # YOUR CODE: compute q-values for NEXT states with target network
next_state_values_target =  # YOUR CODE: compute state values by taking max over next_qvalues_target for all actions
reference_qvalues =  # YOUR CODE: compute Q_reference(s,a) as per formula above
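# A hedged sketch of the three placeholders above (the is_not_done mask zeroes
# the bootstrap term for terminal transitions, which the asserts below check):
# next_qvalues_target = target_network.get_symbolic_qvalues(next_obs_ph)
# next_state_values_target = tf.reduce_max(next_qvalues_target, axis=1)
# reference_qvalues = rewards_ph + gamma * is_not_done * next_state_values_target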
# Define loss function for sgd.
td_loss = (current_action_qvalues - reference_qvalues) ** 2
td_loss = tf.reduce_mean(td_loss)
train_step = tf.train.AdamOptimizer(
1e-3).minimize(td_loss, var_list=agent.weights)
sess.run(tf.global_variables_initializer())
for chk_grad in tf.gradients(reference_qvalues, agent.weights):
error_msg = "Reference q-values should have no gradient w.r.t. agent weights. Make sure you used target_network qvalues! "
error_msg += "If you know what you're doing, ignore this assert."
assert chk_grad is None or np.allclose(
sess.run(chk_grad), sess.run(chk_grad * 0)), error_msg
assert tf.gradients(reference_qvalues, is_not_done)[
0] is not None, "make sure you used is_not_done"
assert tf.gradients(reference_qvalues, rewards_ph)[
0] is not None, "make sure you used rewards"
assert tf.gradients(reference_qvalues, next_obs_ph)[
0] is not None, "make sure you used next states"
assert tf.gradients(reference_qvalues, obs_ph)[
0] is None, "reference qvalues shouldn't depend on current observation!" # ignore if you're certain it's ok
print("Splendid!")
###Output
_____no_output_____
###Markdown
Main loop
It's time to put everything together and see if it learns anything.
###Code
from tqdm import trange
from IPython.display import clear_output
import matplotlib.pyplot as plt
from pandas import DataFrame
moving_average = lambda x, span=100, **kw: DataFrame(
{'x': np.asarray(x)}).x.ewm(span=span, **kw).mean().values
%matplotlib inline
mean_rw_history = []
td_loss_history = []
exp_replay = ReplayBuffer(10**5)
play_and_record(agent, env, exp_replay, n_steps=10000)
def sample_batch(exp_replay, batch_size):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(
batch_size)
return {
obs_ph: obs_batch, actions_ph: act_batch, rewards_ph: reward_batch,
next_obs_ph: next_obs_batch, is_done_ph: is_done_batch
}
for i in trange(10**5):
# play
play_and_record(agent, env, exp_replay, 10)
# train
_, loss_t = sess.run([train_step, td_loss],
sample_batch(exp_replay, batch_size=64))
td_loss_history.append(loss_t)
# adjust agent parameters
if i % 500 == 0:
# load_weigths_into_target_network(agent, target_network)
# calling 'load_weigths_into_target_network' repeatedly creates the tf copy operators
# again and again, which bloats memory consumption over the training run,
# so we create 'copy_step' once and just run it here
sess.run(copy_step)
agent.epsilon = max(agent.epsilon * 0.99, 0.01)
mean_rw_history.append(evaluate(make_env(), agent, n_games=3))
if i % 100 == 0:
clear_output(True)
print("buffer size = %i, epsilon = %.5f" %
(len(exp_replay), agent.epsilon))
plt.subplot(1, 2, 1)
plt.title("mean reward per game")
plt.plot(mean_rw_history)
plt.grid()
assert not np.isnan(loss_t)
plt.figure(figsize=[12, 4])
plt.subplot(1, 2, 2)
plt.title("TD loss history (moving average)")
plt.plot(moving_average(
np.array(td_loss_history), span=100, min_periods=100))
plt.grid()
plt.show()
assert np.mean(mean_rw_history[-10:]) > 10.
print("That's good enough for tutorial.")
###Output
_____no_output_____
###Markdown
__How to interpret plots:__
This ain't supervised learning, so don't expect anything to improve monotonously.
* __TD loss__ is the MSE between the agent's current Q-values and the target Q-values. It may slowly increase or decrease, that's ok. The "not ok" behavior includes going NaN or staying at exactly zero before the agent has perfect performance.
* __mean reward__ is the expected sum of r(s,a) the agent gets over the full game session. It will oscillate, but on average it should get higher over time (after a few thousand iterations...).
  * In a basic q-learning implementation it takes 5-10k steps to "warm up" the agent before it starts to get better.
* __buffer size__ - this one is simple. It should go up and cap at max size.
* __epsilon__ - the agent's willingness to explore. If you see that the agent is already at 0.01 epsilon before its average reward is above 0 - __it means you need to increase epsilon__. Set it back to some 0.2 - 0.5 and decrease the pace at which it goes down.
* Also please ignore the first 100-200 steps of each plot - they're just oscillations caused by the way the moving average works.
At first your agent will lose quickly. Then it will learn to suck less and at least hit the ball a few times before it loses. Finally it will learn to actually score points.
__Training will take time.__ A lot of it, actually. An optimistic estimate is to say it's gonna start winning (average reward > 10) after 10k steps. But hey, look on the bright side of things:
Video
###Code
# Don't forget to reset epsilon back to previous value if you want to go on training
agent.epsilon = 0
# record sessions
import gym.wrappers
env_monitor = gym.wrappers.Monitor(make_env(), directory="videos", force=True)
sessions = [evaluate(env_monitor, agent, n_games=1) for _ in range(100)]
env_monitor.close()
# show video
from IPython.display import HTML
import os
video_names = list(
filter(lambda s: s.endswith(".mp4"), os.listdir("./videos/")))
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format("./videos/"+video_names[-1])) # this may or may not be _last_ video. Try other indices
###Output
_____no_output_____
###Markdown
Deep Q-Network implementation
This notebook shamelessly demands that you implement a DQN - an approximate q-learning algorithm with experience replay and target networks - and see if it works any better this way.
###Code
#XVFB will be launched if you run on a server
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY"))==0:
!bash ../xvfb start
%env DISPLAY=:1
###Output
_____no_output_____
###Markdown
__Frameworks__ - we'll accept this homework in any deep learning framework. This particular notebook was designed for tensorflow, but you will find it easy to adapt it to almost any python-based deep learning framework.
###Code
import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Let's play some old videogames
This time we're gonna apply approximate q-learning to an atari game called Breakout. It's not the hardest thing out there, but it's definitely way more complex than anything we tried before.
Processing game image
Raw atari images are large, 210x160x3 by default. However, we don't need that level of detail in order to learn from them. We can thus save a lot of time by preprocessing the game image, including:
* Resizing to a smaller shape, 64 x 64
* Converting to grayscale
* Cropping irrelevant image parts (top & bottom)
###Code
from gym.core import ObservationWrapper
from gym.spaces import Box
from scipy.misc import imresize
class PreprocessAtari(ObservationWrapper):
def __init__(self, env):
"""A gym wrapper that crops, scales image into the desired shapes and optionally grayscales it."""
ObservationWrapper.__init__(self,env)
self.img_size = (64, 64)
self.observation_space = Box(0.0, 1.0, self.img_size)
def _observation(self, img):
"""what happens to each observation"""
# Here's what you need to do:
# * crop image, remove irrelevant parts
# * resize image to self.img_size
# (use imresize imported above or any library you want,
# e.g. opencv, skimage, PIL, keras)
# * cast image to grayscale
# * convert image pixels to (0,1) range, float32 type
<Your code here>
return <...>
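# A hedged sketch of one possible implementation, kept as comments so the
# placeholder stays. The crop bounds are an assumption that works for
# Breakout's 210x160 frames; any equivalent preprocessing is fine:
# img = img[34:-16, :, :]                # crop scoreboard & bottom border
# img = imresize(img, self.img_size)     # resize to (64, 64)
# img = img.mean(-1)                     # grayscale by averaging channels
# img = (img / 255.).astype('float32')   # scale pixels to the (0, 1) range
# return img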
import gym
#spawn game instance for tests
env = gym.make("BreakoutDeterministic-v0") #create raw env
env = PreprocessAtari(env)
observation_shape = env.observation_space.shape
n_actions = env.action_space.n
obs = env.reset()
#test observation
assert obs.ndim == 3, "observation must be [batch, time, channels] even if there's just one channel"
assert obs.shape == observation_shape
assert obs.dtype == 'float32'
assert len(np.unique(obs))>2, "your image must not be binary"
assert 0 <= np.min(obs) and np.max(obs) <=1, "convert image pixels to (0,1) range"
print "Formal tests seem fine. Here's an example of what you'll get."
plt.title("what your network gonna see")
plt.imshow(obs,interpolation='none',cmap='gray');
###Output
_____no_output_____
###Markdown
Frame buffer
Our agent can only process one observation at a time, so we gotta make sure it contains enough information to find optimal actions. For instance, the agent has to react to moving objects, so it must be able to measure an object's velocity. To do so, we introduce a buffer that stores the last 4 images. This time everything is pre-implemented for you.
###Code
from framebuffer import FrameBuffer
def make_env():
env = gym.make("BreakoutDeterministic-v4")
env = PreprocessAtari(env)
env = FrameBuffer(env, n_frames=4, dim_order='tensorflow')
return env
env = make_env()
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
for _ in range(50):
obs, _, _, _ = env.step(env.action_space.sample())
plt.title("Game image")
plt.imshow(env.render("rgb_array"))
plt.show()
plt.title("Agent observation (4 frames left to right)")
plt.imshow(obs.transpose([0,2,1]).reshape([state_dim[0],-1]));
###Output
_____no_output_____
###Markdown
Building a network
We now need to build a neural network that can map images to state q-values. This network will be called on every agent step, so it had better not be resnet-152 unless you have an array of GPUs. Instead, you can use strided convolutions with a small number of features to save time and memory.
You can build any architecture you want, but for reference, here's something that will more or less work: 
###Code
import tensorflow as tf
tf.reset_default_graph()
sess = tf.InteractiveSession()
from keras.layers import Conv2D, Dense, Flatten
class DQNAgent:
def __init__(self, name, state_shape, n_actions, epsilon=0, reuse=False):
"""A simple DQN agent"""
with tf.variable_scope(name, reuse=reuse):
< Define your network body here. Please make sure you don't use any layers created elsewhere >
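# A hedged sketch matching the reference picture above (one of many
# reasonable choices; the attribute names are made up for illustration):
# self.conv1 = Conv2D(16, (3, 3), strides=2, activation='relu')
# self.conv2 = Conv2D(32, (3, 3), strides=2, activation='relu')
# self.conv3 = Conv2D(64, (3, 3), strides=2, activation='relu')
# self.flatten = Flatten()
# self.hidden = Dense(256, activation='relu')
# self.logits = Dense(n_actions)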
# prepare a graph for agent step
self.state_t = tf.placeholder('float32', [None,] + list(state_shape))
self.qvalues_t = self.get_symbolic_qvalues(self.state_t)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
self.epsilon = epsilon
def get_symbolic_qvalues(self, state_t):
"""takes agent's observation, returns qvalues. Both are tf Tensors"""
< apply your network layers here >
qvalues = < symbolic tensor for q-values >
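# hedged sketch, chaining the hypothetical layer names from the
# __init__ sketch above:
# h = self.conv3(self.conv2(self.conv1(state_t)))
# qvalues = self.logits(self.hidden(self.flatten(h)))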
assert tf.is_numeric_tensor(qvalues) and qvalues.shape.ndims == 2, \
"please return 2d tf tensor of qvalues [you got %s]" % repr(qvalues)
assert int(qvalues.shape[1]) == n_actions
return qvalues
def get_qvalues(self, state_t):
"""Same as symbolic step except it operates on numpy arrays"""
sess = tf.get_default_session()
return sess.run(self.qvalues_t, {self.state_t: state_t})
def sample_actions(self, qvalues):
"""pick actions given qvalues. Uses epsilon-greedy exploration strategy. """
epsilon = self.epsilon
batch_size, n_actions = qvalues.shape
random_actions = np.random.choice(n_actions, size=batch_size)
best_actions = qvalues.argmax(axis=-1)
should_explore = np.random.choice([0, 1], batch_size, p = [1-epsilon, epsilon])
return np.where(should_explore, random_actions, best_actions)
agent = DQNAgent("dqn_agent", state_dim, n_actions, epsilon=0.5)
sess.run(tf.global_variables_initializer())
###Output
_____no_output_____
###Markdown
Now let's try out our agent to see if it raises any errors.
###Code
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
""" Plays n_games full games. If greedy, picks actions as argmax(qvalues). Returns mean reward. """
rewards = []
for _ in range(n_games):
s = env.reset()
reward = 0
for _ in range(t_max):
qvalues = agent.get_qvalues([s])
action = qvalues.argmax(axis=-1)[0] if greedy else agent.sample_actions(qvalues)[0]
s, r, done, _ = env.step(action)
reward += r
if done: break
rewards.append(reward)
return np.mean(rewards)
evaluate(env, agent, n_games=1)
###Output
_____no_output_____
###Markdown
Experience replay
For this assignment, we provide you with an experience replay buffer. If you implemented an experience replay buffer in last week's assignment, you can copy-paste it here __to get 2 bonus points__. The interface is fairly simple:
* `exp_replay.add(obs, act, rw, next_obs, done)` - saves the (s,a,r,s',done) tuple into the buffer
* `exp_replay.sample(batch_size)` - returns observations, actions, rewards, next_observations and is_done for `batch_size` random samples.
* `len(exp_replay)` - returns the number of elements stored in the replay buffer.
###Code
from replay_buffer import ReplayBuffer
exp_replay = ReplayBuffer(10)
for _ in range(30):
exp_replay.add(env.reset(), env.action_space.sample(), 1.0, env.reset(), done=False)
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(5)
assert len(exp_replay) == 10, "experience replay size should be 10 because that's what maximum capacity is"
def play_and_record(agent, env, exp_replay, n_steps=1):
"""
Play the game for exactly n steps, record every (s,a,r,s', done) to replay buffer.
Whenever game ends, add record with done=True and reset the game.
It is guaranteed that env has done=False when passed to this function.
PLEASE DO NOT RESET ENV UNLESS IT IS "DONE"
:returns: return sum of rewards over time
"""
# initial state
s = env.framebuffer
# Play the game for n_steps as per instructions above
<YOUR CODE>
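# one possible fill-in, sketched as comments (not the only valid solution;
# `s` already holds env.framebuffer from the line above):
# total_reward = 0.0
# for _ in range(n_steps):
#     action = agent.sample_actions(agent.get_qvalues([s]))[0]
#     next_s, r, done, _ = env.step(action)
#     exp_replay.add(s, action, r, next_s, done)
#     total_reward += r
#     s = env.reset() if done else next_s
# return total_reward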
# testing your code. This may take a minute...
exp_replay = ReplayBuffer(20000)
play_and_record(agent, env, exp_replay, n_steps=10000)
# if you're using your own experience replay buffer, some of those tests may need correction.
# just make sure you know what your code does
assert len(exp_replay) == 10000, "play_and_record should have added exactly 10000 steps, "\
"but instead added %i"%len(exp_replay)
is_dones = list(zip(*exp_replay._storage))[-1]
assert 0 < np.mean(is_dones) < 0.1, "Please make sure you restart the game whenever it is 'done' and record the is_done correctly into the buffer."\
"Got %f is_done rate over %i steps. [If you think it's your tough luck, just re-run the test]"%(np.mean(is_dones), len(exp_replay))
for _ in range(100):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(10)
assert obs_batch.shape == next_obs_batch.shape == (10,) + state_dim
assert act_batch.shape == (10,), "actions batch should have shape (10,) but is instead %s"%str(act_batch.shape)
assert reward_batch.shape == (10,), "rewards batch should have shape (10,) but is instead %s"%str(reward_batch.shape)
assert is_done_batch.shape == (10,), "is_done batch should have shape (10,) but is instead %s"%str(is_done_batch.shape)
assert all(int(i) in (0, 1) for i in is_dones), "is_done should be strictly True or False"
assert all(0 <= a <= n_actions for a in act_batch), "actions should be within [0, n_actions]"
print("Well done!")
###Output
_____no_output_____
###Markdown
Target networks
We also employ the so-called "target network" - a copy of the neural network weights used for reference Q-values. The network itself is an exact copy of the agent network, but its parameters are not trained. Instead, they are copied over from the agent's actual network every so often.
$$ Q_{reference}(s,a) = r + \gamma \cdot \max _{a'} Q_{target}(s',a') $$
###Code
target_network = DQNAgent("target_network", state_dim, n_actions)
def load_weigths_into_target_network(agent, target_network):
""" assign target_network.weights variables to their respective agent.weights values. """
assigns = []
for w_agent, w_target in zip(agent.weights, target_network.weights):
assigns.append(tf.assign(w_target, w_agent, validate_shape=True))
tf.get_default_session().run(assigns)
load_weigths_into_target_network(agent, target_network)
# check that it works
sess.run([tf.assert_equal(w, w_target) for w, w_target in zip(agent.weights, target_network.weights)]);
print("It works!")
###Output
_____no_output_____
###Markdown
Learning with... Q-learning
Here we write a function similar to `agent.update` from tabular q-learning.
###Code
# placeholders that will be fed with exp_replay.sample(batch_size)
obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
actions_ph = tf.placeholder(tf.int32, shape=[None])
rewards_ph = tf.placeholder(tf.float32, shape=[None])
next_obs_ph = tf.placeholder(tf.float32, shape=(None,) + state_dim)
is_done_ph = tf.placeholder(tf.float32, shape=[None])
is_not_done = 1 - is_done_ph
gamma = 0.99
###Output
_____no_output_____
###Markdown
Take q-values for the actions the agent just took
###Code
current_qvalues = agent.get_symbolic_qvalues(obs_ph)
current_action_qvalues = tf.reduce_sum(tf.one_hot(actions_ph, n_actions) * current_qvalues, axis=1)
###Output
_____no_output_____
###Markdown
Compute the Q-learning TD error:
$$ L = { 1 \over N} \sum_i [ Q_{\theta}(s,a) - Q_{reference}(s,a) ] ^2 $$
with the Q-reference defined as
$$ Q_{reference}(s,a) = r(s,a) + \gamma \cdot \max_{a'} Q_{target}(s', a') $$
where
* $Q_{target}(s',a')$ denotes the q-value of the next state and next action predicted by the __target_network__
* $s, a, r, s'$ are the current state, action, reward and next state respectively
* $\gamma$ is the discount factor defined two cells above.
###Code
next_qvalues_target = ### YOUR CODE: compute q-values for NEXT states with target network
next_state_values_target = ### YOUR CODE: compute state values by taking max over next_qvalues_target for all actions
reference_qvalues = ### YOUR CODE: compute Q_reference(s,a) as per formula above
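# hedged sketch of the placeholders (uses is_not_done so terminal
# transitions don't bootstrap, as the asserts below require):
# next_qvalues_target = target_network.get_symbolic_qvalues(next_obs_ph)
# next_state_values_target = tf.reduce_max(next_qvalues_target, axis=1)
# reference_qvalues = rewards_ph + gamma * is_not_done * next_state_values_target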
# Define loss function for sgd.
td_loss = (current_action_qvalues - reference_qvalues) ** 2
td_loss = tf.reduce_mean(td_loss)
train_step = tf.train.AdamOptimizer(1e-3).minimize(td_loss, var_list=agent.weights)
sess.run(tf.global_variables_initializer())
for chk_grad in tf.gradients(reference_qvalues, agent.weights):
error_msg = "Reference q-values should have no gradient w.r.t. agent weights. Make sure you used target_network qvalues! "
error_msg += "If you know what you're doing, ignore this assert."
assert chk_grad is None or np.allclose(sess.run(chk_grad), sess.run(chk_grad * 0)), error_msg
assert tf.gradients(reference_qvalues, is_not_done)[0] is not None, "make sure you used is_not_done"
assert tf.gradients(reference_qvalues, rewards_ph)[0] is not None, "make sure you used rewards"
assert tf.gradients(reference_qvalues, next_obs_ph)[0] is not None, "make sure you used next states"
assert tf.gradients(reference_qvalues, obs_ph)[0] is None, "reference qvalues shouldn't depend on current observation!" # ignore if you're certain it's ok
print("Splendid!")
###Output
_____no_output_____
###Markdown
Main loop
It's time to put everything together and see if it learns anything.
###Code
from tqdm import trange
from IPython.display import clear_output
import matplotlib.pyplot as plt
from pandas import DataFrame
moving_average = lambda x, span=100, **kw: DataFrame({'x': np.asarray(x)}).x.ewm(span=span, **kw).mean().values
%matplotlib inline
mean_rw_history = []
td_loss_history = []
exp_replay = ReplayBuffer(10**5)
play_and_record(agent, env, exp_replay, n_steps=10000)
def sample_batch(exp_replay, batch_size):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(batch_size)
return {
obs_ph:obs_batch, actions_ph:act_batch, rewards_ph:reward_batch,
next_obs_ph:next_obs_batch, is_done_ph:is_done_batch
}
for i in trange(10**5):
# play
play_and_record(agent, env, exp_replay, 10)
# train
_, loss_t = sess.run([train_step, td_loss], sample_batch(exp_replay, batch_size=64))
td_loss_history.append(loss_t)
# adjust agent parameters
if i % 500 == 0:
load_weigths_into_target_network(agent, target_network)
agent.epsilon = max(agent.epsilon * 0.99, 0.01)
mean_rw_history.append(evaluate(make_env(), agent, n_games=3))
if i % 100 == 0:
clear_output(True)
print("buffer size = %i, epsilon = %.5f" % (len(exp_replay), agent.epsilon))
plt.subplot(1,2,1)
plt.title("mean reward per game")
plt.plot(mean_rw_history)
plt.grid()
assert not np.isnan(loss_t)
plt.figure(figsize=[12, 4])
plt.subplot(1,2,2)
plt.title("TD loss history (moving average)")
plt.plot(moving_average(np.array(td_loss_history), span=100, min_periods=100))
plt.grid()
plt.show()
assert np.mean(mean_rw_history[-10:]) > 10.
print("That's good enough for tutorial.")
###Output
_____no_output_____
###Markdown
__How to interpret plots:__
This ain't supervised learning, so don't expect anything to improve monotonously.
* __TD loss__ is the MSE between the agent's current Q-values and the target Q-values. It may slowly increase or decrease, that's ok. The "not ok" behavior includes going NaN or staying at exactly zero before the agent has perfect performance.
* __mean reward__ is the expected sum of r(s,a) the agent gets over the full game session. It will oscillate, but on average it should get higher over time (after a few thousand iterations...).
  * In a basic q-learning implementation it takes 5-10k steps to "warm up" the agent before it starts to get better.
* __buffer size__ - this one is simple. It should go up and cap at max size.
* __epsilon__ - the agent's willingness to explore. If you see that the agent is already at 0.01 epsilon before its average reward is above 0 - __it means you need to increase epsilon__. Set it back to some 0.2 - 0.5 and decrease the pace at which it goes down.
* Also please ignore the first 100-200 steps of each plot - they're just oscillations caused by the way the moving average works.
At first your agent will lose quickly. Then it will learn to suck less and at least hit the ball a few times before it loses. Finally it will learn to actually score points.
__Training will take time.__ A lot of it, actually. An optimistic estimate is to say it's gonna start winning (average reward > 10) after 10k steps. But hey, look on the bright side of things:
Video
###Code
agent.epsilon=0 # Don't forget to reset epsilon back to previous value if you want to go on training
#record sessions
import gym.wrappers
env_monitor = gym.wrappers.Monitor(make_env(),directory="videos",force=True)
sessions = [evaluate(env_monitor, agent, n_games=1) for _ in range(100)]
env_monitor.close()
#show video
from IPython.display import HTML
import os
video_names = list(filter(lambda s:s.endswith(".mp4"),os.listdir("./videos/")))
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format("./videos/"+video_names[-1])) #this may or may not be _last_ video. Try other indices
###Output
_____no_output_____ |
how-to-use-azureml/training/train-on-local/train-on-local.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.
02. Train locally
_**Train a model locally: Directly on your machine and within a Docker container**_
---
Table of contents
1. [Introduction](intro)
1. [Pre-requisites](pre-reqs)
1. [Initialize Workspace](init)
1. [Create An Experiment](exp)
1. [View training and auxiliary scripts](view)
1. [Configure & Run](config-run)
   1. User-managed environment
      1. Set the environment up
      1. Submit the script to run in the user-managed environment
      1. Get run history details
   1. System-managed environment
      1. Set the environment up
      1. Submit the script to run in the system-managed environment
      1. Get run history details
   1. Docker-based execution
      1. Set the environment up
      1. Submit the script to run in the system-managed environment
      1. Get run history details
      1. Use a custom Docker image
1. [Query run metrics](query)
---
1. Introduction
In this notebook, we will learn how to:
* Connect to our AML workspace
* Create or load a workspace
* Configure & execute a local run in:
  - a user-managed Python environment
  - a system-managed Python environment
  - a Docker environment
* Query run metrics to find the best model trained in the run
* Register that model for operationalization
2. Pre-requisites
In this notebook, we assume that you have set up your Azure Machine Learning workspace. If you have not, make sure you go through the [configuration notebook](../../../configuration.ipynb) first. In the end, you should have a configuration file that contains the subscription ID, resource group and name of your workspace.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
3. Initialize Workspace
Initialize your workspace object from the configuration file.
###Code
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')
###Output
_____no_output_____
###Markdown
4. Create An Experiment
An experiment is a logical container in an Azure ML Workspace. It contains a series of trials called `Runs`. As such, it hosts run records such as run metrics, logs, and other output artifacts from your experiments.
###Code
from azureml.core import Experiment
experiment_name = 'train-on-local'
exp = Experiment(workspace=ws, name=experiment_name)
###Output
_____no_output_____
###Markdown
5. View training and auxiliary scripts
For convenience, we already created the training script (`train.py`) and a supporting library (`mylib.py`) for you. Take a few minutes to examine both files.
###Code
with open('./train.py', 'r') as f:
print(f.read())
with open('./mylib.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
6. Configure & Run
6.A User-managed environment
6.A.a Set the environment up
When using a user-managed environment, you are responsible for ensuring that all the necessary packages are available in the Python environment you choose to run the script in.
###Code
from azureml.core import Environment
# Editing a run configuration property on the fly.
user_managed_env = Environment("user-managed-env")
user_managed_env.python.user_managed_dependencies = True
# You can choose a specific Python environment by pointing to a Python path
#user_managed_env.python.interpreter_path = '/home/johndoe/miniconda3/envs/myenv/bin/python'
###Output
_____no_output_____
###Markdown
6.A.b Submit the script to run in the user-managed environment
However you manage your environment, you need to use the `ScriptRunConfig` class. It allows you to further configure your run by pointing to the `train.py` script and to the working directory, which also contains the `mylib.py` file. These inputs provide the commands to execute in the run. Once the run is configured, you submit it to your experiment.
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory='./', script='train.py', environment=user_managed_env)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
6.A.c Get run history details
While all calculations were run on your machine (cf. below), by using a `run` you also captured the results of your calculations into your run and experiment. You can then see them on the Azure portal, through the link displayed as output of the following cell.
**Note**: The recording of the computation results into your run was made possible by the `run.log()` commands in the `train.py` file.
###Code
run
###Output
_____no_output_____
###Markdown
Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run). The next cell blocks execution until the run finishes.
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
**Note:** All these calculations were run on your local machine, in the conda environment you defined above. You can find the results in:
- `~/.azureml/envs/azureml_xxxx` for the conda environment you just created
- `~/AppData/Local/Temp/azureml_runs/train-on-local_xxxx` for the machine learning models you trained (this path may differ depending on the platform you use). This folder also contains
  - Logs (under azureml_logs/)
  - Output pickled files (under outputs/)
  - The configuration files (credentials, local and docker image setups)
  - The train.py and mylib.py scripts
  - The current notebook
Take a few minutes to examine the output of the cell above. It shows the content of some of the log files, and extra information on the conda environment used.
6.B System-managed environment
6.B.a Set the environment up
Now, instead of managing the setup of the environment yourself, you can ask the system to build a new conda environment for you. The environment is built once, and will be reused in subsequent executions as long as the conda dependencies remain unchanged.
###Code
from azureml.core.conda_dependencies import CondaDependencies
system_managed_env = Environment("system-managed-env")
system_managed_env.python.user_managed_dependencies = False
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
system_managed_env.python.conda_dependencies = cd
###Output
_____no_output_____
###Markdown
6.B.b Submit the script to run in the system-managed environment
A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes. The commands used to execute the run are then the same as the ones you used above.
###Code
src.run_config.environment = system_managed_env
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
6.B.c Get run history details
###Code
run
run.wait_for_completion(show_output = True)
###Output
_____no_output_____
###Markdown
6.C Docker-based execution
In this section, you will train the same models, but you will do so in a Docker container, on your local machine. For this, you need to have the Docker engine installed locally. If you don't have it yet, please follow the instructions below.
How to install Docker
- [Linux](https://docs.docker.com/install/linux/docker-ce/ubuntu/)
- [MacOs](https://docs.docker.com/docker-for-mac/install/)
- [Windows](https://docs.docker.com/docker-for-windows/install/)
In case of issues, troubleshooting documentation can be found [here](https://docs.docker.com/docker-for-windows/troubleshoot/running-docker-for-windows-in-nested-virtualization-scenarios). Additionally, you can follow the steps below if Virtualization is not enabled on your machine:
- Go to Task Manager > Performance
- Check that Virtualization is enabled
- If it is not, go to `Start > Settings > Update and security > Recovery > Advanced Startup - Restart now > Troubleshoot > Advanced options > UEFI firmware settings - restart`
- In the BIOS, go to `Advanced > System options > Click the "Virtualization Technology (VTx)" only > Save > Exit > Save all changes` -- This will restart the machine
**Notes**:
- If your kernel is already running in a Docker container, such as **Azure Notebooks**, this mode will **NOT** work.
- If you use a GPU base image, it needs to be used on Microsoft Azure Services such as ACI, AML Compute, Azure VMs, or AKS.
You can also ask the system to pull down a Docker image and execute your scripts in it.
6.C.a Set the environment up
In the cell below, you will configure your run to execute in a Docker container. It will:
- run on a CPU
- contain a conda environment in which the scikit-learn library will be installed.
As before, you will finish configuring your run by pointing to the `train.py` and `mylib.py` files.
###Code
docker_env = Environment("docker-env")
docker_env.python.user_managed_dependencies = False
docker_env.docker.enabled = True
# use the default CPU-based Docker image from Azure ML
print(docker_env.docker.base_image)
# Specify conda dependencies with scikit-learn
docker_env.python.conda_dependencies = cd
###Output
_____no_output_____
###Markdown
6.C.b Submit the script to run in the system-managed environment
The run is now configured and ready to be executed in a Docker container. If you are running this for the first time, the Docker container will get created, as well as the conda environment inside it. This will take several minutes. Once all this is generated, however, this conda environment will be reused as long as you don't change the conda dependencies.
###Code
import subprocess
src.run_config.environment = docker_env
# Check if Docker is installed and Linux containers are enabled
if subprocess.run("docker -v", shell=True).returncode == 0:
out = subprocess.check_output("docker system info", shell=True).decode('ascii')
if not "OSType: linux" in out:
print("Switch Docker engine to use Linux containers.")
else:
run = exp.submit(src)
else:
print("Docker engine is not installed.")
###Output
_____no_output_____
###Markdown
Potential issue on Windows and how to solve it
If you are using a Windows machine, the creation of the Docker image may fail, and you may see the following error message:

`docker: Error response from daemon: Drive has not been shared. Failed to launch docker container. Check that docker is running and that C:\ on Windows and /tmp elsewhere is shared.`

This is because the process above tries to create a linux-based, i.e. non-windows-based, Docker image. To fix this, you can:
- Open the Docker user interface
- Navigate to Settings > Shared drives
- Select C (or both C and D, if you have one)
- Apply

When this is done, you can try and re-run the command above.

6.C.c Get run history details
###Code
# Get run history details
run
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
The results obtained here should be the same as those obtained before. However, take a look at the "Execution summary" section in the output of the cell above. Look for "docker". There, you should see the "enabled" field set to True. Compare this to the 2 prior runs ("enabled" was then set to False).

6.C.d Use a custom Docker image
You can also specify a custom Docker image, if you don't want to use the default image provided by Azure ML.

```python
custom_docker_env = Environment("custom-docker-env")
custom_docker_env.docker.enabled = True
```

You can either pull a public image from Docker Hub:

```python
# Use an image available in Docker Hub without authentication
custom_docker_env.docker.base_image = "continuumio/miniconda3"
```

Or one of the images you may already have created:

```python
# Or, use an image available in your private Azure Container Registry
custom_docker_env.docker.base_image = "mycustomimage:1.0"
custom_docker_env.docker.base_image_registry.address = "myregistry.azurecr.io"
custom_docker_env.docker.base_image_registry.username = "username"
custom_docker_env.docker.base_image_registry.password = "password"
```

Where to find my Docker image name and registry credentials
If you do not know what the name of your Docker image or container registry is, or if you don't know how to access the username and password needed above, proceed as follows:
- Docker image name:
  - In the portal, under your resource group, click on your current workspace
  - Click on Experiments
  - Click on Images
  - Click on the image of your choice
  - Copy the "ID" string
  - In this notebook, replace "mycustomimage:1.0" with that ID string
- Username and password:
  - In the portal, under your resource group, click on the container registry associated with your workspace
  - If you have several and don't know which one you need, click on your workspace, go to Overview and click on the "Registry" name on the upper right of the screen
  - There, go to "Access keys"
  - Copy the username and one of the passwords
  - In this notebook, replace "username" and "password" with these values

In any case, you will need to use the lines above in place of the line marked as `# use the default CPU-based Docker image from Azure ML` in section 6.C.a.

When you are using your custom Docker image, you might already have your Python environment properly set up. In that case, you can skip specifying conda dependencies, and just use the `user_managed_dependencies` option instead:

```python
custom_docker_env.python.user_managed_dependencies = True
# path to the Python environment in the custom Docker image
custom_docker_env.python.interpreter_path = '/opt/conda/bin/python'
```

Once you are done defining your environment, set that environment on your run configuration:

```python
src.run_config.environment = custom_docker_env
```

7. Query run metrics
Once your run has completed, you can now extract the metrics you captured by using the `get_metrics` method. As shown in the `train.py` file, these metrics are "alpha" and "mse".
###Code
# Get all metrics logged in the run
run.get_metrics()
metrics = run.get_metrics()
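# A small illustrative addition (not in the original): print each logged metric.
for metric_name, metric_value in metrics.items():
    print(metric_name, metric_value)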
###Output
_____no_output_____
###Markdown
Let's find the model that has the lowest MSE value logged.
###Code
import numpy as np
best_alpha = metrics['alpha'][np.argmin(metrics['mse'])]
print('When alpha is {1:0.2f}, we have min MSE {0:0.2f}.'.format(
min(metrics['mse']),
best_alpha
))
###Output
_____no_output_____
###Markdown
Let's compare it to the others
###Code
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.plot(metrics['alpha'], metrics['mse'], marker='o')
plt.ylabel("MSE")
plt.xlabel("Alpha")
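# An illustrative addition (not in the original): mark the best alpha found above.
plt.axvline(best_alpha, linestyle='dashed', color='orange')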
###Output
_____no_output_____
###Markdown
You can also list all the files that are associated with this run record
###Code
run.get_file_names()
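# A hedged follow-up (an addition, not in the original): download one artifact;
# the file name below is hypothetical -- substitute one returned above.
# run.download_file(name='outputs/ridge_0.40.pkl', output_file_path='./ridge_0.40.pkl')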
###Output
_____no_output_____
###Markdown
From the results obtained above, `ridge_0.40.pkl` is the best performing model. You can now register that particular model with the workspace. Once you have done so, go back to the portal and click on "Models". You should see it there.
###Code
# Supply a model name, and the full path to the serialized model file.
model = run.register_model(model_name='best_ridge_model', model_path='./outputs/ridge_0.40.pkl')
print("Registered model:\n --> Name: {}\n --> Version: {}\n --> URL: {}".format(model.name, model.version, model.url))
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

02. Train locally
* Create or load workspace.
* Create scripts locally.
* Create `train.py` in a folder, along with a `mylib.py` file.
* Configure & execute a local run in a user-managed Python environment.
* Configure & execute a local run in a system-managed Python environment.
* Configure & execute a local run in a Docker environment.
* Query run metrics to find the best model.
* Register model for operationalization.

Prerequisites
Make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize Workspace
Initialize a workspace object from persisted configuration.
###Code
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')
###Output
_____no_output_____
###Markdown
Create An Experiment
**Experiment** is a logical container in an Azure ML Workspace. It hosts run records which can include run metrics and output artifacts from your experiments.
###Code
from azureml.core import Experiment
experiment_name = 'train-on-local'
exp = Experiment(workspace=ws, name=experiment_name)
###Output
_____no_output_____
###Markdown
View `train.py`
`train.py` is already created for you.
###Code
with open('./train.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Note `train.py` also references a `mylib.py` file.
###Code
with open('./mylib.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Configure & Run User-managed environmentBelow, we use a user-managed run, which means you are responsible to ensure all the necessary packages are available in the Python environment you choose to run the script.
###Code
from azureml.core.runconfig import RunConfiguration
# Editing a run configuration property on-fly.
run_config_user_managed = RunConfiguration()
run_config_user_managed.environment.python.user_managed_dependencies = True
# You can choose a specific Python environment by pointing to a Python path
#run_config.environment.python.interpreter_path = '/home/johndoe/miniconda3/envs/myenv/bin/python'
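# An illustrative sketch (an addition, not in the original): reuse the exact
# interpreter that is executing this notebook.
# import sys
# run_config_user_managed.environment.python.interpreter_path = sys.executable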
###Output
_____no_output_____
###Markdown
Submit script to run in the user-managed environment
Note that the whole script folder is submitted for execution, including the `mylib.py` file.
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory='./', script='train.py', run_config=run_config_user_managed)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
Get run history details
###Code
run
###Output
_____no_output_____
###Markdown
Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run). Block and wait until the run finishes.
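A minimal cancellation sketch (an illustrative addition, not from the original notebook), assuming `run` is the submitted run object:

```python
# Request cancellation of a run that is still executing.
run.cancel()
```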
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
System-managed environment
You can also ask the system to build a new conda environment and execute your scripts in it. The environment is built once and will be reused in subsequent executions as long as the conda dependencies remain unchanged.
###Code
from azureml.core.conda_dependencies import CondaDependencies
run_config_system_managed = RunConfiguration()
run_config_system_managed.environment.python.user_managed_dependencies = False
run_config_system_managed.auto_prepare_environment = True
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
run_config_system_managed.environment.python.conda_dependencies = cd
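# A hedged variation (an addition, not in the original): you can also pin
# versions or add pip packages when declaring dependencies, e.g.:
# cd = CondaDependencies.create(conda_packages=['scikit-learn'],
#                               pip_packages=['joblib'])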
###Output
_____no_output_____
###Markdown
Submit script to run in the system-managed environment
A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes. But this conda environment is reused so long as you don't change the conda dependencies.
###Code
src = ScriptRunConfig(source_directory="./", script='train.py', run_config=run_config_system_managed)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
Get run history details
###Code
run
###Output
_____no_output_____
###Markdown
Block and wait until the run finishes.
###Code
run.wait_for_completion(show_output = True)
###Output
_____no_output_____
###Markdown
Docker-based execution
**IMPORTANT**: You must have Docker engine installed locally in order to use this execution mode. If your kernel is already running in a Docker container, such as **Azure Notebooks**, this mode will **NOT** work.

NOTE: The GPU base image must be used on Microsoft Azure Services only, such as ACI, AML Compute, Azure VMs, and AKS.

You can also ask the system to pull down a Docker image and execute your scripts in it.
###Code
run_config_docker = RunConfiguration()
run_config_docker.environment.python.user_managed_dependencies = False
run_config_docker.auto_prepare_environment = True
run_config_docker.environment.docker.enabled = True
# use the default CPU-based Docker image from Azure ML
run_config_docker.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
run_config_docker.environment.python.conda_dependencies = cd
src = ScriptRunConfig(source_directory="./", script='train.py', run_config=run_config_docker)
###Output
_____no_output_____
###Markdown
Submit script to run in the Docker environment
A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes. But this conda environment is reused so long as you don't change the conda dependencies.
###Code
import subprocess
# Check if Docker is installed and Linux containers are enabled
if subprocess.run("docker -v", shell=True).returncode == 0:
    out = subprocess.check_output("docker system info", shell=True).decode('ascii')
    if "OSType: linux" not in out:
        print("Switch Docker engine to use Linux containers.")
    else:
        run = exp.submit(src)
else:
    print("Docker engine is not installed.")
# Get run history details
run
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
Use a custom Docker image
You can also specify a custom Docker image if you don't want to use the default image provided by Azure ML.

```python
# use an image available in Docker Hub without authentication
run_config_docker.environment.docker.base_image = "continuumio/miniconda3"

# or, use an image available in a private Azure Container Registry
run_config_docker.environment.docker.base_image = "mycustomimage:1.0"
run_config_docker.environment.docker.base_image_registry.address = "myregistry.azurecr.io"
run_config_docker.environment.docker.base_image_registry.username = "username"
run_config_docker.environment.docker.base_image_registry.password = "password"
```

When you are using a custom Docker image, you might already have your environment set up properly in a Python environment in the Docker image. In that case, you can skip specifying conda dependencies, and just use the `user_managed_dependencies` option instead:

```python
run_config_docker.environment.python.user_managed_dependencies = True
# path to the Python environment in the custom Docker image
run_config_docker.environment.python.interpreter_path = '/opt/conda/bin/python'
```

Query run metrics
###Code
# Get all metrics logged in the run
run.get_metrics()
metrics = run.get_metrics()
###Output
_____no_output_____
###Markdown
Let's find the model that has the lowest MSE value logged.
###Code
import numpy as np
best_alpha = metrics['alpha'][np.argmin(metrics['mse'])]
print('When alpha is {1:0.2f}, we have min MSE {0:0.2f}.'.format(
min(metrics['mse']),
best_alpha
))
###Output
_____no_output_____
###Markdown
Let's compare it to the others
###Code
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.plot(metrics['alpha'], metrics['mse'], marker='o')
plt.ylabel("MSE")
plt.xlabel("Alpha")
###Output
_____no_output_____
###Markdown
You can also list all the files that are associated with this run record
###Code
run.get_file_names()
###Output
_____no_output_____
###Markdown
We know the model `ridge_0.40.pkl` is the best performing model from the earlier queries. So let's register it with the workspace.
###Code
# supply a model name, and the full path to the serialized model file.
model = run.register_model(model_name='best_ridge_model', model_path='./outputs/ridge_0.40.pkl')
print(model.name, model.version, model.url)
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

02. Train locally
* Create or load workspace.
* Create scripts locally.
* Create `train.py` in a folder, along with a `mylib.py` file.
* Configure & execute a local run in a user-managed Python environment.
* Configure & execute a local run in a system-managed Python environment.
* Configure & execute a local run in a Docker environment.
* Query run metrics to find the best model.
* Register model for operationalization.

Prerequisites
Make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize Workspace
Initialize a workspace object from persisted configuration.
###Code
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')
###Output
_____no_output_____
###Markdown
Create An Experiment
**Experiment** is a logical container in an Azure ML Workspace. It hosts run records which can include run metrics and output artifacts from your experiments.
###Code
from azureml.core import Experiment
experiment_name = 'train-on-local'
exp = Experiment(workspace=ws, name=experiment_name)
###Output
_____no_output_____
###Markdown
View `train.py`
`train.py` is already created for you.
###Code
with open('./train.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Note `train.py` also references a `mylib.py` file.
###Code
with open('./mylib.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Configure & Run User-managed environmentBelow, we use a user-managed run, which means you are responsible to ensure all the necessary packages are available in the Python environment you choose to run the script.
###Code
from azureml.core.runconfig import RunConfiguration
# Editing a run configuration property on-fly.
run_config_user_managed = RunConfiguration()
run_config_user_managed.environment.python.user_managed_dependencies = True
# You can choose a specific Python environment by pointing to a Python path
#run_config.environment.python.interpreter_path = '/home/johndoe/miniconda3/envs/myenv/bin/python'
###Output
_____no_output_____
###Markdown
Submit script to run in the user-managed environment
Note that the whole script folder is submitted for execution, including the `mylib.py` file.
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory='./', script='train.py', run_config=run_config_user_managed)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
Get run history details
###Code
run
###Output
_____no_output_____
###Markdown
Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run). Block and wait until the run finishes.
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
System-managed environment
You can also ask the system to build a new conda environment and execute your scripts in it. The environment is built once and will be reused in subsequent executions as long as the conda dependencies remain unchanged.
###Code
from azureml.core.conda_dependencies import CondaDependencies
run_config_system_managed = RunConfiguration()
run_config_system_managed.environment.python.user_managed_dependencies = False
run_config_system_managed.auto_prepare_environment = True
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
run_config_system_managed.environment.python.conda_dependencies = cd
###Output
_____no_output_____
###Markdown
Submit script to run in the system-managed environment
A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes. But this conda environment is reused so long as you don't change the conda dependencies.
###Code
src = ScriptRunConfig(source_directory="./", script='train.py', run_config=run_config_system_managed)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
Get run history details
###Code
run
###Output
_____no_output_____
###Markdown
Block and wait until the run finishes.
###Code
run.wait_for_completion(show_output = True)
###Output
_____no_output_____
###Markdown
Docker-based execution
**IMPORTANT**: You must have Docker engine installed locally in order to use this execution mode. If your kernel is already running in a Docker container, such as **Azure Notebooks**, this mode will **NOT** work.

NOTE: The GPU base image must be used on Microsoft Azure Services only, such as ACI, AML Compute, Azure VMs, and AKS.

You can also ask the system to pull down a Docker image and execute your scripts in it.
###Code
run_config_docker = RunConfiguration()
run_config_docker.environment.python.user_managed_dependencies = False
run_config_docker.auto_prepare_environment = True
run_config_docker.environment.docker.enabled = True
# use the default CPU-based Docker image from Azure ML
run_config_docker.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
run_config_docker.environment.python.conda_dependencies = cd
src = ScriptRunConfig(source_directory="./", script='train.py', run_config=run_config_docker)
###Output
_____no_output_____
###Markdown
Submit script to run in the Docker environment
A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes. But this conda environment is reused so long as you don't change the conda dependencies.
###Code
import subprocess
# Check if Docker is installed and Linux containers are enabled
if subprocess.run("docker -v", shell=True).returncode == 0:
    out = subprocess.check_output("docker system info", shell=True).decode('ascii')
    if "OSType: linux" not in out:
        print("Switch Docker engine to use Linux containers.")
    else:
        run = exp.submit(src)
else:
    print("Docker engine is not installed.")
# Get run history details
run
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
Use a custom Docker image
You can also specify a custom Docker image if you don't want to use the default image provided by Azure ML.

```python
# use an image available in Docker Hub without authentication
run_config_docker.environment.docker.base_image = "continuumio/miniconda3"

# or, use an image available in a private Azure Container Registry
run_config_docker.environment.docker.base_image = "mycustomimage:1.0"
run_config_docker.environment.docker.base_image_registry.address = "myregistry.azurecr.io"
run_config_docker.environment.docker.base_image_registry.username = "username"
run_config_docker.environment.docker.base_image_registry.password = "password"
```

When you are using a custom Docker image, you might already have your environment set up properly in a Python environment in the Docker image. In that case, you can skip specifying conda dependencies, and just use the `user_managed_dependencies` option instead:

```python
run_config_docker.environment.python.user_managed_dependencies = True
# path to the Python environment in the custom Docker image
run_config_docker.environment.python.interpreter_path = '/opt/conda/bin/python'
```

Query run metrics
###Code
# Get all metrics logged in the run
run.get_metrics()
metrics = run.get_metrics()
###Output
_____no_output_____
###Markdown
Let's find the model that has the lowest MSE value logged.
###Code
import numpy as np
best_alpha = metrics['alpha'][np.argmin(metrics['mse'])]
print('When alpha is {1:0.2f}, we have min MSE {0:0.2f}.'.format(
min(metrics['mse']),
best_alpha
))
###Output
_____no_output_____
###Markdown
You can also list all the files that are associated with this run record
###Code
run.get_file_names()
###Output
_____no_output_____
###Markdown
We know the model `ridge_0.40.pkl` is the best performing model from the earlier queries. So let's register it with the workspace.
###Code
# supply a model name, and the full path to the serialized model file.
model = run.register_model(model_name='best_ridge_model', model_path='./outputs/ridge_0.40.pkl')
print(model.name, model.version, model.url)
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

02. Train locally
* Create or load workspace.
* Create scripts locally.
* Create `train.py` in a folder, along with a `mylib.py` file.
* Configure & execute a local run in a user-managed Python environment.
* Configure & execute a local run in a system-managed Python environment.
* Configure & execute a local run in a Docker environment.
* Query run metrics to find the best model.
* Register model for operationalization.

Prerequisites
Make sure you go through the [Configuration](../../../configuration.ipynb) Notebook first if you haven't.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize Workspace
Initialize a workspace object from persisted configuration.
###Code
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')
###Output
_____no_output_____
###Markdown
Create An Experiment
**Experiment** is a logical container in an Azure ML Workspace. It hosts run records which can include run metrics and output artifacts from your experiments.
###Code
from azureml.core import Experiment
experiment_name = 'train-on-local'
exp = Experiment(workspace=ws, name=experiment_name)
###Output
_____no_output_____
###Markdown
View `train.py`
`train.py` is already created for you.
###Code
with open('./train.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Note `train.py` also references a `mylib.py` file.
###Code
with open('./mylib.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Configure & Run User-managed environmentBelow, we use a user-managed run, which means you are responsible to ensure all the necessary packages are available in the Python environment you choose to run the script.
###Code
from azureml.core.runconfig import RunConfiguration
# Editing a run configuration property on-fly.
run_config_user_managed = RunConfiguration()
run_config_user_managed.environment.python.user_managed_dependencies = True
# You can choose a specific Python environment by pointing to a Python path
#run_config.environment.python.interpreter_path = '/home/johndoe/miniconda3/envs/sdk2/bin/python'
###Output
_____no_output_____
###Markdown
Submit script to run in the user-managed environment
Note that the whole script folder is submitted for execution, including the `mylib.py` file.
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory='./', script='train.py', run_config=run_config_user_managed)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
Get run history details
###Code
run
###Output
_____no_output_____
###Markdown
Block and wait until the run finishes.
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
System-managed environment
You can also ask the system to build a new conda environment and execute your scripts in it. The environment is built once and will be reused in subsequent executions as long as the conda dependencies remain unchanged.
###Code
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
run_config_system_managed = RunConfiguration()
run_config_system_managed.environment.python.user_managed_dependencies = False
run_config_system_managed.auto_prepare_environment = True
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
run_config_system_managed.environment.python.conda_dependencies = cd
###Output
_____no_output_____
###Markdown
Submit script to run in the system-managed environment
A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes. But this conda environment is reused so long as you don't change the conda dependencies.
###Code
src = ScriptRunConfig(source_directory="./", script='train.py', run_config=run_config_system_managed)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
Get run history details
###Code
run
###Output
_____no_output_____
###Markdown
Block and wait until the run finishes.
###Code
run.wait_for_completion(show_output = True)
###Output
_____no_output_____
###Markdown
Docker-based execution
**IMPORTANT**: You must have Docker engine installed locally in order to use this execution mode. If your kernel is already running in a Docker container, such as **Azure Notebooks**, this mode will **NOT** work.

NOTE: The GPU base image must be used on Microsoft Azure Services only, such as ACI, AML Compute, Azure VMs, and AKS.

You can also ask the system to pull down a Docker image and execute your scripts in it.
###Code
run_config_docker = RunConfiguration()
run_config_docker.environment.python.user_managed_dependencies = False
run_config_docker.auto_prepare_environment = True
run_config_docker.environment.docker.enabled = True
run_config_docker.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
run_config_docker.environment.python.conda_dependencies = cd
src = ScriptRunConfig(source_directory="./", script='train.py', run_config=run_config_docker)
###Output
_____no_output_____
###Markdown
Submit script to run in the Docker environment
A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes. But this conda environment is reused so long as you don't change the conda dependencies.
###Code
import subprocess
# Check if Docker is installed and Linux containers are enabled
if subprocess.run("docker -v", shell=True).returncode == 0:
    out = subprocess.check_output("docker system info", shell=True).decode('ascii')
    if "OSType: linux" not in out:
        print("Switch Docker engine to use Linux containers.")
    else:
        run = exp.submit(src)
else:
    print("Docker engine is not installed.")
# Get run history details
run
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
Query run metrics
###Code
# Get all metrics logged in the run
run.get_metrics()
metrics = run.get_metrics()
###Output
_____no_output_____
###Markdown
Let's find the model that has the lowest MSE value logged.
###Code
import numpy as np
best_alpha = metrics['alpha'][np.argmin(metrics['mse'])]
print('When alpha is {1:0.2f}, we have min MSE {0:0.2f}.'.format(
min(metrics['mse']),
best_alpha
))
###Output
_____no_output_____
###Markdown
You can also list all the files that are associated with this run record
###Code
run.get_file_names()
###Output
_____no_output_____
###Markdown
We know the model `ridge_0.40.pkl` is the best performing model from the earlier queries. So let's register it with the workspace.
###Code
# supply a model name, and the full path to the serialized model file.
model = run.register_model(model_name='best_ridge_model', model_path='./outputs/ridge_0.40.pkl')
print(model.name, model.version, model.url)
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

02. Train locally
* Create or load workspace.
* Create scripts locally.
* Create `train.py` in a folder, along with a `mylib.py` file.
* Configure & execute a local run in a user-managed Python environment.
* Configure & execute a local run in a system-managed Python environment.
* Configure & execute a local run in a Docker environment.
* Query run metrics to find the best model.
* Register model for operationalization.

Prerequisites
Make sure you go through the [00. Installation and Configuration](00.configuration.ipynb) Notebook first if you haven't.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize Workspace
Initialize a workspace object from persisted configuration.
###Code
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')
###Output
_____no_output_____
###Markdown
Create An Experiment
**Experiment** is a logical container in an Azure ML Workspace. It hosts run records which can include run metrics and output artifacts from your experiments.
###Code
from azureml.core import Experiment
experiment_name = 'train-on-local'
exp = Experiment(workspace=ws, name=experiment_name)
###Output
_____no_output_____
###Markdown
View `train.py`
`train.py` is already created for you.
###Code
with open('./train.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Note `train.py` also references a `mylib.py` file.
###Code
with open('./mylib.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Configure & Run User-managed environmentBelow, we use a user-managed run, which means you are responsible to ensure all the necessary packages are available in the Python environment you choose to run the script.
###Code
from azureml.core.runconfig import RunConfiguration
# Editing a run configuration property on-fly.
run_config_user_managed = RunConfiguration()
run_config_user_managed.environment.python.user_managed_dependencies = True
# You can choose a specific Python environment by pointing to a Python path
#run_config.environment.python.interpreter_path = '/home/johndoe/miniconda3/envs/sdk2/bin/python'
###Output
_____no_output_____
###Markdown
Submit script to run in the user-managed environment
Note that the whole script folder is submitted for execution, including the `mylib.py` file.
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory='./', script='train.py', run_config=run_config_user_managed)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
Get run history details
###Code
run
###Output
_____no_output_____
###Markdown
Block and wait until the run finishes.
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
System-managed environment
You can also ask the system to build a new conda environment and execute your scripts in it. The environment is built once and will be reused in subsequent executions as long as the conda dependencies remain unchanged.
###Code
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
run_config_system_managed = RunConfiguration()
run_config_system_managed.environment.python.user_managed_dependencies = False
run_config_system_managed.auto_prepare_environment = True
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
run_config_system_managed.environment.python.conda_dependencies = cd
###Output
_____no_output_____
###Markdown
Submit script to run in the system-managed environment
A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes. But this conda environment is reused so long as you don't change the conda dependencies.
###Code
src = ScriptRunConfig(source_directory="./", script='train.py', run_config=run_config_system_managed)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
Get run history details
###Code
run
###Output
_____no_output_____
###Markdown
Block and wait until the run finishes.
###Code
run.wait_for_completion(show_output = True)
###Output
_____no_output_____
###Markdown
Docker-based execution
**IMPORTANT**: You must have Docker engine installed locally in order to use this execution mode. If your kernel is already running in a Docker container, such as **Azure Notebooks**, this mode will **NOT** work.

NOTE: The GPU base image must be used on Microsoft Azure Services only, such as ACI, AML Compute, Azure VMs, and AKS.

You can also ask the system to pull down a Docker image and execute your scripts in it.
###Code
run_config_docker = RunConfiguration()
run_config_docker.environment.python.user_managed_dependencies = False
run_config_docker.auto_prepare_environment = True
run_config_docker.environment.docker.enabled = True
run_config_docker.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
run_config_docker.environment.python.conda_dependencies = cd
src = ScriptRunConfig(source_directory="./", script='train.py', run_config=run_config_docker)
###Output
_____no_output_____
###Markdown
Submit script to run in the Docker environment
A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes. But this conda environment is reused so long as you don't change the conda dependencies.
###Code
import subprocess
# Check if Docker is installed and Linux containers are enabled
if subprocess.run("docker -v", shell=True).returncode == 0:
    out = subprocess.check_output("docker system info", shell=True).decode('ascii')
    if "OSType: linux" not in out:
        print("Switch Docker engine to use Linux containers.")
    else:
        run = exp.submit(src)
else:
    print("Docker engine is not installed.")
# Get run history details
run
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
Query run metrics
###Code
# Get all metrics logged in the run
run.get_metrics()
metrics = run.get_metrics()
###Output
_____no_output_____
###Markdown
Let's find the model that has the lowest MSE value logged.
###Code
import numpy as np
best_alpha = metrics['alpha'][np.argmin(metrics['mse'])]
print('When alpha is {1:0.2f}, we have min MSE {0:0.2f}.'.format(
min(metrics['mse']),
best_alpha
))
###Output
_____no_output_____
###Markdown
You can also list all the files that are associated with this run record
###Code
run.get_file_names()
###Output
_____no_output_____
###Markdown
We know the model `ridge_0.40.pkl` is the best performing model from the earlier queries. So let's register it with the workspace.
###Code
# supply a model name, and the full path to the serialized model file.
model = run.register_model(model_name='best_ridge_model', model_path='./outputs/ridge_0.40.pkl')
print(model.name, model.version, model.url)
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

02. Train locally
_**Train a model locally: Directly on your machine and within a Docker container**_

---

Table of contents
1. [Introduction](#intro)
1. [Pre-requisites](#pre-reqs)
1. [Initialize Workspace](#init)
1. [Create An Experiment](#exp)
1. [View training and auxiliary scripts](#view)
1. [Configure & Run](#config-run)
   1. User-managed environment
      1. Set the environment up
      1. Submit the script to run in the user-managed environment
      1. Get run history details
   1. System-managed environment
      1. Set the environment up
      1. Submit the script to run in the system-managed environment
      1. Get run history details
   1. Docker-based execution
      1. Set the environment up
      1. Submit the script to run in the Docker environment
      1. Get run history details
      1. Use a custom Docker image
1. [Query run metrics](#query)

---

1. Introduction
In this notebook, we will learn how to:
* Connect to our AML workspace
* Create or load a workspace
* Configure & execute a local run in:
  - a user-managed Python environment
  - a system-managed Python environment
  - a Docker environment
* Query run metrics to find the best model trained in the run
* Register that model for operationalization

2. Pre-requisites
In this notebook, we assume that you have set up your Azure Machine Learning workspace. If you have not, make sure you go through the [configuration notebook](../../../configuration.ipynb) first. In the end, you should have a configuration file that contains the subscription ID, resource group, and name of your workspace.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
3. Initialize Workspace
Initialize your workspace object from the configuration file.
###Code
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')
###Output
_____no_output_____
###Markdown
4. Create An Experiment
An experiment is a logical container in an Azure ML Workspace. It contains a series of trials called `Runs`. As such, it hosts run records such as run metrics, logs, and other output artifacts from your experiments.
###Code
from azureml.core import Experiment
experiment_name = 'train-on-local'
exp = Experiment(workspace=ws, name=experiment_name)
###Output
_____no_output_____
###Markdown
5. View training and auxiliary scripts
For convenience, we already created the training script (`train.py`) and the supporting library (`mylib.py`) for you. Take a few minutes to examine both files.
###Code
with open('./train.py', 'r') as f:
print(f.read())
with open('./mylib.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
6. Configure & Run 6.A User-managed environment 6.A.a Set the environment upWhen using a user-managed environment, you are responsible for ensuring that all the necessary packages are available in the Python environment you choose to run the script in.
###Code
from azureml.core import Environment
# Editing a run configuration property on-fly.
user_managed_env = Environment("user-managed-env")
user_managed_env.python.user_managed_dependencies = True
# You can choose a specific Python environment by pointing to a Python path
#user_managed_env.python.interpreter_path = '/home/johndoe/miniconda3/envs/myenv/bin/python'
###Output
_____no_output_____
###Markdown
6.A.b Submit the script to run in the user-managed environment
Whichever way you manage your environment, you need to use the `ScriptRunConfig` class. It allows you to further configure your run by pointing to the `train.py` script and to the working directory, which also contains the `mylib.py` file. These inputs provide the commands to execute in the run. Once the run is configured, you submit it to your experiment.
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory='./', script='train.py')
src.run_config.environment = user_managed_env
run = exp.submit(src)
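# An illustrative addition (not in the original): the submitted run's id and
# current status can be inspected right away.
print(run.id, run.get_status())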
###Output
_____no_output_____
###Markdown
6.A.c Get run history details
While all calculations were run on your machine (cf. below), by using a `run` you also captured the results of your calculations into your run and experiment. You can then see them on the Azure portal, through the link displayed as output of the following cell.

**Note**: The recording of the computation results into your run was made possible by the `run.log()` commands in the `train.py` file.
###Code
run
###Output
_____no_output_____
###Markdown
Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run). Block any execution to wait until the run finishes.
###Code
run.wait_for_completion(show_output=True)
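# An illustrative addition (not in the original): print a direct link to this
# run in the Azure portal.
print(run.get_portal_url())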
###Output
_____no_output_____
###Markdown
**Note:** All these calculations were run on your local machine, in the conda environment you defined above. You can find the results in:
- `~/.azureml/envs/azureml_xxxx` for the conda environment you just created
- `~/AppData/Local/Temp/azureml_runs/train-on-local_xxxx` for the machine learning models you trained (this path may differ depending on the platform you use). This folder also contains:
  - Logs (under azureml_logs/)
  - Output pickled files (under outputs/)
  - The configuration files (credentials, local and docker image setups)
  - The train.py and mylib.py scripts
  - The current notebook

Take a few minutes to examine the output of the cell above. It shows the content of some of the log files, and extra information on the conda environment used.

6.B System-managed environment

6.B.a Set the environment up
Now, instead of managing the setup of the environment yourself, you can ask the system to build a new conda environment for you. The environment is built once, and will be reused in subsequent executions as long as the conda dependencies remain unchanged.
###Code
from azureml.core.conda_dependencies import CondaDependencies
system_managed_env = Environment("system-managed-env")
system_managed_env.python.user_managed_dependencies = False
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
system_managed_env.python.conda_dependencies = cd
###Output
_____no_output_____
###Markdown
6.B.b Submit the script to run in the system-managed environment
A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes. The commands used to execute the run are then the same as the ones you used above.
###Code
src.run_config.environment = system_managed_env
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
6.B.c Get run history details
###Code
run
run.wait_for_completion(show_output = True)
###Output
_____no_output_____
###Markdown
6.C Docker-based execution
In this section, you will train the same models, but you will do so in a Docker container, on your local machine. For this, you then need to have the Docker engine installed locally. If you don't have it yet, please follow the instructions below.

How to install Docker
- [Linux](https://docs.docker.com/install/linux/docker-ce/ubuntu/)
- [MacOs](https://docs.docker.com/docker-for-mac/install/)
- [Windows](https://docs.docker.com/docker-for-windows/install/)

In case of issues, troubleshooting documentation can be found [here](https://docs.docker.com/docker-for-windows/troubleshoot/running-docker-for-windows-in-nested-virtualization-scenarios). Additionally, you can follow the steps below, if Virtualization is not enabled on your machine:
- Go to Task Manager > Performance
- Check that Virtualization is enabled
- If it is not, go to `Start > Settings > Update and security > Recovery > Advanced Startup - Restart now > Troubleshoot > Advanced options > UEFI firmware settings - restart`
- In the BIOS, go to `Advanced > System options > Click the "Virtualization Technology (VTx)" only > Save > Exit > Save all changes` -- This will restart the machine

**Notes**:
- If your kernel is already running in a Docker container, such as **Azure Notebooks**, this mode will **NOT** work.
- If you use a GPU base image, it needs to be used on Microsoft Azure Services such as ACI, AML Compute, Azure VMs, or AKS.

You can also ask the system to pull down a Docker image and execute your scripts in it.

6.C.a Set the environment up
In the cell below, you will configure your run to execute in a Docker container. It will:
- run on a CPU
- contain a conda environment in which the scikit-learn library will be installed.

As before, you will finish configuring your run by pointing to the `train.py` and `mylib.py` files.
###Code
docker_env = Environment("docker-env")
docker_env.python.user_managed_dependencies = False
docker_env.docker.enabled = True
# use the default CPU-based Docker image from Azure ML
print(docker_env.docker.base_image)
# Specify conda dependencies with scikit-learn
docker_env.python.conda_dependencies = cd
###Output
_____no_output_____
###Markdown
6.C.b Submit the script to run in the Docker environment
The run is now configured and ready to be executed in a Docker container. If you are running this for the first time, the Docker container will get created, as well as the conda environment inside it. This will take several minutes. Once all this is generated, however, this conda environment will be reused as long as you don't change the conda dependencies.
###Code
import subprocess
src.run_config.environment = docker_env
# Check if Docker is installed and Linux containers are enabled
if subprocess.run("docker -v", shell=True).returncode == 0:
    out = subprocess.check_output("docker system info", shell=True).decode('ascii')
    if "OSType: linux" not in out:
        print("Switch Docker engine to use Linux containers.")
    else:
        run = exp.submit(src)
else:
    print("Docker engine is not installed.")
###Output
_____no_output_____
###Markdown
Potential issue on Windows and how to solve it
If you are using a Windows machine, the creation of the Docker image may fail, and you may see the following error message:

`docker: Error response from daemon: Drive has not been shared. Failed to launch docker container. Check that docker is running and that C:\ on Windows and /tmp elsewhere is shared.`

This is because the process above tries to create a linux-based, i.e. non-windows-based, Docker image. To fix this, you can:
- Open the Docker user interface
- Navigate to Settings > Shared drives
- Select C (or both C and D, if you have one)
- Apply

When this is done, you can try and re-run the command above.

6.C.c Get run history details
###Code
# Get run history details
run
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
The results obtained here should be the same as those obtained before. However, take a look at the "Execution summary" section in the output of the cell above. Look for "docker". There, you should see the "enabled" field set to True. Compare this to the 2 prior runs ("enabled" was then set to False).

6.C.d Use a custom Docker image
You can also specify a custom Docker image, if you don't want to use the default image provided by Azure ML.

You can either pull a public image from Docker Hub:

```python
# Use an image available in Docker Hub without authentication
docker_env.docker.base_image = "continuumio/miniconda3"
```

Or one of the images you may already have created:

```python
# Or, use an image available in your private Azure Container Registry
docker_env.docker.base_image = "mycustomimage:1.0"
docker_env.docker.base_image_registry.address = "myregistry.azurecr.io"
docker_env.docker.base_image_registry.username = "username"
docker_env.docker.base_image_registry.password = "password"
```

Where to find my Docker image name and registry credentials
If you do not know what the name of your Docker image or container registry is, or if you don't know how to access the username and password needed above, proceed as follows:
- Docker image name:
  - In the portal, under your resource group, click on your current workspace
  - Click on Experiments
  - Click on Images
  - Click on the image of your choice
  - Copy the "ID" string
  - In this notebook, replace "mycustomimage:1.0" with that ID string
- Username and password:
  - In the portal, under your resource group, click on the container registry associated with your workspace
  - If you have several and don't know which one you need, click on your workspace, go to Overview and click on the "Registry" name on the upper right of the screen
  - There, go to "Access keys"
  - Copy the username and one of the passwords
  - In this notebook, replace "username" and "password" with these values

In any case, you will need to use the lines above in place of the line marked as `# use the default CPU-based Docker image from Azure ML` in section 6.C.a.

When you are using your custom Docker image, you might already have your Python environment properly set up. In that case, you can skip specifying conda dependencies, and just use the `user_managed_dependencies` option instead:

```python
docker_env.python.user_managed_dependencies = True
# path to the Python environment in the custom Docker image
docker_env.python.interpreter_path = '/opt/conda/bin/python'
```

7. Query run metrics
Once your run has completed, you can now extract the metrics you captured by using the `get_metrics` method. As shown in the `train.py` file, these metrics are "alpha" and "mse".
###Code
# Get all metrics logged in the run ("alpha" and "mse", per train.py)
metrics = run.get_metrics()
metrics
###Output
_____no_output_____
###Markdown
Let's find the model that has the lowest MSE value logged.
###Code
import numpy as np
best_alpha = metrics['alpha'][np.argmin(metrics['mse'])]
print('When alpha is {1:0.2f}, we have min MSE {0:0.2f}.'.format(
min(metrics['mse']),
best_alpha
))
###Output
_____no_output_____
###Markdown
Let's compare it to the others
###Code
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.plot(metrics['alpha'], metrics['mse'], marker='o')
plt.ylabel("MSE")
plt.xlabel("Alpha")
###Output
_____no_output_____
###Markdown
You can also list all the files that are associated with this run record
###Code
run.get_file_names()
###Output
_____no_output_____
###Markdown
From the results obtained above, `ridge_0.40.pkl` is the best performing model. You can now register that particular model with the workspace. Once you have done so, go back to the portal and click on "Models". You should see it there.
###Code
# Supply a model name, and the full path to the serialized model file.
model = run.register_model(model_name='best_ridge_model', model_path='./outputs/ridge_0.40.pkl')
print("Registered model:\n --> Name: {}\n --> Version: {}\n --> URL: {}".format(model.name, model.version, model.url))
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.

02. Train locally

_**Train a model locally: Directly on your machine and within a Docker container**_

---

Table of contents

1. [Introduction](intro)
1. [Pre-requisites](pre-reqs)
1. [Initialize Workspace](init)
1. [Create An Experiment](exp)
1. [View training and auxiliary scripts](view)
1. [Configure & Run](config-run)
    1. User-managed environment
        1. Set the environment up
        1. Submit the script to run in the user-managed environment
        1. Get run history details
    1. System-managed environment
        1. Set the environment up
        1. Submit the script to run in the system-managed environment
        1. Get run history details
    1. Docker-based execution
        1. Set the environment up
        1. Submit the script to run in the Docker environment
        1. Get run history details
        1. Use a custom Docker image
1. [Query run metrics](query)

---

1. Introduction

In this notebook, we will learn how to:
* Connect to our AML workspace
* Create or load a workspace
* Configure & execute a local run in:
  - a user-managed Python environment
  - a system-managed Python environment
  - a Docker environment
* Query run metrics to find the best model trained in the run
* Register that model for operationalization

2. Pre-requisites

In this notebook, we assume that you have set up your Azure Machine Learning workspace. If you have not, make sure you go through the [configuration notebook](../../../configuration.ipynb) first. In the end, you should have a configuration file that contains the subscription ID, resource group, and name of your workspace.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
3. Initialize Workspace

Initialize your workspace object from the configuration file.
###Code
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')
###Output
_____no_output_____
###Markdown
4. Create An Experiment

An experiment is a logical container in an Azure ML Workspace. It contains a series of trials called `Runs`. As such, it hosts run records such as run metrics, logs, and other output artifacts from your experiments.
###Code
from azureml.core import Experiment
experiment_name = 'train-on-local'
exp = Experiment(workspace=ws, name=experiment_name)
###Output
_____no_output_____
###Markdown
5. View training and auxiliary scripts

For convenience, we already created the training script (`train.py`) and its supporting library (`mylib.py`) for you. Take a few minutes to examine both files.
###Code
with open('./train.py', 'r') as f:
print(f.read())
with open('./mylib.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
6. Configure & Run

6.A User-managed environment

6.A.a Set the environment up

When using a user-managed environment, you are responsible for ensuring that all the necessary packages are available in the Python environment you choose to run the script in.
###Code
from azureml.core.runconfig import RunConfiguration
# Editing a run configuration property on the fly.
run_config_user_managed = RunConfiguration()
run_config_user_managed.environment.python.user_managed_dependencies = True
# You can choose a specific Python environment by pointing to a Python path
# run_config_user_managed.environment.python.interpreter_path = '/home/johndoe/miniconda3/envs/myenv/bin/python'
###Output
_____no_output_____
###Markdown
6.A.b Submit the script to run in the user-managed environment

However you manage your environment, you need to use the `ScriptRunConfig` class. It allows you to further configure your run by pointing to the `train.py` script and to the working directory, which also contains the `mylib.py` file. These inputs provide the commands to execute in the run. Once the run is configured, you submit it to your experiment.
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory='./', script='train.py', run_config=run_config_user_managed)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
6.A.c Get run history details

While all calculations were run on your machine (see below), by using a `run` you also captured the results of your calculations in your run and experiment. You can see them on the Azure portal, through the link displayed as output of the following cell.

**Note**: The recording of the computation results into your run was made possible by the `run.log()` commands in the `train.py` file.
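As a rough illustration only, the logging pattern inside `train.py` likely resembles the following sketch (the alpha grid and the `train_and_evaluate` helper are assumptions, not the actual script):

```python
from azureml.core.run import Run

run = Run.get_context()                  # handle to the run this script executes in
for alpha in [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]:
    mse = train_and_evaluate(alpha)      # hypothetical helper returning the test MSE
    run.log('alpha', alpha)              # each call appends a value to the metric series
    run.log('mse', mse)
```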
###Code
run
###Output
_____no_output_____
###Markdown
Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run), or use the SDK directly as sketched below. The next cell blocks until the run finishes.
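A minimal sketch of the programmatic alternative (assuming `run` is the object returned by `exp.submit` above):

```python
# Cancel the run from the SDK instead of the portal (only meaningful while it is still in progress)
run.cancel()
```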
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
**Note:** All these calculations were run on your local machine, in the conda environment you defined above. You can find the results in:
- `~/.azureml/envs/azureml_xxxx` for the conda environment you just created
- `~/AppData/Local/Temp/azureml_runs/train-on-local_xxxx` for the machine learning models you trained (this path may differ depending on the platform you use). This folder also contains:
  - Logs (under azureml_logs/)
  - Output pickled files (under outputs/)
  - The configuration files (credentials, local and docker image setups)
  - The train.py and mylib.py scripts
  - The current notebook

Take a few minutes to examine the output of the cell above. It shows the content of some of the log files, and extra information on the conda environment used.

6.B System-managed environment

6.B.a Set the environment up

Now, instead of managing the setup of the environment yourself, you can ask the system to build a new conda environment for you. The environment is built once, and will be reused in subsequent executions as long as the conda dependencies remain unchanged.
###Code
from azureml.core.conda_dependencies import CondaDependencies
run_config_system_managed = RunConfiguration()
run_config_system_managed.environment.python.user_managed_dependencies = False
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
run_config_system_managed.environment.python.conda_dependencies = cd
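# (Sketch) pip packages can be declared alongside conda packages if needed, e.g.:
# cd = CondaDependencies.create(conda_packages=['scikit-learn'], pip_packages=['azureml-defaults'])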
###Output
_____no_output_____
###Markdown
6.B.b Submit the script to run in the system-managed environment

A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes.

The commands used to execute the run are then the same as the ones you used above.
###Code
src = ScriptRunConfig(source_directory="./", script='train.py', run_config=run_config_system_managed)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
6.B.c Get run history details
###Code
run
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
6.C Docker-based execution

In this section, you will train the same models, but you will do so in a Docker container, on your local machine. For this, you need to have the Docker engine installed locally. If you don't have it yet, please follow the instructions below.

How to install Docker
- [Linux](https://docs.docker.com/install/linux/docker-ce/ubuntu/)
- [macOS](https://docs.docker.com/docker-for-mac/install/)
- [Windows](https://docs.docker.com/docker-for-windows/install/)

In case of issues, troubleshooting documentation can be found [here](https://docs.docker.com/docker-for-windows/troubleshoot/running-docker-for-windows-in-nested-virtualization-scenarios). Additionally, you can follow the steps below if Virtualization is not enabled on your machine:
- Go to Task Manager > Performance
- Check that Virtualization is enabled
- If it is not, go to `Start > Settings > Update and security > Recovery > Advanced Startup - Restart now > Troubleshoot > Advanced options > UEFI firmware settings - restart`
- In the BIOS, go to `Advanced > System options > Click the "Virtualization Technology (VTx)" only > Save > Exit > Save all changes` -- This will restart the machine

**Notes**:
- If your kernel is already running in a Docker container, such as **Azure Notebooks**, this mode will **NOT** work.
- If you use a GPU base image, it needs to be used on Microsoft Azure Services such as ACI, AML Compute, Azure VMs, or AKS.

You can also ask the system to pull down a Docker image and execute your scripts in it.

6.C.a Set the environment up

In the cell below, you will configure your run to execute in a Docker container. It will:
- run on a CPU
- contain a conda environment in which the scikit-learn library will be installed

As before, you will finish configuring your run by pointing to the `train.py` and `mylib.py` files.
###Code
run_config_docker = RunConfiguration()
run_config_docker.environment.python.user_managed_dependencies = False
run_config_docker.environment.docker.enabled = True
# use the default CPU-based Docker image from Azure ML
run_config_docker.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE # Reference Docker image
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
run_config_docker.environment.python.conda_dependencies = cd
src = ScriptRunConfig(source_directory="./", script='train.py', run_config=run_config_docker)
###Output
_____no_output_____
###Markdown
6.C.b Submit the script to run in the Docker environment

The run is now configured and ready to be executed in a Docker container. If you are running this for the first time, the Docker container will get created, as well as the conda environment inside it. This will take several minutes. Once all this is generated, however, this conda environment will be reused as long as you don't change the conda dependencies.
###Code
import subprocess
# Check if Docker is installed and Linux containers are enabled
if subprocess.run("docker -v", shell=True).returncode == 0:
out = subprocess.check_output("docker system info", shell=True).decode('ascii')
if not "OSType: linux" in out:
print("Switch Docker engine to use Linux containers.")
else:
run = exp.submit(src)
else:
print("Docker engine is not installed.")
###Output
_____no_output_____
###Markdown
Potential issue on Windows and how to solve it

If you are using a Windows machine, the creation of the Docker image may fail, and you may see the following error message:

`docker: Error response from daemon: Drive has not been shared. Failed to launch docker container. Check that docker is running and that C:\ on Windows and /tmp elsewhere is shared.`

This is because the process above tries to create a Linux-based (i.e., non-Windows-based) Docker image. To fix this, you can:
- Open the Docker user interface
- Navigate to Settings > Shared drives
- Select C (or both C and D, if you have one)
- Apply

When this is done, you can try and re-run the command above.

6.C.c Get run history details
###Code
# Get run history details
run
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
The results obtained here should be the same as those obtained before. However, take a look at the "Execution summary" section in the output of the cell above. Look for "docker". There, you should see the "enabled" field set to True. Compare this to the two prior runs ("enabled" was then set to False).

6.C.d Use a custom Docker image

You can also specify a custom Docker image, if you don't want to use the default image provided by Azure ML.

You can either pull an image directly from Docker Hub:

```python
# Use an image available in Docker Hub without authentication
run_config_docker.environment.docker.base_image = "continuumio/miniconda3"
```

Or one of the images you may already have created:

```python
# Or, use an image available in your private Azure Container Registry
run_config_docker.environment.docker.base_image = "mycustomimage:1.0"
run_config_docker.environment.docker.base_image_registry.address = "myregistry.azurecr.io"
run_config_docker.environment.docker.base_image_registry.username = "username"
run_config_docker.environment.docker.base_image_registry.password = "password"
```

Where to find my Docker image name and registry credentials

If you do not know what the name of your Docker image or container registry is, or if you don't know how to access the username and password needed above, proceed as follows:
- Docker image name:
  - In the portal, under your resource group, click on your current workspace
  - Click on Experiments
  - Click on Images
  - Click on the image of your choice
  - Copy the "ID" string
  - In this notebook, replace "mycustomimage:1.0" with that ID string
- Username and password:
  - In the portal, under your resource group, click on the container registry associated with your workspace
  - If you have several and don't know which one you need, click on your workspace, go to Overview and click on the "Registry" name on the upper right of the screen
  - There, go to "Access keys"
  - Copy the username and one of the passwords
  - In this notebook, replace "username" and "password" by these values

In any case, you will need to use the lines above in place of the line marked as `# Reference Docker image` in section 6.C.a.

When you are using your custom Docker image, you might already have your Python environment properly set up. In that case, you can skip specifying conda dependencies, and just use the `user_managed_dependencies` option instead:

```python
run_config_docker.environment.python.user_managed_dependencies = True
# Path to the Python environment in the custom Docker image
run_config_docker.environment.python.interpreter_path = '/opt/conda/bin/python'
```

7. Query run metrics

Once your run has completed, you can now extract the metrics you captured by using the `get_metrics` method. As shown in the `train.py` file, these metrics are "alpha" and "mse".
###Code
# Get all metrics logged in the run ("alpha" and "mse", per train.py)
metrics = run.get_metrics()
metrics
###Output
_____no_output_____
###Markdown
Let's find the model that has the lowest MSE value logged.
###Code
import numpy as np
best_alpha = metrics['alpha'][np.argmin(metrics['mse'])]
print('When alpha is {1:0.2f}, we have min MSE {0:0.2f}.'.format(
min(metrics['mse']),
best_alpha
))
###Output
_____no_output_____
###Markdown
Let's compare it to the others
###Code
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.plot(metrics['alpha'], metrics['mse'], marker='o')
plt.ylabel("MSE")
plt.xlabel("Alpha")
###Output
_____no_output_____
###Markdown
You can also list all the files that are associated with this run record
###Code
run.get_file_names()
###Output
_____no_output_____
###Markdown
From the results obtained above, `ridge_0.40.pkl` is the best performing model. You can now register that particular model with the workspace. Once you have done so, go back to the portal and click on "Models". You should see it there.
###Code
# Supply a model name, and the full path to the serialized model file.
model = run.register_model(model_name='best_ridge_model', model_path='./outputs/ridge_0.40.pkl')
print("Registered model:\n --> Name: {}\n --> Version: {}\n --> URL: {}".format(model.name, model.version, model.url))
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.

02. Train locally

* Create or load workspace.
* Create scripts locally.
* Create `train.py` in a folder, along with a `mylib.py` file.
* Configure & execute a local run in a user-managed Python environment.
* Configure & execute a local run in a system-managed Python environment.
* Configure & execute a local run in a Docker environment.
* Query run metrics to find the best model.
* Register model for operationalization.

Prerequisites

If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize Workspace

Initialize a workspace object from persisted configuration.
###Code
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')
###Output
_____no_output_____
###Markdown
Create An Experiment

**Experiment** is a logical container in an Azure ML Workspace. It hosts run records which can include run metrics and output artifacts from your experiments.
###Code
from azureml.core import Experiment
experiment_name = 'train-on-local'
exp = Experiment(workspace=ws, name=experiment_name)
###Output
_____no_output_____
###Markdown
View `train.py`

`train.py` is already created for you.
###Code
with open('./train.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Note `train.py` also references a `mylib.py` file.
###Code
with open('./mylib.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Configure & Run

User-managed environment

Below, we use a user-managed run, which means you are responsible for ensuring that all the necessary packages are available in the Python environment you choose to run the script in.
###Code
from azureml.core.runconfig import RunConfiguration
# Editing a run configuration property on the fly.
run_config_user_managed = RunConfiguration()
run_config_user_managed.environment.python.user_managed_dependencies = True
# You can choose a specific Python environment by pointing to a Python path
# run_config_user_managed.environment.python.interpreter_path = '/home/johndoe/miniconda3/envs/myenv/bin/python'
###Output
_____no_output_____
###Markdown
Submit script to run in the user-managed environment

Note that the whole script folder is submitted for execution, including the `mylib.py` file.
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory='./', script='train.py', run_config=run_config_user_managed)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
Get run history details
###Code
run
###Output
_____no_output_____
###Markdown
Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run). You can also poll the run's status without blocking, as sketched below; the next cell then blocks until the run finishes.
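A minimal sketch (assuming `run` is the object returned by `exp.submit` above) for checking the run's state without waiting:

```python
# Non-blocking status check; returns a string such as 'Running' or 'Completed'
print(run.get_status())
```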
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
System-managed environment

You can also ask the system to build a new conda environment and execute your scripts in it. The environment is built once and will be reused in subsequent executions as long as the conda dependencies remain unchanged.
###Code
from azureml.core.conda_dependencies import CondaDependencies
run_config_system_managed = RunConfiguration()
run_config_system_managed.environment.python.user_managed_dependencies = False
run_config_system_managed.auto_prepare_environment = True
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
run_config_system_managed.environment.python.conda_dependencies = cd
###Output
_____no_output_____
###Markdown
Submit script to run in the system-managed environment

A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes. But this conda environment is reused as long as you don't change the conda dependencies.
###Code
src = ScriptRunConfig(source_directory="./", script='train.py', run_config=run_config_system_managed)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
Get run history details
###Code
run
###Output
_____no_output_____
###Markdown
Block and wait until the run finishes.
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
Docker-based execution

**IMPORTANT**: You must have the Docker engine installed locally in order to use this execution mode. If your kernel is already running in a Docker container, such as **Azure Notebooks**, this mode will **NOT** work.

NOTE: The GPU base image must be used on Microsoft Azure Services only, such as ACI, AML Compute, Azure VMs, and AKS.

You can also ask the system to pull down a Docker image and execute your scripts in it.
###Code
run_config_docker = RunConfiguration()
run_config_docker.environment.python.user_managed_dependencies = False
run_config_docker.auto_prepare_environment = True
run_config_docker.environment.docker.enabled = True
# use the default CPU-based Docker image from Azure ML
run_config_docker.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE
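# (Sketch) A GPU counterpart also exists, but per the note above it is only for use
# on Azure services such as ACI, AML Compute, Azure VMs, or AKS:
# run_config_docker.environment.docker.base_image = azureml.core.runconfig.DEFAULT_GPU_IMAGE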
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
run_config_docker.environment.python.conda_dependencies = cd
src = ScriptRunConfig(source_directory="./", script='train.py', run_config=run_config_docker)
###Output
_____no_output_____
###Markdown
Submit script to run in the Docker environment

A new conda environment is built inside the Docker container, based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes. But this conda environment is reused as long as you don't change the conda dependencies.
###Code
import subprocess
# Check if Docker is installed and Linux containers are enabled
if subprocess.run("docker -v", shell=True).returncode == 0:
out = subprocess.check_output("docker system info", shell=True).decode('ascii')
if not "OSType: linux" in out:
print("Switch Docker engine to use Linux containers.")
else:
run = exp.submit(src)
else:
print("Docker engine not installed.")
#Get run history details
run
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
Use a custom Docker image

You can also specify a custom Docker image if you don't want to use the default image provided by Azure ML.

```python
# Use an image available in Docker Hub without authentication
run_config_docker.environment.docker.base_image = "continuumio/miniconda3"

# Or, use an image available in a private Azure Container Registry
run_config_docker.environment.docker.base_image = "mycustomimage:1.0"
run_config_docker.environment.docker.base_image_registry.address = "myregistry.azurecr.io"
run_config_docker.environment.docker.base_image_registry.username = "username"
run_config_docker.environment.docker.base_image_registry.password = "password"
```

When you are using a custom Docker image, you might already have your environment set up properly in a Python environment in the Docker image. In that case, you can skip specifying conda dependencies, and just use the `user_managed_dependencies` option instead:

```python
run_config_docker.environment.python.user_managed_dependencies = True
# Path to the Python environment in the custom Docker image
run_config_docker.environment.python.interpreter_path = '/opt/conda/bin/python'
```

Query run metrics
###Code
# Get all metrics logged in the run
metrics = run.get_metrics()
metrics
###Output
_____no_output_____
###Markdown
Let's find the model that has the lowest MSE value logged.
###Code
import numpy as np
best_alpha = metrics['alpha'][np.argmin(metrics['mse'])]
print('When alpha is {1:0.2f}, we have min MSE {0:0.2f}.'.format(
min(metrics['mse']),
best_alpha
))
###Output
_____no_output_____
###Markdown
Let's compare it to the others
###Code
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.plot(metrics['alpha'], metrics['mse'], marker='o')
plt.ylabel("MSE")
plt.xlabel("Alpha")
###Output
_____no_output_____
###Markdown
You can also list all the files that are associated with this run record
###Code
run.get_file_names()
###Output
_____no_output_____
###Markdown
We know the model `ridge_0.40.pkl` is the best performing model from the earlier queries. So let's register it with the workspace.
###Code
# supply a model name, and the full path to the serialized model file.
model = run.register_model(model_name='best_ridge_model', model_path='./outputs/ridge_0.40.pkl')
print(model.name, model.version, model.url)
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.

02. Train locally

_**Train a model locally: Directly on your machine and within a Docker container**_

---

Table of contents

1. [Introduction](intro)
1. [Pre-requisites](pre-reqs)
1. [Initialize Workspace](init)
1. [Create An Experiment](exp)
1. [View training and auxiliary scripts](view)
1. [Configure & Run](config-run)
    1. User-managed environment
        1. Set the environment up
        1. Submit the script to run in the user-managed environment
        1. Get run history details
    1. System-managed environment
        1. Set the environment up
        1. Submit the script to run in the system-managed environment
        1. Get run history details
    1. Docker-based execution
        1. Set the environment up
        1. Submit the script to run in the Docker environment
        1. Get run history details
        1. Use a custom Docker image
1. [Query run metrics](query)

---

1. Introduction

In this notebook, we will learn how to:
* Connect to our AML workspace
* Create or load a workspace
* Configure & execute a local run in:
  - a user-managed Python environment
  - a system-managed Python environment
  - a Docker environment
* Query run metrics to find the best model trained in the run
* Register that model for operationalization

2. Pre-requisites

In this notebook, we assume that you have set up your Azure Machine Learning workspace. If you have not, make sure you go through the [configuration notebook](../../../configuration.ipynb) first. In the end, you should have a configuration file that contains the subscription ID, resource group, and name of your workspace.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
3. Initialize Workspace

Initialize your workspace object from the configuration file.
###Code
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')
###Output
_____no_output_____
###Markdown
4. Create An Experiment

An experiment is a logical container in an Azure ML Workspace. It contains a series of trials called `Runs`. As such, it hosts run records such as run metrics, logs, and other output artifacts from your experiments.
###Code
from azureml.core import Experiment
experiment_name = 'train-on-local'
exp = Experiment(workspace=ws, name=experiment_name)
###Output
_____no_output_____
###Markdown
5. View training and auxiliary scripts

For convenience, we already created the training script (`train.py`) and its supporting library (`mylib.py`) for you. Take a few minutes to examine both files.
###Code
with open('./train.py', 'r') as f:
print(f.read())
with open('./mylib.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
6. Configure & Run

6.A User-managed environment

6.A.a Set the environment up

When using a user-managed environment, you are responsible for ensuring that all the necessary packages are available in the Python environment you choose to run the script in.
###Code
from azureml.core import Environment
# Creating an environment and editing its properties on the fly.
user_managed_env = Environment("user-managed-env")
user_managed_env.python.user_managed_dependencies = True
# You can choose a specific Python environment by pointing to a Python path
#user_managed_env.python.interpreter_path = '/home/johndoe/miniconda3/envs/myenv/bin/python'
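# (Sketch) To point the run at the interpreter of the kernel currently executing this notebook:
# import sys
# user_managed_env.python.interpreter_path = sys.executable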
###Output
_____no_output_____
###Markdown
6.A.b Submit the script to run in the user-managed environment

However you manage your environment, you need to use the `ScriptRunConfig` class. It allows you to further configure your run by pointing to the `train.py` script and to the working directory, which also contains the `mylib.py` file. These inputs provide the commands to execute in the run. Once the run is configured, you submit it to your experiment.
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory='./', script='train.py')
src.run_config.environment = user_managed_env
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
6.A.c Get run history details

While all calculations were run on your machine (see below), by using a `run` you also captured the results of your calculations in your run and experiment. You can see them on the Azure portal, through the link displayed as output of the following cell.

**Note**: The recording of the computation results into your run was made possible by the `run.log()` commands in the `train.py` file.
###Code
run
###Output
_____no_output_____
###Markdown
Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run). The next cell blocks until the run finishes.
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
**Note:** All these calculations were run on your local machine, in the conda environment you defined above. You can find the results in:
- `~/.azureml/envs/azureml_xxxx` for the conda environment you just created
- `~/AppData/Local/Temp/azureml_runs/train-on-local_xxxx` for the machine learning models you trained (this path may differ depending on the platform you use). This folder also contains:
  - Logs (under azureml_logs/)
  - Output pickled files (under outputs/)
  - The configuration files (credentials, local and docker image setups)
  - The train.py and mylib.py scripts
  - The current notebook

Take a few minutes to examine the output of the cell above. It shows the content of some of the log files, and extra information on the conda environment used.

6.B System-managed environment

6.B.a Set the environment up

Now, instead of managing the setup of the environment yourself, you can ask the system to build a new conda environment for you. The environment is built once, and will be reused in subsequent executions as long as the conda dependencies remain unchanged.
###Code
from azureml.core.conda_dependencies import CondaDependencies
system_managed_env = Environment("system-managed-env")
system_managed_env.python.user_managed_dependencies = False
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
system_managed_env.python.conda_dependencies = cd
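# (Sketch) Alternatives: build the environment from a conda YAML file, or register it
# with the workspace for reuse across experiments (registering writes to the workspace):
# env_from_yaml = Environment.from_conda_specification(name="myenv", file_path="environment.yml")
# system_managed_env.register(workspace=ws)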
###Output
_____no_output_____
###Markdown
6.B.b Submit the script to run in the system-managed environment

A new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes.

The commands used to execute the run are then the same as the ones you used above.
###Code
src.run_config.environment = system_managed_env
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
6.B.c Get run history details
###Code
run
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
6.C Docker-based execution

In this section, you will train the same models, but you will do so in a Docker container, on your local machine. For this, you need to have the Docker engine installed locally. If you don't have it yet, please follow the instructions below.

How to install Docker
- [Linux](https://docs.docker.com/install/linux/docker-ce/ubuntu/)
- [macOS](https://docs.docker.com/docker-for-mac/install/)
- [Windows](https://docs.docker.com/docker-for-windows/install/)

In case of issues, troubleshooting documentation can be found [here](https://docs.docker.com/docker-for-windows/troubleshoot/running-docker-for-windows-in-nested-virtualization-scenarios). Additionally, you can follow the steps below if Virtualization is not enabled on your machine:
- Go to Task Manager > Performance
- Check that Virtualization is enabled
- If it is not, go to `Start > Settings > Update and security > Recovery > Advanced Startup - Restart now > Troubleshoot > Advanced options > UEFI firmware settings - restart`
- In the BIOS, go to `Advanced > System options > Click the "Virtualization Technology (VTx)" only > Save > Exit > Save all changes` -- This will restart the machine

**Notes**:
- If your kernel is already running in a Docker container, such as **Azure Notebooks**, this mode will **NOT** work.
- If you use a GPU base image, it needs to be used on Microsoft Azure Services such as ACI, AML Compute, Azure VMs, or AKS.

You can also ask the system to pull down a Docker image and execute your scripts in it.

6.C.a Set the environment up

In the cell below, you will configure your run to execute in a Docker container. It will:
- run on a CPU
- contain a conda environment in which the scikit-learn library will be installed

As before, you will finish configuring your run by pointing to the `train.py` and `mylib.py` files.
###Code
docker_env = Environment("docker-env")
docker_env.python.user_managed_dependencies = False
docker_env.docker.enabled = True
# The default CPU-based Docker image from Azure ML is used; print its name
print(docker_env.docker.base_image)
# Specify conda dependencies with scikit-learn
docker_env.python.conda_dependencies = cd
###Output
_____no_output_____
###Markdown
6.C.b Submit the script to run in the Docker environment

The run is now configured and ready to be executed in a Docker container. If you are running this for the first time, the Docker container will get created, as well as the conda environment inside it. This will take several minutes. Once all this is generated, however, this conda environment will be reused as long as you don't change the conda dependencies.
###Code
import subprocess
src.run_config.environment = docker_env
# Check if Docker is installed and Linux containers are enabled
if subprocess.run("docker -v", shell=True).returncode == 0:
out = subprocess.check_output("docker system info", shell=True).decode('ascii')
if not "OSType: linux" in out:
print("Switch Docker engine to use Linux containers.")
else:
run = exp.submit(src)
else:
print("Docker engine is not installed.")
###Output
_____no_output_____
###Markdown
Potential issue on Windows and how to solve it

If you are using a Windows machine, the creation of the Docker image may fail, and you may see the following error message:

`docker: Error response from daemon: Drive has not been shared. Failed to launch docker container. Check that docker is running and that C:\ on Windows and /tmp elsewhere is shared.`

This is because the process above tries to create a Linux-based (i.e., non-Windows-based) Docker image. To fix this, you can:
- Open the Docker user interface
- Navigate to Settings > Shared drives
- Select C (or both C and D, if you have one)
- Apply

When this is done, you can try and re-run the command above.

6.C.c Get run history details
###Code
# Get run history details
run
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
The results obtained here should be the same as those obtained before. However, take a look at the "Execution summary" section in the output of the cell above. Look for "docker". There, you should see the "enabled" field set to True. Compare this to the two prior runs ("enabled" was then set to False).

6.C.d Use a custom Docker image

You can also specify a custom Docker image, if you don't want to use the default image provided by Azure ML.

You can either pull an image directly from Docker Hub:

```python
# Use an image available in Docker Hub without authentication
docker_env.docker.base_image = "continuumio/miniconda3"
```

Or one of the images you may already have created:

```python
# Or, use an image available in your private Azure Container Registry
docker_env.docker.base_image = "mycustomimage:1.0"
docker_env.docker.base_image_registry.address = "myregistry.azurecr.io"
docker_env.docker.base_image_registry.username = "username"
docker_env.docker.base_image_registry.password = "password"
```

Where to find my Docker image name and registry credentials

If you do not know what the name of your Docker image or container registry is, or if you don't know how to access the username and password needed above, proceed as follows:
- Docker image name:
  - In the portal, under your resource group, click on your current workspace
  - Click on Experiments
  - Click on Images
  - Click on the image of your choice
  - Copy the "ID" string
  - In this notebook, replace "mycustomimage:1.0" with that ID string
- Username and password:
  - In the portal, under your resource group, click on the container registry associated with your workspace
  - If you have several and don't know which one you need, click on your workspace, go to Overview and click on the "Registry" name on the upper right of the screen
  - There, go to "Access keys"
  - Copy the username and one of the passwords
  - In this notebook, replace "username" and "password" by these values

In any case, you will need to set these properties on the `docker_env` environment defined in section 6.C.a, instead of relying on the default base image printed there.

When you are using your custom Docker image, you might already have your Python environment properly set up. In that case, you can skip specifying conda dependencies, and just use the `user_managed_dependencies` option instead:

```python
docker_env.python.user_managed_dependencies = True
# Path to the Python environment in the custom Docker image
docker_env.python.interpreter_path = '/opt/conda/bin/python'
```

7. Query run metrics

Once your run has completed, you can now extract the metrics you captured by using the `get_metrics` method. As shown in the `train.py` file, these metrics are "alpha" and "mse".
###Code
# Get all metrics logged in the run ("alpha" and "mse", per train.py)
metrics = run.get_metrics()
metrics
###Output
_____no_output_____
###Markdown
Let's find the model that has the lowest MSE value logged.
###Code
import numpy as np
best_alpha = metrics['alpha'][np.argmin(metrics['mse'])]
print('When alpha is {1:0.2f}, we have min MSE {0:0.2f}.'.format(
min(metrics['mse']),
best_alpha
))
###Output
_____no_output_____
###Markdown
Let's compare it to the others
###Code
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.plot(metrics['alpha'], metrics['mse'], marker='o')
plt.ylabel("MSE")
plt.xlabel("Alpha")
###Output
_____no_output_____
###Markdown
You can also list all the files that are associated with this run record
###Code
run.get_file_names()
###Output
_____no_output_____
###Markdown
From the results obtained above, `ridge_0.40.pkl` is the best performing model. You can now register that particular model with the workspace. Once you have done so, go back to the portal and click on "Models". You should see it there.
###Code
# Supply a model name, and the full path to the serialized model file.
model = run.register_model(model_name='best_ridge_model', model_path='./outputs/ridge_0.40.pkl')
print("Registered model:\n --> Name: {}\n --> Version: {}\n --> URL: {}".format(model.name, model.version, model.url))
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License.  02. Train locally_**Train a model locally: Directly on your machine and within a Docker container**_--- Table of contents1. [Introduction](intro)1. [Pre-requisites](pre-reqs)1. [Initialize Workspace](init)1. [Create An Experiment](exp)1. [View training and auxiliary scripts](view)1. [Configure & Run](config-run) 1. User-managed environment 1. Set the environment up 1. Submit the script to run in the user-managed environment 1. Get run history details 1. System-managed environment 1. Set the environment up 1. Submit the script to run in the system-managed environment 1. Get run history details 1. Docker-based execution 1. Set the environment up 1. Submit the script to run in the system-managed environment 1. Get run history details 1. Use a custom Docker image1. [Query run metrics](query)--- 1. Introduction In this notebook, we will learn how to:* Connect to our AML workspace* Create or load a workspace* Configure & execute a local run in: - a user-managed Python environment - a system-managed Python environment - a Docker environment* Query run metrics to find the best model trained in the run* Register that model for operationalization 2. Pre-requisites In this notebook, we assume that you have set your Azure Machine Learning workspace. If you have not, make sure you go through the [configuration notebook](../../../configuration.ipynb) first. In the end, you should have configuration file that contains the subscription ID, resource group and name of your workspace.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
3. Initialize Workspace Initialize your workspace object from configuration file
###Code
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')
###Output
_____no_output_____
###Markdown
4. Create An Experiment An experiment is a logical container in an Azure ML Workspace. It contains a series of trials called `Runs`. As such, it hosts run records such as run metrics, logs, and other output artifacts from your experiments.
###Code
from azureml.core import Experiment
experiment_name = 'train-on-local'
exp = Experiment(workspace=ws, name=experiment_name)
###Output
_____no_output_____
###Markdown
5. View training and auxiliary scripts For convenience, we already created the training (`train.py`) script and supportive libraries (`mylib.py`) for you. Take a few minutes to examine both files.
###Code
with open('./train.py', 'r') as f:
print(f.read())
with open('./mylib.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
6. Configure & Run 6.A User-managed environment 6.A.a Set the environment upWhen using a user-managed environment, you are responsible for ensuring that all the necessary packages are available in the Python environment you choose to run the script in.
###Code
from azureml.core import Environment
# Editing a run configuration property on-fly.
user_managed_env = Environment("user-managed-env")
user_managed_env.python.user_managed_dependencies = True
# You can choose a specific Python environment by pointing to a Python path
#user_managed_env.python.interpreter_path = '/home/johndoe/miniconda3/envs/myenv/bin/python'
###Output
_____no_output_____
###Markdown
6.A.b Submit the script to run in the user-managed environmentWhatever the way you manage your environment, you need to use the `ScriptRunConfig` class. It allows you to further configure your run by pointing to the `train.py` script and to the working directory, which also contains the `mylib.py` file. These inputs indeed provide the commands to execute in the run. Once the run is configured, you submit it to your experiment.
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory='./', script='train.py')
src.run_config.environment = user_managed_env
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
6.A.c Get run history detailsWhile all calculations were run on your machine (cf. below), by using a `run` you also captured the results of your calculations into your run and experiment. You can then see them on the Azure portal, through the link displayed as output of the following cell.**Note**: The recording of the computation results into your run was made possible by the `run.log()` commands in the `train.py` file.
###Code
run
###Output
_____no_output_____
###Markdown
Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run). Block any execution to wait until the run finishes.
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
**Note:** All these calculations were run on your local machine, in the conda environment you defined above. You can find the results in:- `~/.azureml/envs/azureml_xxxx` for the conda environment you just created- `~/AppData/Local/Temp/azureml_runs/train-on-local_xxxx` for the machine learning models you trained (this path may differ depending on the platform you use). This folder also contains - Logs (under azureml_logs/) - Output pickled files (under outputs/) - The configuration files (credentials, local and docker image setups) - The train.py and mylib.py scripts - The current notebookTake a few minutes to examine the output of the cell above. It shows the content of some of the log files, and extra information on the conda environment used. 6.B System-managed environment 6.B.a Set the environment upNow, instead of managing the setup of the environment yourself, you can ask the system to build a new conda environment for you. The environment is built once, and will be reused in subsequent executions as long as the conda dependencies remain unchanged.
###Code
from azureml.core.conda_dependencies import CondaDependencies
system_managed_env = Environment("system-managed-env")
system_managed_env.python.user_managed_dependencies = False
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
system_managed_env.python.conda_dependencies = cd
###Output
_____no_output_____
###Markdown
6.B.b Submit the script to run in the system-managed environmentA new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes.The commands used to execute the run are then the same as the ones you used above.
###Code
src.run_config.environment = system_managed_env
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
6.B.c Get run history details
###Code
run
run.wait_for_completion(show_output = True)
###Output
_____no_output_____
###Markdown
6.C Docker-based executionIn this section, you will train the same models, but you will do so in a Docker container, on your local machine. For this, you then need to have the Docker engine installed locally. If you don't have it yet, please follow the instructions below. How to install Docker- [Linux](https://docs.docker.com/install/linux/docker-ce/ubuntu/)- [MacOs](https://docs.docker.com/docker-for-mac/install/)- [Windows](https://docs.docker.com/docker-for-windows/install/) In case of issues, troubleshooting documentation can be found [here](https://docs.docker.com/docker-for-windows/troubleshoot/running-docker-for-windows-in-nested-virtualization-scenarios). Additionally, you can follow the steps below, if Virtualization is not enabled on your machine: - Go to Task Manager > Performance - Check that Virtualization is enabled - If it is not, go to `Start > Settings > Update and security > Recovery > Advanced Startup - Restart now > Troubleshoot > Advanced options > UEFI firmware settings - restart` - In the BIOS, go to `Advanced > System options > Click the "Virtualization Technology (VTx)" only > Save > Exit > Save all changes` -- This will restart the machine**Notes**: - If your kernel is already running in a Docker container, such as **Azure Notebooks**, this mode will **NOT** work.- If you use a GPU base image, it needs to be used on Microsoft Azure Services such as ACI, AML Compute, Azure VMs, or AKS.You can also ask the system to pull down a Docker image and execute your scripts in it. 6.C.a Set the environment upIn the cell below, you will configure your run to execute in a Docker container. It will:- run on a CPU- contain a conda environment in which the scikit-learn library will be installed.As before, you will finish configuring your run by pointing to the `train.py` and `mylib.py` files.
###Code
docker_env = Environment("docker-env")
docker_env.python.user_managed_dependencies = False
docker_env.docker.enabled = True
# use the default CPU-based Docker image from Azure ML
print(docker_env.docker.base_image)
# Specify conda dependencies with scikit-learn
docker_env.python.conda_dependencies = cd
###Output
_____no_output_____
###Markdown
6.C.b Submit the script to run in the system-managed environmentThe run is now configured and ready to be executed in a Docker container. If you are running this for the first time, the Docker container will get created, as well as the conda environment inside it. This will take several minutes. Once all this is generated, however, this conda environment will be reused as long as you don't change the conda dependencies.
###Code
import subprocess
src.run_config.environment = docker_env
# Check if Docker is installed and Linux containers are enabled
if subprocess.run("docker -v", shell=True).returncode == 0:
out = subprocess.check_output("docker system info", shell=True).decode('ascii')
if not "OSType: linux" in out:
print("Switch Docker engine to use Linux containers.")
else:
run = exp.submit(src)
else:
print("Docker engine is not installed.")
###Output
_____no_output_____
###Markdown
Potential issue on Windows and how to solve itIf you are using a Windows machine, the creation of the Docker image may fail, and you may see the following error message`docker: Error response from daemon: Drive has not been shared. Failed to launch docker container. Check that docker is running and that C:\ on Windows and /tmp elsewhere is shared.`This is because the process above tries to create a linux-based, i.e. non-windows-based, Docker image. To fix this, you can:- Open the Docker user interface- Navigate to Settings > Shared drives- Select C (or both C and D, if you have one)- ApplyWhen this is done, you can try and re-run the command above. 6.C.c Get run history details
###Code
# Get run history details
run
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
The results obtained here should be the same as those obtained before. However, take a look at the "Execution summary" section in the output of the cell above. Look for "docker". There, you should see the "enabled" field set to True. Compare this to the 2 prior runs ("enabled" was then set to False). 6.C.d Use a custom Docker imageYou can also specify a custom Docker image, if you don't want to use the default image provided by Azure ML.You can either pull an image directly from Anaconda:```python Use an image available in Docker Hub without authenticationrun_config_docker.environment.docker.base_image = "continuumio/miniconda3"```Or one of the images you may already have created:```python or, use an image available in your private Azure Container Registryrun_config_docker.environment.docker.base_image = "mycustomimage:1.0"run_config_docker.environment.docker.base_image_registry.address = "myregistry.azurecr.io"run_config_docker.environment.docker.base_image_registry.username = "username"run_config_docker.environment.docker.base_image_registry.password = "password"``` Where to find my Docker image name and registry credentials If you do not know what the name of your Docker image or container registry is, or if you don't know how to access the username and password needed above, proceed as follows: - Docker image name: - In the portal, under your resource group, click on your current workspace - Click on Experiments - Click on Images - Click on the image of your choice - Copy the "ID" string - In this notebook, replace "mycustomimage:1/0" with that ID string - Username and password: - In the portal, under your resource group, click on the container registry associated with your workspace - If you have several and don't know which one you need, click on your workspace, go to Overview and click on the "Registry" name on the upper right of the screen - There, go to "Access keys" - Copy the username and one of the passwords - In this notebook, replace "username" and "password" by these valuesIn any case, you will need to use the lines above in place of the line marked as ` Reference Docker image` in section 6.C.a. When you are using your custom Docker image, you might already have your Python environment properly set up. In that case, you can skip specifying conda dependencies, and just use the `user_managed_dependencies` option instead:```pythonrun_config_docker.environment.python.user_managed_dependencies = True path to the Python environment in the custom Docker imagerun_config.environment.python.interpreter_path = '/opt/conda/bin/python'``` 7. Query run metrics Once your run has completed, you can now extract the metrics you captured by using the `get_metrics` method. As shown in the `train.py` file, these metrics are "alpha" and "mse".
###Code
# Get all metris logged in the run
run.get_metrics()
metrics = run.get_metrics()
###Output
_____no_output_____
###Markdown
Let's find the model that has the lowest MSE value logged.
###Code
import numpy as np
best_alpha = metrics['alpha'][np.argmin(metrics['mse'])]
print('When alpha is {1:0.2f}, we have min MSE {0:0.2f}.'.format(
min(metrics['mse']),
best_alpha
))
###Output
_____no_output_____
###Markdown
Let's compare it to the others
###Code
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.plot(metrics['alpha'], metrics['mse'], marker='o')
plt.ylabel("MSE")
plt.xlabel("Alpha")
###Output
_____no_output_____
###Markdown
You can also list all the files that are associated with this run record
###Code
run.get_file_names()
###Output
_____no_output_____
###Markdown
From the results obtained above, `ridge_0.40.pkl` is the best performing model. You can now register that particular model with the workspace. Once you have done so, go back to the portal and click on "Models". You should see it there.
###Code
# Supply a model name, and the full path to the serialized model file.
model = run.register_model(model_name='best_ridge_model', model_path='./outputs/ridge_0.40.pkl')
print("Registered model:\n --> Name: {}\n --> Version: {}\n --> URL: {}".format(model.name, model.version, model.url))
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License.  02. Train locally_**Train a model locally: Directly on your machine and within a Docker container**_--- Table of contents1. [Introduction](intro)1. [Pre-requisites](pre-reqs)1. [Initialize Workspace](init)1. [Create An Experiment](exp)1. [View training and auxiliary scripts](view)1. [Configure & Run](config-run) 1. User-managed environment 1. Set the environment up 1. Submit the script to run in the user-managed environment 1. Get run history details 1. System-managed environment 1. Set the environment up 1. Submit the script to run in the system-managed environment 1. Get run history details 1. Docker-based execution 1. Set the environment up 1. Submit the script to run in the system-managed environment 1. Get run history details 1. Use a custom Docker image1. [Query run metrics](query)--- 1. Introduction In this notebook, we will learn how to:* Connect to our AML workspace* Create or load a workspace* Configure & execute a local run in: - a user-managed Python environment - a system-managed Python environment - a Docker environment* Query run metrics to find the best model trained in the run* Register that model for operationalization 2. Pre-requisites In this notebook, we assume that you have set your Azure Machine Learning workspace. If you have not, make sure you go through the [configuration notebook](../../../configuration.ipynb) first. In the end, you should have configuration file that contains the subscription ID, resource group and name of your workspace.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
3. Initialize Workspace Initialize your workspace object from configuration file
###Code
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')
###Output
_____no_output_____
###Markdown
4. Create An Experiment An experiment is a logical container in an Azure ML Workspace. It contains a series of trials called `Runs`. As such, it hosts run records such as run metrics, logs, and other output artifacts from your experiments.
###Code
from azureml.core import Experiment
experiment_name = 'train-on-local'
exp = Experiment(workspace=ws, name=experiment_name)
###Output
_____no_output_____
###Markdown
5. View training and auxiliary scripts For convenience, we already created the training (`train.py`) script and supportive libraries (`mylib.py`) for you. Take a few minutes to examine both files.
###Code
with open('./train.py', 'r') as f:
print(f.read())
with open('./mylib.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
6. Configure & Run 6.A User-managed environment 6.A.a Set the environment upWhen using a user-managed environment, you are responsible for ensuring that all the necessary packages are available in the Python environment you choose to run the script in.
###Code
from azureml.core import Environment
# Editing a run configuration property on-fly.
user_managed_env = Environment("user-managed-env")
user_managed_env.python.user_managed_dependencies = True
# You can choose a specific Python environment by pointing to a Python path
#user_managed_env.python.interpreter_path = '/home/johndoe/miniconda3/envs/myenv/bin/python'
###Output
_____no_output_____
###Markdown
6.A.b Submit the script to run in the user-managed environmentHowever you manage your environment, you need to use the `ScriptRunConfig` class. It allows you to configure your run further by pointing to the `train.py` script and to the working directory, which also contains the `mylib.py` file. Together, these inputs define what gets executed in the run. Once the run is configured, you submit it to your experiment.
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory='./', script='train.py')
src.run_config.environment = user_managed_env
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
6.A.c Get run history detailsWhile all calculations were run on your machine (cf. below), by using a `run` you also captured the results of your calculations into your run and experiment. You can then see them on the Azure portal, through the link displayed as output of the following cell.**Note**: The recording of the computation results into your run was made possible by the `run.log()` commands in the `train.py` file.
###Code
run
###Output
_____no_output_____
###Markdown
Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run). The following call blocks execution until the run finishes.
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
**Note:** All these calculations were run on your local machine, in the conda environment you defined above. You can find the results in:- `~/.azureml/envs/azureml_xxxx` for the conda environment you just created- `~/AppData/Local/Temp/azureml_runs/train-on-local_xxxx` for the machine learning models you trained (this path may differ depending on the platform you use). This folder also contains - Logs (under azureml_logs/) - Output pickled files (under outputs/) - The configuration files (credentials, local and docker image setups) - The train.py and mylib.py scripts - The current notebookTake a few minutes to examine the output of the cell above. It shows the content of some of the log files, and extra information on the conda environment used. 6.B System-managed environment 6.B.a Set the environment upNow, instead of managing the setup of the environment yourself, you can ask the system to build a new conda environment for you. The environment is built once, and will be reused in subsequent executions as long as the conda dependencies remain unchanged.
###Code
from azureml.core.conda_dependencies import CondaDependencies
system_managed_env = Environment("system-managed-env")
system_managed_env.python.user_managed_dependencies = False
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
system_managed_env.python.conda_dependencies = cd
###Output
_____no_output_____
###Markdown
6.B.b Submit the script to run in the system-managed environmentA new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes.The commands used to execute the run are then the same as the ones you used above.
###Code
src.run_config.environment = system_managed_env
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
6.B.c Get run history details
###Code
run
run.wait_for_completion(show_output = True)
###Output
_____no_output_____
###Markdown
6.C Docker-based executionIn this section, you will train the same models, but you will do so in a Docker container, on your local machine. For this, you then need to have the Docker engine installed locally. If you don't have it yet, please follow the instructions below. How to install Docker- [Linux](https://docs.docker.com/install/linux/docker-ce/ubuntu/)- [MacOs](https://docs.docker.com/docker-for-mac/install/)- [Windows](https://docs.docker.com/docker-for-windows/install/) In case of issues, troubleshooting documentation can be found [here](https://docs.docker.com/docker-for-windows/troubleshoot/running-docker-for-windows-in-nested-virtualization-scenarios). Additionally, you can follow the steps below, if Virtualization is not enabled on your machine: - Go to Task Manager > Performance - Check that Virtualization is enabled - If it is not, go to `Start > Settings > Update and security > Recovery > Advanced Startup - Restart now > Troubleshoot > Advanced options > UEFI firmware settings - restart` - In the BIOS, go to `Advanced > System options > Click the "Virtualization Technology (VTx)" only > Save > Exit > Save all changes` -- This will restart the machine**Notes**: - If your kernel is already running in a Docker container, such as **Azure Notebooks**, this mode will **NOT** work.- If you use a GPU base image, it needs to be used on Microsoft Azure Services such as ACI, AML Compute, Azure VMs, or AKS.You can also ask the system to pull down a Docker image and execute your scripts in it. 6.C.a Set the environment upIn the cell below, you will configure your run to execute in a Docker container. It will:- run on a CPU- contain a conda environment in which the scikit-learn library will be installed.As before, you will finish configuring your run by pointing to the `train.py` and `mylib.py` files.
###Code
docker_env = Environment("docker-env")
docker_env.python.user_managed_dependencies = False
docker_env.docker.enabled = True
# use the default CPU-based Docker image from Azure ML
print(docker_env.docker.base_image)
# Specify conda dependencies with scikit-learn
docker_env.python.conda_dependencies = cd
###Output
_____no_output_____
###Markdown
6.C.b Submit the script to run in the Docker environmentThe run is now configured and ready to be executed in a Docker container. If you are running this for the first time, the Docker container will get created, as well as the conda environment inside it. This will take several minutes. Once all this is generated, however, this conda environment will be reused as long as you don't change the conda dependencies.
###Code
import subprocess
src.run_config.environment = docker_env
# Check if Docker is installed and Linux containers are enabled
if subprocess.run("docker -v", shell=True).returncode == 0:
out = subprocess.check_output("docker system info", shell=True).decode('ascii')
if not "OSType: linux" in out:
print("Switch Docker engine to use Linux containers.")
else:
run = exp.submit(src)
else:
print("Docker engine is not installed.")
###Output
_____no_output_____
###Markdown
Potential issue on Windows and how to solve itIf you are using a Windows machine, the creation of the Docker image may fail, and you may see the following error message`docker: Error response from daemon: Drive has not been shared. Failed to launch docker container. Check that docker is running and that C:\ on Windows and /tmp elsewhere is shared.`This is because the process above tries to create a linux-based, i.e. non-windows-based, Docker image. To fix this, you can:- Open the Docker user interface- Navigate to Settings > Shared drives- Select C (or both C and D, if you have one)- ApplyWhen this is done, you can try and re-run the command above. 6.C.c Get run history details
###Code
# Get run history details
run
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
The results obtained here should be the same as those obtained before. However, take a look at the "Execution summary" section in the output of the cell above. Look for "docker". There, you should see the "enabled" field set to True. Compare this to the 2 prior runs ("enabled" was then set to False). 6.C.d Use a custom Docker imageYou can also specify a custom Docker image, if you don't want to use the default image provided by Azure ML.You can either pull a public image directly from Docker Hub (here, Anaconda's miniconda3 image):```python Use an image available in Docker Hub without authenticationrun_config_docker.environment.docker.base_image = "continuumio/miniconda3"```Or one of the images you may already have created:```python or, use an image available in your private Azure Container Registryrun_config_docker.environment.docker.base_image = "mycustomimage:1.0"run_config_docker.environment.docker.base_image_registry.address = "myregistry.azurecr.io"run_config_docker.environment.docker.base_image_registry.username = "username"run_config_docker.environment.docker.base_image_registry.password = "password"``` Where to find my Docker image name and registry credentials If you do not know what the name of your Docker image or container registry is, or if you don't know how to access the username and password needed above, proceed as follows: - Docker image name: - In the portal, under your resource group, click on your current workspace - Click on Experiments - Click on Images - Click on the image of your choice - Copy the "ID" string - In this notebook, replace "mycustomimage:1.0" with that ID string - Username and password: - In the portal, under your resource group, click on the container registry associated with your workspace - If you have several and don't know which one you need, click on your workspace, go to Overview and click on the "Registry" name on the upper right of the screen - There, go to "Access keys" - Copy the username and one of the passwords - In this notebook, replace "username" and "password" by these valuesIn any case, you will need to use the lines above in place of the line marked as ` Reference Docker image` in section 6.C.a. When you are using your custom Docker image, you might already have your Python environment properly set up. In that case, you can skip specifying conda dependencies, and just use the `user_managed_dependencies` option instead:```pythonrun_config_docker.environment.python.user_managed_dependencies = True path to the Python environment in the custom Docker imagerun_config_docker.environment.python.interpreter_path = '/opt/conda/bin/python'``` 7. Query run metrics Once your run has completed, you can now extract the metrics you captured by using the `get_metrics` method. As shown in the `train.py` file, these metrics are "alpha" and "mse".
###Code
# Get all metrics logged in the run
metrics = run.get_metrics()
metrics
###Output
_____no_output_____
###Markdown
Let's find the model that has the lowest MSE value logged.
###Code
import numpy as np
best_alpha = metrics['alpha'][np.argmin(metrics['mse'])]
print('When alpha is {1:0.2f}, we have min MSE {0:0.2f}.'.format(
min(metrics['mse']),
best_alpha
))
###Output
_____no_output_____
###Markdown
Let's compare it to the others.
###Code
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.plot(metrics['alpha'], metrics['mse'], marker='o')
plt.ylabel("MSE")
plt.xlabel("Alpha")
###Output
_____no_output_____
###Markdown
You can also list all the files that are associated with this run record
###Code
run.get_file_names()
###Output
_____no_output_____
###Markdown
From the results obtained above, `ridge_0.40.pkl` is the best performing model. You can now register that particular model with the workspace. Once you have done so, go back to the portal and click on "Models". You should see it there.
###Code
# Supply a model name, and the full path to the serialized model file.
model = run.register_model(model_name='best_ridge_model', model_path='./outputs/ridge_0.40.pkl')
print("Registered model:\n --> Name: {}\n --> Version: {}\n --> URL: {}".format(model.name, model.version, model.url))
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. 02. Train locally* Create or load workspace.* Create scripts locally.* Create `train.py` in a folder, along with a `mylib.py` file.* Configure & execute a local run in a user-managed Python environment.* Configure & execute a local run in a system-managed Python environment.* Configure & execute a local run in a Docker environment.* Query run metrics to find the best model* Register model for operationalization. PrerequisitesMake sure you go through the [configuration notebook](../../../configuration.ipynb) first if you haven't.
###Code
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize WorkspaceInitialize a workspace object from persisted configuration.
###Code
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')
###Output
_____no_output_____
###Markdown
Create An Experiment**Experiment** is a logical container in an Azure ML Workspace. It hosts run records which can include run metrics and output artifacts from your experiments.
###Code
from azureml.core import Experiment
experiment_name = 'train-on-local'
exp = Experiment(workspace=ws, name=experiment_name)
###Output
_____no_output_____
###Markdown
View `train.py``train.py` is already created for you.
###Code
with open('./train.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Note `train.py` also references a `mylib.py` file.
###Code
with open('./mylib.py', 'r') as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Configure & Run User-managed environmentBelow, we use a user-managed run, which means you are responsible for ensuring that all the necessary packages are available in the Python environment you choose to run the script in.
###Code
from azureml.core.runconfig import RunConfiguration
# Editing a run configuration property on-fly.
run_config_user_managed = RunConfiguration()
run_config_user_managed.environment.python.user_managed_dependencies = True
# You can choose a specific Python environment by pointing to a Python path
#run_config.environment.python.interpreter_path = '/home/johndoe/miniconda3/envs/sdk2/bin/python'
###Output
_____no_output_____
###Markdown
Submit script to run in the user-managed environmentNote that the whole script folder is submitted for execution, including the `mylib.py` file.
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory='./', script='train.py', run_config=run_config_user_managed)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
Get run history details
###Code
run
###Output
_____no_output_____
###Markdown
Block to wait till run finishes.
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
System-managed environmentYou can also ask the system to build a new conda environment and execute your scripts in it. The environment is built once and will be reused in subsequent executions as long as the conda dependencies remain unchanged.
###Code
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
run_config_system_managed = RunConfiguration()
run_config_system_managed.environment.python.user_managed_dependencies = False
run_config_system_managed.auto_prepare_environment = True
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
run_config_system_managed.environment.python.conda_dependencies = cd
###Output
_____no_output_____
###Markdown
Submit script to run in the system-managed environmentA new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes. But this conda environment is reused so long as you don't change the conda dependencies.
###Code
src = ScriptRunConfig(source_directory="./", script='train.py', run_config=run_config_system_managed)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
Get run history details
###Code
run
###Output
_____no_output_____
###Markdown
Block and wait till run finishes.
###Code
run.wait_for_completion(show_output = True)
###Output
_____no_output_____
###Markdown
Docker-based execution**IMPORTANT**: You must have Docker engine installed locally in order to use this execution mode. If your kernel is already running in a Docker container, such as **Azure Notebooks**, this mode will **NOT** work.NOTE: The GPU base image must be used on Microsoft Azure Services only such as ACI, AML Compute, Azure VMs, and AKS.You can also ask the system to pull down a Docker image and execute your scripts in it.
###Code
run_config_docker = RunConfiguration()
run_config_docker.environment.python.user_managed_dependencies = False
run_config_docker.auto_prepare_environment = True
run_config_docker.environment.docker.enabled = True
run_config_docker.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE
# Specify conda dependencies with scikit-learn
cd = CondaDependencies.create(conda_packages=['scikit-learn'])
run_config_docker.environment.python.conda_dependencies = cd
src = ScriptRunConfig(source_directory="./", script='train.py', run_config=run_config_docker)
###Output
_____no_output_____
###Markdown
Submit script to run in the Docker environmentA new conda environment is built based on the conda dependencies object. If you are running this for the first time, this might take up to 5 minutes. But this conda environment is reused so long as you don't change the conda dependencies.
###Code
import subprocess
# Check if Docker is installed and Linux containers are enabled
if subprocess.run("docker -v", shell=True).returncode == 0:
    out = subprocess.check_output("docker system info", shell=True, encoding="ascii")
    if "OSType: linux" not in out:
        print("Switch Docker engine to use Linux containers.")
    else:
        run = exp.submit(src)
else:
    print("Docker engine is not installed.")
# Get run history details
run
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
Query run metrics
###Code
# Get all metrics logged in the run
metrics = run.get_metrics()
metrics
###Output
_____no_output_____
###Markdown
Let's find the model that has the lowest MSE value logged.
###Code
import numpy as np
best_alpha = metrics['alpha'][np.argmin(metrics['mse'])]
print('When alpha is {1:0.2f}, we have min MSE {0:0.2f}.'.format(
min(metrics['mse']),
best_alpha
))
###Output
_____no_output_____
###Markdown
You can also list all the files that are associated with this run record
###Code
run.get_file_names()
###Output
_____no_output_____
###Markdown
We know the model `ridge_0.40.pkl` is the best performing model from the earlier queries. So let's register it with the workspace.
###Code
# Supply a model name, and the full path to the serialized model file.
model = run.register_model(model_name='best_ridge_model', model_path='./outputs/ridge_0.40.pkl')
print(model.name, model.version, model.url)
###Output
_____no_output_____ |
Examples/Molecular_Dynamics.ipynb | ###Markdown
Molecular DynamicsThis module is still under development but will allow for the fast simulation of molecular dynamics problems. It is intended to serve as a learning tool; the code was developed through study of "The Art of Molecular Dynamics" by D.C. Rapaport. All backend code was written in native C and joined to Python through the CPython C extension interface. This allows for the ease of use seen below, as only Python calls are required, while the speed of native C is retained.Below we simply import the module and call the Simulate2D class. What happens under the hood is as follows: an input file named input.txt is created in the local directory of the Python script with all inputs. Then, the C code is called, which reads from the input file and performs the simulation. Output is continually written to the specified output file in the same directory as the Python script. If the script is run from the command line, printout=True will provide live readouts of the stage of the simulation. Running from anywhere else will render the printout option useless, as the printout is produced within the C object file.
###Code
import NuclearTools.Molecular_Dynamics as MD
obj = MD.Simulate2D(deltaT = .005,
density = 0.8,
initUcellx = 20,
initUcelly = 20,
stepAvg = 100,
stepEquil = 0,
stepLimit = 10000,
temperature = 1,
limitVel = 4,
rangeVel = 3,
sizeHistVel = 100,
stepVel = 5,
randSeedP = 17,
printout = True,
outfile='2D_case_test')
###Output
_____no_output_____
###Markdown
We have performed the above simulation of a 20 x 20 2D box consisting of 400 atoms. All output has been written to 2D_case_test.txt, and now, using the plotting functions of the class, we can easily parse the output and plot our results.Firstly, below we can call fplot for any of 'Kinetic Energy', 'Total Energy', 'Potential Energy', 'Temperature', 'Momentum', 'Pressure', 'Total Energy stdv', 'Kinetic Energy stdv' or 'Pressure stdv'. Setting Lmean to True will calculate the mean value of each property and draw a horizontal line at that mean. fplot_mult takes a list of properties to plot. fplot_ravg calculates the running average of the specified properties and plots them.
###Code
obj.fplot('Kinetic Energy', Lmean=True)
obj.fplot_mult(['Kinetic Energy', 'Momentum'])
obj.fplot_ravg(['Momentum', 'Total Energy', 'Kinetic Energy', 'Pressure', 'Potential Energy', 'Temperature'])
###Output
_____no_output_____
###Markdown
We can then use the so-called H-function to observe the convergence of our simulation. The increments specifier dictates which iterations to plot for the probability distribution below, and low_r, up_r are the lower and upper boundaries of the iterations to plot. Currently, these bounds are simply determined by your simulation input and may require some guesswork to observe what you intend.
###Code
obj.plot_prob_vs_vel(increments=[0,1,2,10])
obj.plot_hfun_vs_time(low_r=0, up_r=10)
###Output
_____no_output_____
###Markdown
Next we will perform a 3D simulation using the 'Cell-Neighbor' method. Viable methods also include all-pairs, cell-list, and neighbor-list. It is recommended to use 'Cell-Neighbor' for speed.
###Code
obj = MD.Simulate3D(deltaT = .001,
density = 1.2,
temperature = 0.4,
rCut = 3,
initUcellx = 5,
initUcelly = 5,
initUcellz = 5,
nebrTabFac = 100,
rNebrShell = 0.4,
stepAvg = 100,
stepEquil = 5000,
stepLimit = 10000,
stepAdjustTemp = 10,
limitVel = 4,
rangeVel = 3,
sizeHistVel = 100,
stepVel = 5,
randSeedP = 17,
method = 'Cell-Neighbor',
printout = True,
outfile='3D_case_test')
###Output
_____no_output_____
###Markdown
The plotting is performed as before, as can be seen below.
###Code
obj.fplot('Kinetic Energy', Lmean=True)
obj.fplot_mult(['Kinetic Energy', 'Momentum'])
obj.fplot_ravg(['Momentum', 'Total Energy', 'Kinetic Energy', 'Pressure', 'Potential Energy', 'Temperature'])
obj.fplot_ravg(['Kinetic Energy stdv'])
obj.fplot_ravg(['Total Energy stdv'])
obj.fplot_ravg(['Pressure stdv'])
obj.plot_prob_vs_vel(increments=[0,1,2,5])
obj.plot_hfun_vs_time(low_r=0, up_r=10)
###Output
_____no_output_____
###Markdown
Many times it is more efficient and more organized to have prebuilt input files and to simply run them all. Then one can load them into an object to plot or read information. Given below is the RunCases3D function, with the 2D case simply being RunCases2D. The ReadfromOutput() class works for both.
###Code
MD.RunCases3D(['input_1.txt', 'input_2.txt'])
obj = MD.ReadfromOutput(['3D_1.txt', '3D_2.txt'])
###Output
_____no_output_____
###Markdown
Plotting is the same as before; without specifying an output_list in the plotting functions, every file from ReadfromOutput will be plotted. To only plot certain files, specify a list of files through this parameter.
###Code
obj.fplot('Kinetic Energy', Lmean=True, output_list=['3D_1.txt'])
obj.fplot_mult(['Kinetic Energy', 'Momentum'])
obj.fplot_ravg(['Momentum', 'Total Energy', 'Kinetic Energy', 'Pressure', 'Potential Energy', 'Temperature'])
obj.fplot_ravg(['Kinetic Energy stdv'], output_list=['3D_1.txt'])
obj.fplot_ravg(['Total Energy stdv'])
obj.fplot_ravg(['Pressure stdv'])
###Output
_____no_output_____
###Markdown
To view the actual numerical values of the simulation, the following approaches can be used. There are two ways to access values: through the direct attribute names, or through a dictionary keyed by the full property name. This allows for ease of use and recall. All properties given above as viable for plotting can be directly called here.
###Code
print('3D_1 Kinetic E: ', obj.outputs['3D_1.txt'].values['Kinetic Energy'][0:5])
print('3D_1 Kinetic E: ', obj.outputs['3D_1.txt'].kinenergy[0:5])
print('3D_2 Total E stdv: ', obj.outputs['3D_2.txt'].values['Total Energy stdv'][0:5])
print('3D_2 Total E stdv: ', obj.outputs['3D_2.txt'].sig_totenergy[0:5])
###Output
3D_1 Kinetic E: [0.686 0.7552 0.7644 0.7487 0.7495]
3D_1 Kinetic E: [0.686 0.7552 0.7644 0.7487 0.7495]
3D_2 Total E stdv: [0.363 0.1498 0.0231 0.0156 0.0177]
3D_2 Total E stdv: [0.363 0.1498 0.0231 0.0156 0.0177]
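###Markdown
Since the parsed properties come back as plain numeric arrays, standard NumPy operations apply to them directly. A small illustrative addition (not part of the original walkthrough):
###Code
import numpy as np
ke = np.asarray(obj.outputs['3D_1.txt'].kinenergy)
print('Mean kinetic energy: %.4f (std %.4f)' % (ke.mean(), ke.std()))
###Output
_____no_output_____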
|
docs/guide.ipynb | ###Markdown
plot_likert guideWelcome! This notebook aims to introduce you to the usage and options of plot_likert. Have questions or suggestions for improvement? Feel free to open an issue! Prerequisites First, obviously, you'll need plot_likert itself. See homepage for installation instructions.
###Code
import plot_likert
###Output
_____no_output_____
###Markdown
plot_likert operates on [Pandas](https://pandas.pydata.org/) [DataFrames](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html), so you'll need Pandas as well.
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Pandas uses [NumPy](https://numpy.org/) under the hood. You won't need it directly, but this notebook will use it for a couple of things, like random number generation.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Quick startIf you have the data in the right format, you can make a plot with just one line of code! So, let's get some data:
###Code
rng = np.random.default_rng(seed=42)
data = pd.DataFrame(rng.choice(plot_likert.scales.agree, (10,2)), columns=['Q1','Q2'])
###Output
_____no_output_____
###Markdown
and now, the magic happens:
###Code
plot_likert.plot_likert(data, plot_likert.scales.agree);
###Output
_____no_output_____
###Markdown
InputsNow you know how easy it can be to start using plot_likert. But we said that the data has to be in the right format. What does that mean exactly? Let's take a look at what we passed in to the function:
###Code
data
###Output
_____no_output_____
###Markdown
To make our assumptions explicit:1. The input has to be a [Pandas](https://pandas.pydata.org/) [DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)2. Each row represents a response from a single respondent3. Each cell contains their response, preferably as a string4. Each column represents a different question asked to each respondent ScalesThe second input to the `plot_likert` function is the _scale_ you're using for your questions,i.e., your questions' answer choices. Here's what it looked like for the dataset above:
###Code
plot_likert.scales.agree
###Output
_____no_output_____
###Markdown
You need to specify the scale explicitly because plot_likert needs to know the order of the scale (for colors and sorting), and in case there are any values in the scale that aren't represented in your dataset. The scale is an array of strings, so you can construct one yourself.
###Code
another_scale = \
['strongly disagree',
'disagree',
'neither agree nor disagree',
'agree',
'strongly agree']
###Output
_____no_output_____
###Markdown
Scale must match inputs exactlyThe scale you pass in must match the data exactly, otherwise a `PlotLikertError` is raised.For example, the scale we just constructed has the fields in lower-case, but the data has the words capitalized. Trying to plot now throws an exception.
###Code
try:
plot_likert.plot_likert(data, another_scale);
print("Yay, everything worked!")
except plot_likert.PlotLikertError as e:
import sys
print("Oh no, something went wrong! The message in the exception is:\n" + str(e), file=sys.stderr)
###Output
Oh no, something went wrong! The message in the exception is:
A response was found with value `Strongly disagree`, which is not one of the values in the provided scale: ['strongly disagree', 'disagree', 'neither agree nor disagree', 'agree', 'strongly agree']. If this is unexpected, you might want to double-check for extra whitespace, capitalization, spelling, or type (int versus str).
###Markdown
Bundled scalesFor your convenience, plot_likert [includes some commonly used scales](https://github.com/nmalkin/plot-likert/blob/master/plot_likert/scales.py), for example:
###Code
plot_likert.scales.acceptable
plot_likert.scales.raw5
###Output
_____no_output_____
###Markdown
If you'd like to add a scale, please open a pull request. Missing dataIf not all of your respondents answered every question, you might have empty cells.This works fine.However, you'll get [a warning](https://docs.python.org/3/library/warnings.html) if you're plotting percentages (see below).
###Code
missing_data = data.copy()
missing_data.iloc[0,0] = np.NaN
# This produces a warning:
#plot_likert.plot_likert(missing_data, plot_likert.scales.agree, plot_percentage=True);
###Output
_____no_output_____
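###Markdown
If you do want to run that plot without the console noise, the standard library can silence the warning (an illustrative addition; the warning itself is still worth heeding).
###Code
import warnings
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    plot_likert.plot_likert(missing_data, plot_likert.scales.agree, plot_percentage=True);
###Output
_____no_output_____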
###Markdown
Plotting percentagesOften, instead of plotting the raw number of responses, you'll want to plot the percentage of respondents who answered a certain way. You can do this by setting the argument `plot_percentage=True`:
###Code
plot_likert.plot_likert(data, plot_likert.scales.agree,
plot_percentage=True);
###Output
_____no_output_____
###Markdown
Customizing colorsYou can change the colors used in the plot by passing in an array of color values as the `colors` argument:
###Code
plot_likert.plot_likert(data, plot_likert.scales.agree,
colors=plot_likert.colors.likert5);
###Output
_____no_output_____
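###Markdown
As a quick aside (an illustrative addition, not from the original guide): a scheme can be copied and tweaked before being passed in. "darkgreen" below is an arbitrary matplotlib color.
###Code
my_colors = list(plot_likert.colors.likert5)
my_colors[-1] = "darkgreen"  # swap in any matplotlib color value
plot_likert.plot_likert(data, plot_likert.scales.agree, colors=my_colors);
###Output
_____no_output_____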
###Markdown
This is mandatory if you're using a scale that has more than 5 values.Some default color schemes [are provided under `plot_likert.colors`](https://github.com/nmalkin/plot-likert/blob/master/plot_likert/colors.py).A color scheme is just an array of [matplotlib color values](https://matplotlib.org/tutorials/colors/colors.html), so you can also construct your own. Changing the figure sizeThe default size of the plot can be pretty cramped. You can adjust the figure size using the `figsize` argument. This specifies the dimensions of the figure in inches. This argument is passed directly to [matplotlib](https://matplotlib.org), so [see its documentation](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.figure.html) for any questions.
###Code
plot_likert.plot_likert(data, plot_likert.scales.agree,
figsize=(10,3));
###Output
_____no_output_____
###Markdown
You can do more advanced customization too: Controlling the plotThe plotting function returns a [matplotlib Axes object](https://matplotlib.org/api/axes_api.htmlmatplotlib.axes.Axes), which you can use to customize the figure, just as you would with any other matplotlib plot.Here's a quick example:
###Code
ax = plot_likert.plot_likert(data, plot_likert.scales.agree)
ax.figure.set_size_inches(8, 2)
ax.xaxis.set_label_text('# of awesome people');
ax.set_yticklabels(['Second question', 'First question']);
###Output
_____no_output_____
###Markdown
Plotting already-aggregated dataThe `plot_likert` function we've been using operates on "raw" responses:a DataFrame that has a row for each response (as discussed above).However, it's possible your data has a different shape,and/or you have already aggregated the data,and just want to plot it.plot_likert provides a solution for this too!Just take your aggregated counts:
###Code
precomputed_counts = pd.DataFrame(
{'Strongly disagree': {'Q1': 2.0, 'Q2': 2.0},
'Disagree': {'Q1': 1.0, 'Q2': 0.0},
'Neither agree nor disagree': {'Q1': 3.0, 'Q2': 2.0},
'Agree': {'Q1': 3.0, 'Q2': 4.0},
'Strongly agree': {'Q1': 1.0, 'Q2': 2.0}}
)
precomputed_counts
###Output
_____no_output_____
###Markdown
…and pass them to the `plot_counts` function:
###Code
plot_likert.plot_counts(precomputed_counts, plot_likert.scales.agree);
###Output
_____no_output_____
###Markdown
plot_likert guideWelcome! This notebook aims to introduce you to the usage and options of plot_likert. Have questions or suggestions for improvement? Feel free to open an issue! Prerequisites First, obviously, you'll need plot_likert itself. See homepage for installation instructions.
###Code
import plot_likert
###Output
_____no_output_____
###Markdown
plot_likert operates on [Pandas](https://pandas.pydata.org/) [DataFrames](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html), so you'll need Pandas as well.
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Pandas uses [NumPy](https://numpy.org/) under the hood. You won't need it directly, but this notebook will use it for a couple of things, like random number generation.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Quick startIf you have the data in the right format, you can make a plot with just one line of code! So, let's get some data:
###Code
rng = np.random.default_rng(seed=42)
data = pd.DataFrame(rng.choice(plot_likert.scales.agree, (10,2)), columns=['Q1','Q2'])
###Output
_____no_output_____
###Markdown
and now, the magic happens:
###Code
plot_likert.plot_likert(data, plot_likert.scales.agree);
###Output
_____no_output_____
###Markdown
InputsNow you know how easy it can be to start using plot_likert. But we said that the data has to be in the right format. What does that mean exactly? Let's take a look at what we passed in to the function:
###Code
data
###Output
_____no_output_____
###Markdown
To make our assumptions explicit:1. The input has to be a [Pandas](https://pandas.pydata.org/) [DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)2. Each row represents a response from a single respondent3. Each cell contains their response, preferably as a string4. Each column represents a different question asked to each respondent ScalesThe second input to the `plot_likert` function is the _scale_ you're using for your questions,i.e., your questions' answer choices. Here's what it looked like for the dataset above:
###Code
plot_likert.scales.agree
###Output
_____no_output_____
###Markdown
You need to specify the scale explicitly because plot_likert needs to know the order of the scale (for colors and sorting), and in case there are any values in the scale that aren't represented in your dataset. The scale is an array of strings, so you can construct one yourself.
###Code
another_scale = \
['strongly disagree',
'disagree',
'neither agree nor disagree',
'agree',
'strongly agree']
###Output
_____no_output_____
###Markdown
Scale must match inputs exactlyThe scale you pass in must match the data exactly, otherwise a `PlotLikertError` is raised.For example, the scale we just constructed has the fields in lower-case, but the data has the words capitalized. Trying to plot now throws an exception.
###Code
try:
plot_likert.plot_likert(data, another_scale);
print("Yay, everything worked!")
except plot_likert.PlotLikertError as e:
import sys
print("Oh no, something went wrong! The message in the exception is:\n" + str(e), file=sys.stderr)
###Output
Oh no, something went wrong! The message in the exception is:
A response was found with value `Strongly disagree`, which is not one of the values in the provided scale: ['strongly disagree', 'disagree', 'neither agree nor disagree', 'agree', 'strongly agree']. If this is unexpected, you might want to double-check for extra whitespace, capitalization, spelling, or type (int versus str).
###Markdown
Bundled scalesFor your convenience, plot_likert [includes some commonly used scales](https://github.com/nmalkin/plot-likert/blob/master/plot_likert/scales.py), for example:
###Code
plot_likert.scales.acceptable
plot_likert.scales.raw5
###Output
_____no_output_____
###Markdown
If you'd like to add a scale, please open a pull request. Missing dataIf not all of your respondents answered every question, you might have empty cells.This works fine.However, you'll get [a warning](https://docs.python.org/3/library/warnings.html) if you're plotting percentages (see below).
###Code
missing_data = data.copy()
missing_data.iloc[0,0] = np.NaN
# This produces a warning:
#plot_likert.plot_likert(missing_data, plot_likert.scales.agree, plot_percentage=True);
###Output
_____no_output_____
###Markdown
Plotting percentagesOften, instead of plotting the raw number of responses, you'll want to plot the percentage of respondents who answered a certain way. You can do this by setting the argument `plot_percentage=True`:
###Code
plot_likert.plot_likert(data, plot_likert.scales.agree,
plot_percentage=True);
###Output
_____no_output_____
###Markdown
Customizing colorsYou can change the colors used in the plot by passing in an array of color values as the `colors` argument:
###Code
plot_likert.plot_likert(data, plot_likert.scales.agree,
colors=plot_likert.colors.likert5);
###Output
_____no_output_____
###Markdown
This is mandatory if you're using a scale that has more than 5 values.Some default color schemes [are provided under `plot_likert.colors`](https://github.com/nmalkin/plot-likert/blob/master/plot_likert/colors.py).A color scheme is just an array of [matplotlib color values](https://matplotlib.org/tutorials/colors/colors.html), so you can also construct your own. Labeling bar valuesYou can add a label with each bar segment's value by setting the `bar_labels` argument to `True`.The default color of the text is white, but you can change it with the `bar_labels_color` argument.
###Code
plot_likert.plot_likert(data, plot_likert.scales.agree, plot_percentage=True,
bar_labels=True, bar_labels_color="snow", colors=plot_likert.colors.default_with_darker_neutral);
###Output
_____no_output_____
###Markdown
Changing the figure sizeThe default size of the plot can be pretty cramped. You can adjust the figure size using the `figsize` argument. This specifies the dimensions of the figure in inches. This argument is passed directly to [matplotlib](https://matplotlib.org), so [see its documentation](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.figure.html) for any questions.
###Code
plot_likert.plot_likert(data, plot_likert.scales.agree,
figsize=(10,3));
###Output
_____no_output_____
###Markdown
You can do more advanced customization too: Controlling the plotThe plotting function returns a [matplotlib Axes object](https://matplotlib.org/api/axes_api.htmlmatplotlib.axes.Axes), which you can use to customize the figure, just as you would with any other matplotlib plot.Here's a quick example:
###Code
ax = plot_likert.plot_likert(data, plot_likert.scales.agree)
ax.figure.set_size_inches(8, 2)
ax.xaxis.set_label_text('# of awesome people');
ax.set_yticklabels(['Second question', 'First question']);
###Output
_____no_output_____
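###Markdown
And because the result is a regular matplotlib figure, it can be saved to disk as well (an illustrative addition; the filename and options are arbitrary).
###Code
ax.figure.savefig("likert_plot.png", dpi=150, bbox_inches="tight")
###Output
_____no_output_____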
###Markdown
Instead of manipulating the axes after they've been returned, you can also pass them in first, to enable even more advanced customization: More advanced plotsYou can pass a [matplotlib Axis](https://matplotlib.org/stable/api/axes_api.htmlmatplotlib.axes.Axes) and other options as arguments in order to build more advanced plots. These values will be passed through to [pandas.DataFrame.plot](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.plot.html) (which in turn may pass some of them through to matplotlib); see its documentation for details.
###Code
# We import matplotlib in order to control the whole figure.
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
In this example, we're plotting two Likert plots side-by-side for comparison purposes — in this case, comparing absolute values vs percentages. Notice that we're using different bar widths.
###Code
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(15,3))
plot_likert.plot_likert(data, plot_likert.scales.agree,
plot_percentage=False, # show absolute values
ax=ax1, # show on the left-side subplot
legend=0, # hide the legend for the subplot, we'll show a single figure legend instead
);
plot_likert.plot_likert(data, plot_likert.scales.agree,
plot_percentage=True, # show percentage values
ax=ax2, # show on the right-side subplot
legend=0, # hide the legend for the subplot, we'll show a single figure legend instead
width=0.15 # make the bars slimmer
);
# display a single legend for the whole figure
handles, labels = ax2.get_legend_handles_labels()
fig.legend(handles, labels, bbox_to_anchor=(1.08, .9))
plt.show()
###Output
_____no_output_____
###Markdown
Plotting already-aggregated dataThe `plot_likert` function we've been using operates on "raw" responses:a DataFrame that has a row for each response (as discussed above).However, it's possible your data has a different shape,and/or you have already aggregated the data,and just want to plot it.plot_likert provides a solution for this too!Just take your aggregated counts:
###Code
precomputed_counts = pd.DataFrame(
{'Strongly disagree': {'Q1': 2.0, 'Q2': 2.0},
'Disagree': {'Q1': 1.0, 'Q2': 0.0},
'Neither agree nor disagree': {'Q1': 3.0, 'Q2': 2.0},
'Agree': {'Q1': 3.0, 'Q2': 4.0},
'Strongly agree': {'Q1': 1.0, 'Q2': 2.0}}
)
precomputed_counts
###Output
_____no_output_____
###Markdown
…and pass them to the `plot_counts` function:
###Code
plot_likert.plot_counts(precomputed_counts, plot_likert.scales.agree);
###Output
_____no_output_____
###Markdown
plot_likert guideWelcome! This notebook aims to introduce you to the usage and options of plot_likert. Have questions or suggestions for improvement? Feel free to open an issue! Prerequisites First, obviously, you'll need plot_likert itself. See homepage for installation instructions.
###Code
import plot_likert
###Output
_____no_output_____
###Markdown
plot_likert operates on [Pandas](https://pandas.pydata.org/) [DataFrames](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html), so you'll need Pandas as well.
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Pandas uses [NumPy](https://numpy.org/) under the hood. You won't need it directly, but this notebook will use it for a couple of things, like random number generation.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Quick startIf you have the data in the right format, you can make a plot with just one line of code! So, let's get some data:
###Code
rng = np.random.default_rng(seed=42)
data = pd.DataFrame(rng.choice(plot_likert.scales.agree, (10,2)), columns=['Q1','Q2'])
###Output
_____no_output_____
###Markdown
and now, the magic happens:
###Code
plot_likert.plot_likert(data, plot_likert.scales.agree);
###Output
_____no_output_____
###Markdown
InputsNow you know how easy it can be to start using plot_likert. But we said that the data has to be in the right format. What does that mean exactly? Let's take a look at what we passed in to the function:
###Code
data
###Output
_____no_output_____
###Markdown
To make our assumptions explicit:1. The input has to be a [Pandas](https://pandas.pydata.org/) [DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)2. Each row represents a response from a single respondent3. Each cell contains their response, preferably as a string4. Each column represents a different question asked to each respondent ScalesThe second input to the `plot_likert` function is the _scale_ you're using for your questions,i.e., your questions' answer choices. Here's what it looked like for the dataset above:
###Code
plot_likert.scales.agree
###Output
_____no_output_____
###Markdown
You need to specify the scale explicitly because plot_likert needs to know the order of the scale (for colors and sorting), and in case there are any values in the scale that aren't represented in your dataset. The scale is an array of strings, so you can construct one yourself.
###Code
another_scale = \
['strongly disagree',
'disagree',
'neither agree nor disagree',
'agree',
'strongly agree']
###Output
_____no_output_____
###Markdown
Scale must match inputs exactlyThe scale you pass in must match the data exactly, otherwise a `ValueError` is raised.For example, the scale we just constructed has the fields in lower-case, but the data has the words capitalized. Trying to plot now throws an exception.
###Code
try:
plot_likert.plot_likert(data, another_scale);
print("Yay, everything worked!")
except ValueError as e:
import sys
print("Oh no, something went wrong! The message in the exception is:\n" + str(e), file=sys.stderr)
###Output
Oh no, something went wrong! The message in the exception is:
Strongly disagree is not in the scale
###Markdown
Bundled scalesFor your convenience, plot_likert [includes some commonly used scales](https://github.com/nmalkin/plot-likert/blob/master/plot_likert/scales.py), for example:
###Code
plot_likert.scales.acceptable
plot_likert.scales.raw5
###Output
_____no_output_____
###Markdown
If you'd like to add a scale, please open a pull request. Missing dataIf not all of your respondents answered every question, you might have empty cells.This works fine.However, you'll get [a warning](https://docs.python.org/3/library/warnings.html) if you're plotting percentages (see below).
###Code
missing_data = data.copy()
missing_data.iloc[0,0] = np.NaN
# This produces a warning:
#plot_likert.plot_likert(missing_data, plot_likert.scales.agree, plot_percentage=True);
###Output
_____no_output_____
###Markdown
Plotting percentagesOften, instead of plotting the raw number of responses, you'll want to plot the percentage of respondents who answered a certain way. You can do this by setting the argument `plot_percentage=True`:
###Code
plot_likert.plot_likert(data, plot_likert.scales.agree,
plot_percentage=True);
###Output
_____no_output_____
###Markdown
Customizing colorsYou can change the colors used in the plot by passing in an array of color values as the `colors` argument:
###Code
plot_likert.plot_likert(data, plot_likert.scales.agree,
colors=plot_likert.colors.likert5);
###Output
_____no_output_____
###Markdown
This is mandatory if you're using a scale that has more than 5 values.Some default color schemes [are provided under `plot_likert.colors`](https://github.com/nmalkin/plot-likert/blob/master/plot_likert/colors.py).A color scheme is just an array of [matplotlib color values](https://matplotlib.org/tutorials/colors/colors.html), so you can also construct your own. Changing the figure sizeThe default size of the plot can be pretty cramped. You can adjust the figure size using the `figsize` argument. This specifies the dimensions of the figure in inches. This argument is passed directly to [matplotlib](https://matplotlib.org), so [see its documentation](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.figure.html) for any questions.
###Code
plot_likert.plot_likert(data, plot_likert.scales.agree,
figsize=(10,3));
###Output
_____no_output_____
###Markdown
You can do more advanced customization too: Controlling the plotThe plotting function returns a [matplotlib Axes object](https://matplotlib.org/api/axes_api.htmlmatplotlib.axes.Axes), which you can use to customize the figure, just as you would with any other matplotlib plot.Here's a quick example:
###Code
ax = plot_likert.plot_likert(data, plot_likert.scales.agree)
ax.figure.set_size_inches(8, 2)
ax.xaxis.set_label_text('# of awesome people');
ax.set_yticklabels(['Second question', 'First question']);
###Output
_____no_output_____
###Markdown
Plotting already-aggregated dataThe `plot_likert` function we've been using operates on "raw" responses:a DataFrame that has a row for each response (as discussed above).However, it's possible your data has a different shape,and/or you have already aggregated the data,and just want to plot it.plot_likert provides a solution for this too!Just take your aggregated counts:
###Code
precomputed_counts = pd.DataFrame(
{'Strongly disagree': {'Q1': 2.0, 'Q2': 2.0},
'Disagree': {'Q1': 1.0, 'Q2': 0.0},
'Neither agree nor disagree': {'Q1': 3.0, 'Q2': 2.0},
'Agree': {'Q1': 3.0, 'Q2': 4.0},
'Strongly agree': {'Q1': 1.0, 'Q2': 2.0}}
)
precomputed_counts
###Output
_____no_output_____
###Markdown
…and pass them to the `plot_counts` function:
###Code
plot_likert.plot_counts(precomputed_counts, plot_likert.scales.agree);
###Output
_____no_output_____
###Markdown
How to customize or extend the knowledge base guidelineYou can design your own description language and use it with Draco or extend the existing language we use here. If you don't know where to start with the constraints, you can first use our [`run_clingo`](https://dig.cmu.edu/draco2/api/run.htmldraco.run.run_clingo) and [`programs`](https://dig.cmu.edu/draco2/api/programs.html) API to generate some recommendations. Then, you should be able to find some recommendations that should have been left out, and you can write constraints to reflect them. If you write your own description language, you need to set up the search space in a similar way to [`generate.lp`](https://github.com/cmudig/draco2/blob/main/draco/asp/generate.lp) before trying to generate recommendations.For example, the following snippet shows how to use the `run_clingo` API to generate 1 recommendation. You can set a different number to look into more results.
###Code
from draco import answer_set_to_dict, run_clingo
from draco.programs import define, hard, helpers, constraints, generate
from pprint import pprint
prog = (
generate.program
+ define.program
+ helpers.program
+ hard.program
+ constraints.program
)
scatter = """
attribute(number_rows,root,100).
entity(field,root,(f,0)).
attribute((field,name),(f,0),temperature).
attribute((field,type),(f,0),number).
entity(field,root,(f,1)).
attribute((field,name),(f,1),precipitation).
attribute((field,type),(f,1),number).
entity(view,root,(v,0)).
entity(mark,(v,0),(m,0)).
entity(encoding,(m,0),(e,0)).
attribute((encoding,field),(e,0),(f,0)).
entity(encoding,(m,0),(e,1)).
attribute((encoding,field),(e,1),(f,1)).
entity(scale,(v,0),(s,0)).
entity(scale,(v,0),(s,1)).
#show entity/3.
#show attribute/3.
"""
for model in run_clingo(prog + scatter, 1):
pprint(answer_set_to_dict(model.answer_set))
print(model.answer_set)
###Output
{'field': [{'name': 'temperature', 'type': 'number'},
{'name': 'precipitation', 'type': 'number'}],
'number_rows': 100,
'task': 'value',
'view': [{'mark': [{'channel': 'text',
'encoding': [{'channel': 'text',
'field': ('f', 0),
'scale_type': 'linear'},
{'channel': 'size',
'field': ('f', 1),
'scale_type': 'linear'}],
'scale': ('s', 1),
'type': 'text'}],
'scale': [{'channel': 'text', 'type': 'linear'},
{'channel': 'size', 'type': 'linear'}]}]}
[attribute(number_rows,root,100), attribute((field,name),(f,0),temperature), attribute((field,type),(f,0),number), attribute((field,name),(f,1),precipitation), attribute((field,type),(f,1),number), attribute((encoding,field),(e,0),(f,0)), attribute((encoding,field),(e,1),(f,1)), attribute(task,root,value), attribute((scale,type),(s,1),linear), attribute((scale,type),(s,0),linear), attribute((encoding,channel),(e,1),size), attribute((encoding,channel),(e,0),text), attribute((mark,type),(m,0),text), attribute((scale,channel),(s,1),size), attribute((scale,channel),(s,0),text), attribute((mark,channel),(m,0),size), attribute((mark,channel),(m,0),text), attribute((mark,scale),(m,0),(s,0)), attribute((mark,scale),(m,0),(s,1)), attribute((encoding,scale_type),(e,0),linear), attribute((encoding,scale_type),(e,1),linear), entity(field,root,(f,0)), entity(field,root,(f,1)), entity(view,root,(v,0)), entity(mark,(v,0),(m,0)), entity(encoding,(m,0),(e,0)), entity(encoding,(m,0),(e,1)), entity(scale,(v,0),(s,0)), entity(scale,(v,0),(s,1))]
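###Markdown
As noted above, you can raise the model count to inspect more candidates. A minimal sketch (an addition) that counts the returned models instead of printing each one:
###Code
# Enumerate up to 5 models instead of 1 and count them
models = list(run_clingo(prog + scatter, 5))
print(len(models), 'recommendation(s) generated')
###Output
_____no_output_____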
###Markdown
If you see that there are too many recommendations, you can: * add more hard constraints * modify your generator and hard constraints to reduce symmetry in the search space (e.g. similar recommendations with switched entity ids)If you see too few recommendations, you can: * check if some of your constraints are too tight, and move them to soft constraints If you see no recommendations, you might have made mistakes in the hard constraints. You can allow violations to check which ones are common by removing the `violation` constraint, which forbids any violations, from the programs. Below is an example:
###Code
from draco import get_violations
from draco.asp_utils import blocks_to_program
c = "".join(
blocks_to_program(
constraints.blocks, set(constraints.blocks.keys()) - set(["violation"])
)
)
prog = generate.program + define.program + helpers.program + hard.program + c + scatter
for model in run_clingo(prog + scatter, 1):
pprint(answer_set_to_dict(model.answer_set))
print(model.answer_set)
answer = [str(symbol) + ". " for symbol in model.answer_set]
print(get_violations(answer))
###Output
{'field': [{'name': 'temperature', 'type': 'number'},
{'name': 'precipitation', 'type': 'number'}],
'number_rows': 100,
'task': 'value',
'view': [{'mark': [{'channel': 'shape',
'encoding': [{'channel': 'shape',
'field': ('f', 0),
'scale_type': 'categorical'},
{'channel': 'size',
'field': ('f', 1),
'scale_type': 'categorical'}],
'mark_channel_discrete_or_binned': 'shape',
'scale': ('s', 1),
'type': 'rect'}],
'scale': [{'channel': 'shape', 'type': 'categorical'},
{'channel': 'size', 'type': 'categorical'}]}]}
[attribute(number_rows,root,100), attribute((field,name),(f,0),temperature), attribute((field,type),(f,0),number), attribute((field,name),(f,1),precipitation), attribute((field,type),(f,1),number), attribute((encoding,field),(e,0),(f,0)), attribute((encoding,field),(e,1),(f,1)), attribute(task,root,value), attribute((scale,type),(s,1),categorical), attribute((scale,type),(s,0),categorical), attribute((encoding,channel),(e,1),size), attribute((encoding,channel),(e,0),shape), attribute((mark,type),(m,0),rect), attribute((scale,channel),(s,1),size), attribute((scale,channel),(s,0),shape), attribute((mark,channel),(m,0),size), attribute((mark,channel),(m,0),shape), attribute((mark,scale),(m,0),(s,0)), attribute((mark,scale),(m,0),(s,1)), attribute((encoding,scale_type),(e,0),categorical), attribute((encoding,scale_type),(e,1),categorical), attribute(mark_channel_discrete_or_binned,(m,0),size), attribute(mark_channel_discrete_or_binned,(m,0),shape), entity(field,root,(f,0)), entity(field,root,(f,1)), entity(view,root,(v,0)), entity(mark,(v,0),(m,0)), entity(encoding,(m,0),(e,0)), entity(encoding,(m,0),(e,1)), entity(scale,(v,0),(s,0)), entity(scale,(v,0),(s,1))]
['size_without_point_text', 'shape_without_point', 'categorical_not_color']
|
Climate change Sentiment_Providers.ipynb | ###Markdown
list of negative words:
###Code
with open("Negative_words.txt", "r") as f:
negText = f.read()
negTokens = negText.split("\n") # This splits the text file into tokens on the new line character
negTokens[-1:] = [] # This strips out the final empty item
print(negTokens[-10:])
###Output
['wretchedly', 'wretchedness', 'wrong', 'wrongful', 'wrought', 'wrought', 'yawn', 'zealot', 'zealous', 'zealously']
###Markdown
list of positive words:
###Code
with open("Positive_words.txt", "r") as f:
posText = f.read()
posTokens = posText.split("\n") # This splits the text file into tokens on the new line character
posTokens[-1:] = [] # This strips out the final empty item
print(posTokens[-10:])
###Output
['worthwhile', 'worthy', 'wow', 'wry', 'yearning', 'yearningly', 'youthful', 'zeal', 'zenith', 'zest']
###Markdown
Calling the corpus:
###Code
with open("Providers_text2.txt", "r") as f:
tweetsText = f.read()
tweetsTokens = tweetsText.split("\n") # This splits the text file into tokens on the new line character
tweetsTokens[-1:] = [] # This strips out the final empty item
print(tweetsTokens[:2])
###Output
['Tweet/Post', '"For many years, #JasperNP has held a Spring Flower Count. Volunteers & Parks staff collect data on the flowers that are blooming in the park, which is then used for climate change research as it helps describe ecological trends over time. "']
###Markdown
Tokenizing tweets:
###Code
import re
def tokenizer(theText):
theTokens = re.findall(r'\b\w[\w-]*\b', theText.lower())
return theTokens
def calculator(theTweet):
# Count positive words
numPosWords = 0
theTweetTokens = tokenizer(theTweet)
for word in theTweetTokens:
if word in posTokens:
numPosWords += 1
# Count negative words
numNegWords = 0
for word in theTweetTokens:
if word in negTokens:
numNegWords += 1
sum = (numPosWords - numNegWords)
return sum
# Here is a line for testing this
# print(calculator('Obama has called wrong wrong the GOP budget social Darwinism. Nice try, but they believe in social creationism.'))
###Output
_____no_output_____
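###Markdown
A note on speed: `word in posTokens` scans the whole list for every token. Converting the word lists to sets makes each lookup constant-time. A small optional sketch, assuming the `posTokens`/`negTokens` lists and the `tokenizer` defined above:
###Code
posSet = set(posTokens)   # set membership tests are O(1)
negSet = set(negTokens)

def fast_calculator(theTweet):
    theTweetTokens = tokenizer(theTweet)
    numPosWords = sum(1 for word in theTweetTokens if word in posSet)
    numNegWords = sum(1 for word in theTweetTokens if word in negSet)
    return numPosWords - numNegWords
###Output
_____no_output_____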
###Markdown
Total number of tweets, and Pos/Neg/Neutral counts:
###Code
# Here we set up the thresholds
posi = 1 # a tweet counts as positive when its net score is greater than 1
nega = 0 # a tweet counts as negative when its net score is below 0
# Here we prime our variables
numTweets = 0
numPosTweets = 0
numNegTweets = 0
numNeutTweets = 0
# This loop goes through all the tweets and tallies how many are positive, negative, or neutral.
for tweet in tweetsTokens:
calc = calculator(tweet)
if calc > posi:
numPosTweets += 1
numTweets += 1
elif calc < nega:
numNegTweets += 1
numTweets += 1
else:
numNeutTweets += 1
numTweets += 1
# This prints out the results
print("Total: " + str(numTweets) + "\n" + "Positive: " + str(numPosTweets) + "\n" + "Neutral: " + str(numNeutTweets) + "\n" + "Negative: " +str(numNegTweets))
###Output
Total: 602
Positive: 98
Neutral: 450
Negative: 54
###Markdown
Examples of positive tweets:
###Code
# Here we set up the threshold.
posi = 1 # a tweet counts as positive when its net score is greater than 1
numberWanted = 4 # Here you decide how many tweets you want
# Here we prime our variables
numTweets = 0
numPosTweets = 0
posiTweetList = []
# This loop goes through all the tweets and collects the first few whose score exceeds the threshold.
for tweet in tweetsTokens:
calc = calculator(tweet)
if calc > posi and numPosTweets < numberWanted:
numPosTweets += 1
posiTweetList.append(tweet)
print(posiTweetList)
###Output
['"Why is there still a fire ban in JNP? Since May 1, there has been an average of 66 mm of rainfall in the park. It will take almost the same amount of rain over 2 days to significantly change the moisture of medium & large fuels. Until then, the fire ban remains in effect. "', '"When the weather is warm, the thickness of natural ice can change from day to day"', 'Hi @kifehr The status of the fire ban is difficult to predict in advance as the weather and fuel conditions are always fluctuating. Fine fuels respond rapidly to changes in the environment; even if they are dampened by rain they can rebound given just one sunny day', '"The fire ban in #JasperNP, as well as the Municipality of Jasper, has been lifted. Thank you for your cooperation and support throughout this wildfire season. "']
###Markdown
Examples of negative tweets:
###Code
# Here we set up the threshold.
nega = -1 # a tweet counts as negative when its net score is below -1
numberWanted = 4 # Here you decide how many tweets you want
# Here we prime our variables
numTweets = 0
numNegTweets = 0
negaTweetList = []
# This loop goes through all the tweets and collects the first few whose score falls below the threshold.
for tweet in tweetsTokens:
calc = calculator(tweet)
if calc < nega and numNegTweets < numberWanted:
numNegTweets += 1
negaTweetList.append(tweet)
print(negaTweetList)
###Output
['"Snarl Peak Wildfire Update: Fire activity increased yesterday due to high temperatures, low humidity and high winds."', 'A fire ban for Jasper National Park is in effect. Fire danger is rated as extreme. Warm, mostly dry weather is anticipated to continue through the Labour Day weekend keeping the fire danger at extreme.', '· Report any wildfires, illegal campfires or suspicious smoke to Parks Canada Dispatch: 780-852-6155.', 'Road construction makes for an added challenge in the daily lives of small terrestrial animals such as ground squirrels. To aid in mitigating the effects of current road paving work in Jasper National Park, our wildlife biologists are tracking the phenology of ground squirrels to ensure that they are kept out of harm’s way #Conservation "']
###Markdown
Test a tweet:
###Code
tweetToCalc = input("What is the tweet to calculate?")
print(calculator(tweetToCalc))
###Output
What is the tweet to calculate?2
0
###Markdown
Gathering and plotting positive and negative words of a sample tweet:
###Code
import re
posWordsList = []
negWordsList = []
def tokenizer(theText):
theTokens = re.findall(r'\b\w[\w-]*\b', theText.lower())
return theTokens
def wordsCalculator(theTweet):
# Count positive words
numPosWords = 0
theTweetTokens = tokenizer(theTweet)
for word in theTweetTokens:
if word in posTokens:
numPosWords += 1
posWordsList.append(word)
# Count negative words
numNegWords = 0
for word in theTweetTokens:
if word in negTokens:
numNegWords += 1
negWordsList.append(word)
tweet2Process = input("What tweet do you want to process? ")
wordsCalculator(tweet2Process)
print("Positive words: " + str(posWordsList[:10]))
print("Negative words: " + str(negWordsList[:10]))
###Output
What tweet do you want to process? 2
Positive words: []
Negative words: []
###Markdown
Gathering and plotting all positive and negative words:
###Code
import re
# Here we set up the thresholds
posi = 1 # This means there have to be more than 1 positive word
nega = 0 # This means there has to be more than 1 negative words
# Here we prime our variables
posWordsList = []
negWordsList = []
numTweets = 0
numPosTweets = 0
numNegTweets = 0
numNeutTweets = 0
def wordsGathering(theTweet):
# Count positive words
numPosWords = 0
theTweetTokens = tokenizer(theTweet)
for word in theTweetTokens:
if word in posTokens:
numPosWords += 1
posWordsList.append(word)
# Count negative words
numNegWords = 0
for word in theTweetTokens:
if word in negTokens:
numNegWords += 1
negWordsList.append(word)
sum = (numPosWords - numNegWords)
return sum
# This loop goes through all the tweets, gathers their positive/negative words, and tallies the counts.
for tweet in tweetsTokens:
calc = wordsGathering(tweet)
if calc > posi:
numPosTweets += 1
numTweets += 1
elif calc < nega:
numNegTweets += 1
numTweets += 1
else:
numNeutTweets += 1
numTweets += 1
print("Positive words: " + str(len(posWordsList)))
print("Negative words: " + str(len(negWordsList)))
###Output
Positive words: 527
Negative words: 206
###Markdown
plotting positive words:
###Code
import nltk, matplotlib
posDist = nltk.FreqDist(posWordsList)
posDist.tabulate(10)
%matplotlib inline
posDist.plot(25, title="Top Positive Words")
###Output
will open help confident good potential hot elevated well important
89 64 24 18 15 15 15 14 13 12
###Markdown
Plotting negative words:
###Code
negDist = nltk.FreqDist(negWordsList)
negDist.tabulate(10)
%matplotlib inline
negDist.plot(25, title="Top Negative Words")
with open('BIGDATA.txt','r') as myfile:
data_string=myfile.read().replace('\n','')
print("This string has", len(data_string), "characters.")
from textblob import TextBlob
testimonial = TextBlob(data_string) # any string can be analyzed
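# a minimal usage sketch: .sentiment returns (polarity, subjectivity),
# with polarity in [-1, 1] and subjectivity in [0, 1]
print(testimonial.sentiment)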
###Output
_____no_output_____ |
Kaggle/TitanicChallenge/TitanicChallenge.ipynb | ###Markdown
Titanic Challenge

Use a real data set from the Titanic passenger log to predict which passengers were most likely to survive the disaster.

Overview

The data has been split into two groups:
- training set (train.csv)
- test set (test.csv)

The training set should be used to build the machine learning models. The test set should be used to see how well your model performs on unseen data: use the model you trained to predict whether or not each passenger survived the sinking of the Titanic.

gender_submission.csv - a set of predictions that assume all and only female passengers survive, as an example of what a submission file should look like.

Import Libraries
###Code
import pandas as pd # pandas is a dataframe library
import matplotlib.pyplot as plt # matplotlib.pyplot plots data
import numpy as np # numpy provides N-dim object support
# do plotting inline instead of a separate window
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load and Review data
###Code
df = pd.read_csv("./data/train.csv") # load training data
df.shape
df.head(5)
df.tail(5)
###Output
_____no_output_____
###Markdown
Definition of features

| Variable | Definition | Key |
|----------|----------|----------|
| survival | Survival | 0 = No, 1 = Yes |
| pclass | Ticket class | 1 = 1st, 2 = 2nd, 3 = 3rd |
| sex | Sex | |
| Age | Age in years | |
| sibsp | # of siblings / spouses aboard the Titanic | |
| parch | # of parents / children aboard the Titanic | |
| ticket | Ticket number | |
| fare | Passenger fare | |
| cabin | Cabin number | |
| embarked | Port of Embarkation | C = Cherbourg, Q = Queenstown, S = Southampton |

Variable Notes
- pclass: A proxy for socio-economic status (SES)
  * 1st = Upper
  * 2nd = Middle
  * 3rd = Lower
- age: Age is fractional if less than 1. If the age is estimated, it is in the form of xx.5
- sibsp: The dataset defines family relations in this way...
  - Sibling = brother, sister, stepbrother, stepsister
  - Spouse = husband, wife (mistresses and fiancés were ignored)
- parch: The dataset defines family relations in this way...
  - Parent = mother, father
  - Child = daughter, son, stepdaughter, stepson
- Some children travelled only with a nanny, therefore parch=0 for them.

Check for null Values
###Code
df.isnull().values.any()
###Output
_____no_output_____
###Markdown
If the data has missing values, they will become NaNs in the Numpy arrays generated by the vectorizor so lets get rid of them
###Code
df = df.replace('?', 0)  # replace() returns a new frame, so assign it back
df.fillna(0, inplace=True)
def plot_corr(df, size=11):
    """
    Function plots a graphical correlation matrix for each pair of columns in the dataframe.
    Input:
        df: pandas DataFrame
        size: vertical and horizontal size of plot
    Displays:
        matrix of correlation between columns.
    """
    corr = df.corr() # data frame correlation function
    fig, ax = plt.subplots(figsize=(size, size))
    ax.matshow(corr) # color code the rectangles by correlation value
plt.xticks(range(len(corr.columns)), corr.columns) # draw x tick marks
plt.yticks(range(len(corr.columns)), corr.columns) # draw y tick marks
plot_corr(df)
df.corr()
###Output
_____no_output_____
###Markdown
When correlated columns are found, it is advised that they be deleted.

Delete Name, as it will not help the prediction.
Delete Cabin, as only 19.6% of its data is provided.
Delete Ticket.
###Code
del df['Name']
del df['Cabin']
del df['Ticket']
df.head()
###Output
_____no_output_____
###Markdown
Molding Data

Check Data Types
###Code
df.head()
###Output
_____no_output_____
###Markdown
Change male to 1, female to 0
###Code
sex_map = { "male" : 1, "female" : 0}
df['Sex'] = df['Sex'].map(sex_map)
###Output
_____no_output_____
###Markdown
Embarked: C = Cherbourg, Q = Queenstown, S = Southampton

TODO: check if prediction is better when mapping Embarked to a number
###Code
df.head()
###Output
_____no_output_____
###Markdown
Check the true/false ratio in the Survived column
###Code
num_survived = len(df.loc[df['Survived'] == 1])
num_not_survived = len(df.loc[df['Survived'] == 0])
print("Number of people who survived: {0} ({1:2.2f}%)".format(num_survived, (num_survived/ (num_survived+num_not_survived))*100))
print("Number of people who did not survive: {0} ({1:2.2f}%)".format(num_not_survived, (num_not_survived/ (num_survived+num_not_survived))*100))
plot_corr(df)
df.corr()
###Output
_____no_output_____
###Markdown
Hidden Missing Values

Impute Age with the mean age
###Code
from sklearn.preprocessing import Imputer
# Impute all 0 readings in Age with the column mean
imp = Imputer(missing_values=0, strategy="mean", axis=0)
df["Age"] = imp.fit_transform(df[["Age"]]).ravel()
###Output
_____no_output_____
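###Markdown
Note: `sklearn.preprocessing.Imputer` was removed in scikit-learn 0.22. On newer versions the equivalent is `SimpleImputer`. A sketch, assuming the same DataFrame:
###Code
from sklearn.impute import SimpleImputer

imp = SimpleImputer(missing_values=0, strategy="mean")
df["Age"] = imp.fit_transform(df[["Age"]]).ravel()
###Output
_____no_output_____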
###Markdown
Splitting the data: 70% training, 30% testing
###Code
from sklearn.model_selection import train_test_split  # formerly sklearn.cross_validation, which was removed in newer versions
feature_col_names = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare"]
predicted_class_name = ["Survived"]
x = df[feature_col_names].values # predictor feature columns (6 x m)
y = df[predicted_class_name].values # predicted class (1 = true, 0 = false) comumn (1 X m)
split_test_size = 0.30
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=split_test_size, random_state=42)
# test_size = 0.3 is 30%, 42 is the answer to everything (seed for splitting)
###Output
_____no_output_____
###Markdown
Check to ensure we have the desired 70% train, 30% test split of the data
###Code
print("{0:0.2f}% in training set".format((len(x_train)/len(df.index))*100))
print("{0:0.2f}% in test set".format((len(x_test)/len(df.index))*100))
print("Original True : {0} ({1:0.2f}%)".format(len(df.loc[df['Survived'] == 1]), (len(df.loc[df['Survived'] == 1])/len(df.index)) * 100.0))
print("Original False : {0} ({1:0.2f}%)".format(len(df.loc[df['Survived'] == 0]), (len(df.loc[df['Survived'] == 0])/len(df.index)) * 100.0))
print("")
print("Training True : {0} ({1:0.2f}%)".format(len(y_train[y_train[:] == 1]), (len(y_train[y_train[:] == 1])/len(y_train) * 100.0)))
print("Training False : {0} ({1:0.2f}%)".format(len(y_train[y_train[:] == 0]), (len(y_train[y_train[:] == 0])/len(y_train) * 100.0)))
print("")
print("Test True : {0} ({1:0.2f}%)".format(len(y_test[y_test[:] == 1]), (len(y_test[y_test[:] == 1])/len(y_test) * 100.0)))
print("Test False : {0} ({1:0.2f}%)".format(len(y_test[y_test[:] == 0]), (len(y_test[y_test[:] == 0])/len(y_test) * 100.0)))
###Output
Original True : 342 (38.38%)
Original False : 549 (61.62%)
Training True : 231 (37.08%)
Training False : 392 (62.92%)
Test True : 111 (41.42%)
Test False : 157 (58.58%)
###Markdown
Post-split Data Preparation

Hidden Missing Values
###Code
df.head(10)
print("# rows in dataframe {0}".format(len(df)))
print("# rows missing Age: {0}".format(len(df.loc[df['Age'] == 0]))) # not missing anymore
print("# rows missing Pclass: {0}".format(len(df.loc[df['Pclass'] == 0])))
print("# rows missing Fare: {0}".format(len(df.loc[df['Fare'] == 0]))) # ignore, only 15 entries
###Output
# rows in dataframe 891
# rows missing Age: 0
# rows missing Pclass: 0
# rows missing Fare: 15
###Markdown
Training Initial Algorithm - Naive Bayes
###Code
from sklearn.naive_bayes import GaussianNB
# create Gaussian Naive Bayes model object and train it with the data
nb_model = GaussianNB()
nb_model.fit(x_train, y_train.ravel())
###Output
_____no_output_____
###Markdown
Performance on Training Data
###Code
# predict values using the training data
nb_predict_train = nb_model.predict(x_train)
# import the performance metrics library
from sklearn import metrics
# Accuracy
print("Accuracy: {0:0.4f}".format(metrics.accuracy_score(y_train, nb_predict_train)))
print()
###Output
Accuracy: 0.7945
###Markdown
Performance on Testing Data
###Code
# predict values using the testing data
nb_predict_test = nb_model.predict(x_test)
# Accuracy
print("Accuracy: {0:0.4f}".format(metrics.accuracy_score(y_test, nb_predict_test)))
print()
###Output
Accuracy: 0.7910
###Markdown
Metrics
###Code
print("Confusion Matrix")
print("{0}".format(metrics.confusion_matrix(y_test, nb_predict_test)))
print("")
print("Classification Report")
print(metrics.classification_report(y_test, nb_predict_test))
###Output
Confusion Matrix
[[130 27]
[ 29 82]]
Classification Report
precision recall f1-score support
0 0.82 0.83 0.82 157
1 0.75 0.74 0.75 111
avg / total 0.79 0.79 0.79 268
###Markdown
Random Forest

The gap between the training and test accuracy below is large: the model is overfitting the training data.
###Code
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier(random_state = 42) # create random forest object
rf_model.fit(x_train, y_train.ravel())
###Output
_____no_output_____
###Markdown
Predict Training data
###Code
rf_predict_train = rf_model.predict(x_train)
# training accuracy
print("Accuracy: {0:0.4f}".format(metrics.accuracy_score(y_train, rf_predict_train)))
###Output
Accuracy: 0.9631
###Markdown
Predict Test data
###Code
rf_predict_test = rf_model.predict(x_test)
# testing accuracy
print("Accuracy: {0:0.4f}".format(metrics.accuracy_score(y_test, rf_predict_test)))
print("Confusion Matrix")
print("{0}".format(metrics.confusion_matrix(y_test, rf_predict_test)))
print("")
print("Classification Report")
print(metrics.classification_report(y_test, rf_predict_test))
###Output
Confusion Matrix
[[132 25]
[ 38 73]]
Classification Report
precision recall f1-score support
0 0.78 0.84 0.81 157
1 0.74 0.66 0.70 111
avg / total 0.76 0.76 0.76 268
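###Markdown
One common way to shrink the train/test gap is to constrain the forest so individual trees cannot memorize the training set. A quick sketch; the hyperparameter values are illustrative assumptions, not tuned:
###Code
rf_model = RandomForestClassifier(n_estimators=200, max_depth=5, random_state=42)
rf_model.fit(x_train, y_train.ravel())
print("Train accuracy: {0:0.4f}".format(metrics.accuracy_score(y_train, rf_model.predict(x_train))))
print("Test accuracy: {0:0.4f}".format(metrics.accuracy_score(y_test, rf_model.predict(x_test))))
###Output
_____no_output_____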
###Markdown
Logistic Regression
###Code
from sklearn.linear_model import LogisticRegression
lr_model = LogisticRegression(C=0.7, random_state=42)
lr_model.fit(x_train, y_train.ravel())
lr_predict_test = lr_model.predict(x_test)
# testing metrics
print("Accuracy: {0:0.4f}".format(metrics.accuracy_score(y_test, lr_predict_test)))
print("Confusion Matrix")
print("{0}".format(metrics.confusion_matrix(y_test, lr_predict_test)))
print("")
print("Classification Report")
print(metrics.classification_report(y_test, lr_predict_test))
###Output
Accuracy: 0.8060
Confusion Matrix
[[138 19]
[ 33 78]]
Classification Report
precision recall f1-score support
0 0.81 0.88 0.84 157
1 0.80 0.70 0.75 111
avg / total 0.81 0.81 0.80 268
###Markdown
Setting Regularization parameter
###Code
C_start = 0.1
C_end = 5
C_inc = 0.1
C_values, recall_scores = [], []
C_val = C_start
best_recall_score = 0
while (C_val < C_end):
C_values.append(C_val)
lr_model_loop = LogisticRegression(C=C_val, random_state=42)
lr_model_loop.fit(x_train, y_train.ravel())
lr_predict_loop_test = lr_model_loop.predict(x_test)
recall_score = metrics.recall_score(y_test, lr_predict_loop_test)
recall_scores.append(recall_score)
if (recall_score > best_recall_score):
best_recall_score = recall_score
best_lr_predict_test = lr_predict_loop_test
C_val = C_val + C_inc
best_score_C_val = C_values[recall_scores.index(best_recall_score)]
print("1st max value of {0:.3f} occurred at C={1:.3f}".format(best_recall_score, best_score_C_val))
%matplotlib inline
plt.plot(C_values, recall_scores, "-")
plt.xlabel("C value")
plt.ylabel("recall score")
###Output
1st max value of 0.712 occurred at C=5.000
###Markdown
Logistic Regression with class_weight='balanced'
###Code
C_start = 0.1
C_end = 5
C_inc = 0.1
C_values, recall_scores = [], []
C_val = C_start
best_recall_score = 0
while (C_val < C_end):
C_values.append(C_val)
lr_model_loop = LogisticRegression(C=C_val, class_weight='balanced', random_state=42)
lr_model_loop.fit(x_train, y_train.ravel())
lr_predict_loop_test = lr_model_loop.predict(x_test)
recall_score = metrics.recall_score(y_test, lr_predict_loop_test)
recall_scores.append(recall_score)
if (recall_score > best_recall_score):
best_recall_score = recall_score
best_lr_predict_test = lr_predict_loop_test
C_val = C_val + C_inc
best_score_C_val = C_values[recall_scores.index(best_recall_score)]
print("1st max value of {0:.3f} occurred at C={1:.3f}".format(best_recall_score, best_score_C_val))
%matplotlib inline
plt.plot(C_values, recall_scores, "-")
plt.xlabel("C value")
plt.ylabel("recall score")
#from sklearn.linear_model import LogisticRegression
lr_model = LogisticRegression(class_weight='balanced', C=best_score_C_val, random_state=42)
lr_model.fit(x_train, y_train.ravel())
lr_predict_test = lr_model.predict(x_test)
# testing metrics
print("Accuracy: {0:0.4f}".format(metrics.accuracy_score(y_test, lr_predict_test)))
print("Confusion Matrix")
print("{0}".format(metrics.confusion_matrix(y_test, lr_predict_test)))
print("")
print("Classification Report")
print(metrics.classification_report(y_test, lr_predict_test))
print("{0:0.4f}".format(metrics.recall_score(y_test, lr_predict_test)))
###Output
Accuracy: 0.8134
Confusion Matrix
[[128 29]
[ 21 90]]
Classification Report
precision recall f1-score support
0 0.86 0.82 0.84 157
1 0.76 0.81 0.78 111
avg / total 0.82 0.81 0.81 268
0.8108
###Markdown
LogisticRegressionCV - Cross Validation
###Code
from sklearn.linear_model import LogisticRegressionCV
lr_cv_model = LogisticRegressionCV(n_jobs=-1, random_state=42, Cs=3, cv=10, refit=False, class_weight="balanced")
lr_cv_model.fit(x_train, y_train.ravel())
###Output
_____no_output_____
###Markdown
Predict Test data
###Code
lr_cv_predict_test = lr_cv_model.predict(x_test)
# testing accuracy
print("Accuracy: {0:0.4f}".format(metrics.accuracy_score(y_test, lr_cv_predict_test)))
print("Confusion Matrix")
print("{0}".format(metrics.confusion_matrix(y_test, lr_cv_predict_test)))
print("")
print("Classification Report")
print(metrics.classification_report(y_test, lr_cv_predict_test))
print("{0:0.4f}".format(metrics.recall_score(y_test, lr_cv_predict_test)))
###Output
Accuracy: 0.8134
Confusion Matrix
[[127 30]
[ 20 91]]
Classification Report
precision recall f1-score support
0 0.86 0.81 0.84 157
1 0.75 0.82 0.78 111
avg / total 0.82 0.81 0.81 268
0.8198
###Markdown
Predictions to the test data Use data from test.csv file
###Code
test_df = pd.read_csv("./data/test.csv") # load test data
test_df.head()
###Output
_____no_output_____
###Markdown
Clean test data
###Code
test_df = test_df.replace('?', 0)  # assign back; replace() is not in-place
test_df.fillna(0, inplace=True)
del test_df['Name']
del test_df['Cabin']
del test_df['Ticket']
test_df['Sex'] = test_df['Sex'].map(sex_map)
# Impute with mean all 0 readings in Age
test_df["Age"] = imp.fit_transform(test_df[["Age"]]).ravel()
print("# rows in dataframe {0}".format(len(test_df)))
print("# rows missing Age: {0}".format(len(test_df.loc[test_df['Age'] == 0]))) # not missing anymore
print("# rows missing Pclass: {0}".format(len(test_df.loc[test_df['Pclass'] == 0])))
print("# rows missing Fare: {0}".format(len(test_df.loc[test_df['Fare'] == 0])))
test_df.head()
#get queries IDs for later usage
queryIds = test_df['PassengerId']
x = test_df[feature_col_names].values
predictions = lr_cv_model.predict(x)
#-------------------------------------------
# Print predictions to file
#-------------------------------------------
# open the file with a context manager so it is closed (and flushed) automatically
with open('./solutions/prediction.csv', 'w') as predictionFile:
    predictionFile.write("PassengerId,Survived\n")
    for qId, prediction in zip(queryIds, predictions):
        #print("{0}, {1}".format(qId, prediction))
        predictionFile.write("{0},{1}\n".format(qId, prediction))  # no space after the comma in a CSV
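
# Alternative sketch using pandas (assumes the same queryIds and predictions);
# this produces the same file without manual string formatting
submission = pd.DataFrame({"PassengerId": queryIds, "Survived": predictions})
submission.to_csv('./solutions/prediction.csv', index=False)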
###Output
_____no_output_____ |
Stock Market Prediction.ipynb | ###Markdown
ARIMA

ARIMA (AutoRegressive Integrated Moving Average) is a forecasting algorithm based on the idea that the information in the past values of the time series can alone be used to predict the future values. ARIMA models explain a time series based on its own past values, basically its own lags and the lagged forecast errors.

An ARIMA model is characterized by 3 terms (p, d, q):
- p is the order of the AR term
- d is the number of differencing required to make the time series stationary
- q is the order of the MA term

As we see in the parameters required by the model, any stationary time series can be modeled with ARIMA models.

Stationarity

A stationary time series is one whose properties do not depend on the time at which the series is observed. Thus, time series with trends, or with seasonality, are not stationary — the trend and seasonality will affect the value of the time series at different times.

To remove a trend, subtract the previous value from the current value. If a single difference does not produce a stationary series, we may need to difference multiple times; the minimum number of differencing operations needed to make the series stationary is the d term supplied to our ARIMA model.

ADF test

We'll use the Augmented Dickey-Fuller (ADF) test to check if the price series is stationary. The null hypothesis of the ADF test is that the time series is non-stationary. So, if the p-value of the test is less than the significance level (0.05), then we can reject the null hypothesis and infer that the time series is indeed stationary.

So, in our case, if the p-value > 0.05 we'll need to find the order of differencing.
###Code
# Check if price series is stationary
from statsmodels.tsa.stattools import adfuller
result = adfuller(df.Close.dropna())
print(f"ADF Statistic: {result[0]}")
print(f"p-value: {result[1]}")
###Output
ADF Statistic: -1.448789416529229
p-value: 0.558675490721378
###Markdown
p-value > 0.05, therefore the time series is not stationary.
###Code
#!pipenv install --skip-lock pmdarima
from pmdarima.arima.utils import ndiffs
ndiffs(df.Close, test="adf")
###Output
_____no_output_____
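###Markdown
To double-check that a single difference is enough, we can rerun the ADF test on the differenced series (a quick check; `adfuller` was imported above):
###Code
result = adfuller(df.Close.diff().dropna())
print(f"p-value after one difference: {result[1]}")
###Output
_____no_output_____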
###Markdown
Therefore the d value is 1.

p

p is the order of the Auto Regressive (AR) term. It refers to the number of lags to be used as predictors. We can find out the required number of AR terms by inspecting the Partial Autocorrelation (PACF) plot.

The partial autocorrelation represents the correlation between the series and its lags.
###Code
from statsmodels.graphics.tsaplots import plot_acf,plot_pacf
diff = df.Close.diff().dropna()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 4))
ax1.plot(diff)
ax1.set_title("Difference once")
ax2.set_ylim(0, 1)
plot_pacf(diff, ax=ax2);
###Output
_____no_output_____
###Markdown
q

q is the order of the Moving Average (MA) term. It refers to the number of lagged forecast errors that should go into the ARIMA model.

We can look at the ACF plot for the number of MA terms.
###Code
diff = df.Close.diff().dropna()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 4))
ax1.plot(diff)
ax1.set_title("Difference once")
ax2.set_ylim(0, 1)
plot_acf(diff, ax=ax2);
dataset=df.copy()
dataset.set_index('Date', inplace=True)
dataset = dataset[['Close']]
from matplotlib import pyplot
pyplot.figure()
pyplot.subplot(211)
plot_acf(dataset, ax=pyplot.gca(),lags=10)
pyplot.subplot(212)
plot_pacf(dataset, ax=pyplot.gca(),lags=10)
pyplot.show()
###Output
_____no_output_____
###Markdown
In order to evaluate the ARIMA model, I decided to use two different error functions: Mean Squared Error (MSE) and Symmetric Mean Absolute Percentage Error (SMAPE). SMAPE is commonly used as an accuracy measure based on relative errors.

SMAPE is not currently supported in scikit-learn as a loss function, so I first had to create this function on my own.
###Code
def smape_kun(y_true, y_pred):
return np.mean((np.abs(y_pred - y_true) * 200/ (np.abs(y_pred) + np.abs(y_true))))
###Output
_____no_output_____
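###Markdown
A quick sanity check of the function with illustrative values: 100 vs. 110 gives |10| * 200 / 210 ≈ 9.52.
###Code
print(smape_kun(np.array([100.0]), np.array([110.0])))  # ≈ 9.52
###Output
_____no_output_____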
###Markdown

###Code
train_ar = train_data['Close'].values
test_ar = test_data['Close'].values
history = [x for x in train_ar]
print(type(history))
predictions = list()
for t in range(len(test_ar)):
model = ARIMA(history, order=(2,1,1))
model_fit = model.fit(disp=0)
output = model_fit.forecast()
yhat = output[0]
predictions.append(yhat)
obs = test_ar[t]
history.append(obs)
error = mean_squared_error(test_ar, predictions)
print('Testing Mean Squared Error: %.3f' % error)
error2 = smape_kun(test_ar, predictions)
print('Symmetric mean absolute percentage error: %.3f' % error2)
###Output
<class 'list'>
Testing Mean Squared Error: 33910.630
Symmetric mean absolute percentage error: 8.380
###Markdown
SMAPE is a commonly used loss function for time-series problems and therefore provides a reliable, scale-independent measure of error. The low SMAPE here suggests that our model performs well.
###Code
print(model_fit.summary())
residuals=pd.DataFrame(model_fit.resid)
residuals.plot()
residuals.plot(kind='kde')
residuals.describe()
plt.figure(figsize=(14,7))
plt.plot(df['Close'], color='blue', label='Training Data')
plt.plot(test_data.index, predictions, color='green', marker='o', linestyle='dashed', label='Predicted Price')
plt.plot(test_data.index, test_data['Close'], color='red', label='Actual Price')
plt.title('Microsoft Prices Prediction')
plt.xlabel('Dates')
plt.ylabel('Prices')
plt.xticks(np.arange(0,1500, 300), df['Date'][0:1500:300])
plt.legend()
# Actual vs Fitted
model_fit.plot_predict(
start=1,
end=60,
dynamic=False,
);
plt.figure(figsize=(14,7))
plt.plot(test_data.index, predictions, color='green', marker='o', linestyle='dashed',label='Predicted Price')
plt.plot(test_data.index, test_data['Close'], color='red', label='Actual Price')
plt.legend()
plt.title('BSESN Stock Prices Prediction')
plt.xlabel('Dates')
plt.ylabel('Prices')
plt.xticks(np.arange(1000,1259,100), df['Date'][1000:1259:100])
plt.legend()
###Output
_____no_output_____
###Markdown
The above image is a zoomed-in version. From it we can see how closely the two curves follow each other. However, the predicted price seems to look like a "noisy" version of the actual price. This analysis using ARIMA led overall to appreciable results: the model offers good prediction accuracy and is relatively fast compared to alternatives such as RNNs (Recurrent Neural Networks).

Sentiment analysis

NLTK's VADER module

VADER is an NLTK module that provides sentiment scores based on words used ("completely" boosts a score, while "slightly" reduces it), on capitalization & punctuation ("GREAT!!!" is stronger than "great."), and negations (words like "isn't" and "doesn't" affect the outcome).

To view the source code visit https://www.nltk.org/_modules/nltk/sentiment/vader.html
###Code
import pandas as pd #Importing the PANDAS python library
import numpy as np #importing Numpy
%matplotlib inline
#from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer #initiating VADER instance
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
analyser = SentimentIntensityAnalyzer()
headlines= pd.read_csv("C:\\Users\\Kv\\Desktop\\The Sparks Foundation\\stock price prediction\\india-news-headlines.csv")
headlines.head()
# cleaning dataset
#Drop rows with missing values
headlines.dropna(inplace=True)
headlines.tail()
headlines["Date"] = pd.to_datetime(headlines["Date"],format='%Y%m%d')
headlines.info()
headlines.shape
# Keep only the first headline for each day
# (joining all of a day's headlines, as below, is left commented out)
#headlines['headline_text'] = headlines.groupby(['Date']).transform(lambda x : ' '.join(x))
headlines = headlines.drop_duplicates(subset='Date', keep='first', inplace=False)
headlines.reset_index(inplace = True, drop = True)
headlines
headlines.shape
#Calculating score for each news headline in the dataframe/dataset
i=0 #counter
compval1 = [ ] #empty list to hold our computed 'compound' VADER scores
while i<len(headlines):
k = analyser.polarity_scores(headlines.iloc[i]['headline_text'])
compval1.append(k['compound'])
i = i+1
#converting sentiment values to numpy for easier usage
compval1 = np.array(compval1)
len(compval1)
headlines['VADER score'] = compval1
headlines.head(20)
i = 0
predicted_value = [ ] #empty series to hold our predicted values
while(i<len(headlines)):
if ((headlines.iloc[i]['VADER score'] >= 0.1)):
predicted_value.append('positive')
i = i+1
elif ((headlines.iloc[i]['VADER score'] > -0.1) & (headlines.iloc[i]['VADER score'] < 0.1)):
predicted_value.append('neutral')
i = i+1
elif ((headlines.iloc[i]['VADER score'] <= -0.1)):
predicted_value.append('negative')
i = i+1
headlines['sentiment'] = predicted_value
headlines.head(10)
headlines.sentiment.value_counts()
###Output
_____no_output_____
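###Markdown
The labelling loop above can also be written without an explicit `while` loop. A sketch using numpy's `np.select` with the same thresholds:
###Code
conditions = [
    headlines['VADER score'] >= 0.1,
    headlines['VADER score'] <= -0.1,
]
headlines['sentiment'] = np.select(conditions, ['positive', 'negative'], default='neutral')
headlines.sentiment.value_counts()
###Output
_____no_output_____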
###Markdown
Hybrid
###Code
df_merge = pd.merge(df, headlines, how='left', on='Date')
df_merge
new_df=df_merge[['Close','sentiment']]
new_df
new_df.groupby(['sentiment']).mean()
###Output
_____no_output_____
###Markdown
Author: Ankit Kumar

The Sparks Foundation

Stock Market Prediction using Numerical and Textual Analysis

Objective: Create a hybrid model for stock price/performance prediction using numerical analysis of historical stock prices, and sentimental analysis of news headlines

Importing Libraries
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
import warnings
warnings.filterwarnings('ignore')
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import re
###Output
_____no_output_____
###Markdown
Reading Data File
###Code
#### Samsung Stock Dataset #####
df_stock = pd.read_csv("C:\\Users\\ankit\\Downloads\\005930.KS.csv")
df_stock.head()
#### News Headline Dataset #####
df_headline = pd.read_csv("C:\\Users\\ankit\\Downloads\\india-news-headlines.csv")
df_headline.head()
df_stock.isnull().sum()
#### Handling Missing Values ####
for i in ["Open","High","Low","Close","Adj Close","Volume"]:
df_stock[i].fillna(df_stock[i].mean(),inplace=True)
df_stock.isnull().sum()
df_headline.drop(columns=["headline_category"],inplace=True)
###Output
_____no_output_____
###Markdown
Setting date as a index
###Code
##### Stock price Data ######
df_stock['Date']=pd.to_datetime(df_stock['Date'])
##### News Headlines Data #####
df_headline['publish_date'] = df_headline['publish_date'].astype(str)
df_headline = df_headline.filter(['publish_date', 'headline_text']) # filtering the important columns required
df_headline = df_headline.groupby(['publish_date'])['headline_text'].apply(lambda x: ','.join(x)).reset_index() # grouping the news headlines according to 'Date'
df_headline['publish_date']=pd.to_datetime(df_headline['publish_date'])
df_stock.set_index('Date',inplace=True)
df_stock.head()
df_headline.set_index('publish_date',inplace=True)
df_headline.head()
###Output
_____no_output_____
###Markdown
Combining Samsung Stock Dataset and News Headline Dataset
###Code
stock_market=pd.concat([df_stock,df_headline],axis=1)
stock_market.dropna(axis=0,inplace=True)
stock_market.head()
###Output
_____no_output_____
###Markdown
Cleaning the textual data using Natural Language Processing (NLP)
###Code
lent = len(stock_market)
corpus111 = []
ps = PorterStemmer()                              # build the stemmer once, not per row
all_stopwords = set(stopwords.words('english'))   # set lookup is O(1)
for i in range(0, lent):
    text = re.sub('[^a-zA-Z]', ' ', stock_market.iloc[i, 6])  # column 6 is 'headline_text'
    text = text.lower()
    text = text.split()
    text = [ps.stem(word) for word in text if not word in all_stopwords]
    text = ' '.join(text)
    corpus111.append(text)
stock_market['headline_text']=corpus111
###Output
_____no_output_____
###Markdown
Calculating Sentiment Score
###Code
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import unicodedata
sid = SentimentIntensityAnalyzer()
# calculating sentiment scores
stock_market['Compound'] = stock_market['headline_text'].apply(lambda x: sid.polarity_scores(x)['compound'])
stock_market['Negative'] = stock_market['headline_text'].apply(lambda x: sid.polarity_scores(x)['neg'])
stock_market['Neutral'] = stock_market['headline_text'].apply(lambda x: sid.polarity_scores(x)['neu'])
stock_market['Positive'] = stock_market['headline_text'].apply(lambda x: sid.polarity_scores(x)['pos'])
stock_market.head()
stock_market["Close"]
stock_market.shape
###Output
_____no_output_____
###Markdown
Finalizing Stock Data and Save as a csv file
###Code
stock_market.drop(columns=["headline_text"],inplace=True)
index = stock_market.index
index.name = "Date"
stock_market.to_csv('stock_market.csv')
###Output
_____no_output_____
###Markdown
Reading Stock Data File
###Code
df=pd.read_csv("stock_market.csv")
df.set_index('Date', inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
Exploratory Data Analysis of Stock data
###Code
df.shape
df.isnull().sum()
df.describe()
###Output
_____no_output_____
###Markdown
Visualizing Close Price
###Code
plt.figure(figsize=(18,8))
df["Close"].plot()
plt.xlabel('Date')
plt.ylabel('Close Price')
# calculating 7 day rolling mean
df.rolling(7).mean().head(15)
# plotting the close price and a 30-day rolling mean of close price
plt.figure(figsize=(16,10))
df['Close'].plot()
plt.ylabel("Price")
df.rolling(window=30).mean()['Close'].plot()
###Output
_____no_output_____
###Markdown
Testing For Stationarity
###Code
from statsmodels.tsa.stattools import adfuller
def adfuller_test(Price):
result=adfuller(Price)
labels = ['ADF Test Statistic','p-value','#Lags Used','Number of Observations Used']
for value,label in zip(result,labels):
print(label+' : '+str(value) )
if result[1] <= 0.05: # If p_value is less than 0.05 than data is Stationary otherwise data is Non-Stationary
print("Data is stationary")
else:
print("Data is non-stationary ")
#### Testing Adfuller_test
adfuller_test(df['Close'])
###Output
ADF Test Statistic : 1.8122636122701679
p-value : 0.9983730323454776
#Lags Used : 30
Number of Observations Used : 4959
Data is non-stationary
###Markdown
Converting Non-Stationary to Stationary data
###Code
## For converting non stationary to stationary data
df['Seasonal Close Price Difference']=df['Close']-df['Close'].shift(12)
df.head(15)
df.rename(columns={"Close":"Close Price"}, inplace=True)
### Again testing for Stationarity
adfuller_test(df['Seasonal Close Price Difference'].dropna())
#### Visualize the Data ####
plt.figure(figsize=(18,8))
df["Seasonal Close Price Difference"].plot()
plt.xlabel('Date')
plt.ylabel('Seasonal Close Price Difference')
###Output
_____no_output_____
###Markdown
Auto Regressive Model
###Code
### acf = Autocorellation Function
### pacf = Partial Autocorellation Function
from statsmodels.graphics.tsaplots import plot_acf,plot_pacf
## Plotting Autocorellation and Partial Autocorellation Graph
## Collecting p,q,d values from observing these graph
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = plot_acf(df['Seasonal Close Price Difference'].dropna(),lags=40,ax=ax1)
ax2 = fig.add_subplot(212)
fig = plot_pacf(df['Seasonal Close Price Difference'].dropna(),lags=40,ax=ax2)
###Output
_____no_output_____
###Markdown
Predicting Using SARIMAX
###Code
import statsmodels.api as sm
model=sm.tsa.statespace.SARIMAX(df['Close Price'],order=(1, 1, 1),seasonal_order=(1,1,1,12)) # order=(p,d,q)
results=model.fit()
results.summary()
results.plot_diagnostics()
## Predicting the forecasted value
df['Forecasted Close Price']=results.predict(start=1,end=4989,dynamic=False)
#### Comparison b/w Actual Close Price and Predicted price
predict=pd.DataFrame({"Close Price": df['Close Price'],"Prediction":df['Forecasted Close Price']})
predict.dropna(axis=0, inplace=True)
predict.head()
###Output
_____no_output_____
###Markdown
Accuracy Percentage
###Code
accuracy = (predict["Prediction"].sum() / predict["Close Price"].sum()) * 100
print(accuracy.round(1),"%")
###Output
99.9 %
###Markdown
Comparison Graph Between Close price and Forecasted Close Price
###Code
df[['Close Price','Forecasted Close Price']].plot(figsize=(15,8))
ax=plt.axes()
ax.set_facecolor("black")
plt.xlabel('Date', fontweight="bold")
plt.ylabel('Price',fontweight="bold")
plt.title('Comparison Graph',fontweight="bold")
###Output
_____no_output_____
###Markdown
Predicting Close Price By using train_test_split method
###Code
from pmdarima.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Split Data in test and train
###Code
train, test = train_test_split(df[["Close Price"]], test_size =0.3)
test.shape
###Output
_____no_output_____
###Markdown
Predicting using SARIMAX
###Code
# statsmodels' SARIMAX results have no n_periods argument (that is pmdarima's API);
# forecast(steps=...) yields the out-of-sample predictions instead
prediction = pd.DataFrame(np.asarray(results.forecast(steps=len(test))), test.index)
prediction.columns = ["predicted_sales"]
test["predicted_sales"] = prediction
test
###Output
_____no_output_____
###Markdown
Visualizing Predicted Data
###Code
plt.figure(figsize = (16,10))
ax=plt.axes()
ax.set_facecolor("black")
plt.plot(train, label = "Training")
plt.plot(test, label = "Testing",color="green")
plt.plot(prediction, label = "Prediction")
plt.legend()
###Output
_____no_output_____
###Markdown
Author - ByruSrilakshmi

Grip Task 7

Stock Market Prediction using Numerical and Textual Analysis

- Objective: To create a hybrid model for stock price or performance prediction using numerical analysis of historical stock prices, and sentimental analysis of news headlines. The stock to analyze and predict is SENSEX (S&P BSE SENSEX)
- Stock to analyze and predict - SENSEX (S&P BSE SENSEX)
- Download historical stock prices from finance.yahoo.com
- Download textual (news) data from https://bit.ly/36fFPI6
- Use either R or Python, or both for separate analysis and then combine the findings to create a hybrid model
- You are free to select a different stock to analyze and news dataset as well while not changing the objective of the task.

Import the Important Libraries
###Code
# Import the libraries
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
import nltk
import re
from textblob import TextBlob
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
import xgboost
import lightgbm
# For reading stock data from yahoo
from pandas_datareader.data import DataReader
# For time stamps
from datetime import datetime
# Load the first dataset
columns=['Date','Category','News']
ndf = pd.read_csv("india-news-headlines.csv",names=columns)
print('Showing part of the whole dataset:')
ndf.head(5)
ndf.drop(0, inplace=True)
ndf.drop('Category', axis = 1, inplace=True)
print('Showing part of the whole dataset:')
ndf.head(-5)
# Load the second dataset
hisdf = pd.read_csv("^BSESN.csv")
hisdf.head(-5)
###Output
_____no_output_____
###Markdown
Common Dataset Exploration
###Code
# Check for common information of the first datast
ndf["Date"] = pd.to_datetime(ndf["Date"],format='%Y%m%d')
ndf.info()
# Group the headlines for each day
ndf['News'] = ndf.groupby(['Date']).transform(lambda x : ' '.join(x))
ndf = ndf.drop_duplicates()
ndf.reset_index(inplace=True,drop=True)
ndf
# Check for any duplicated values
ndf.isnull().sum()
len(ndf)
hisdf=hisdf[["Date","Open","High","Low","Close","Volume"]]
hisdf.head(-5)
# Check for common information of the second dataset
hisdf["Date"]= pd.to_datetime(hisdf["Date"])
hisdf.info()
hisdf.describe()
# Check for duplicated values
hisdf.isnull().sum()
len(hisdf)
# Figure plot
plt.figure(figsize=(20,10))
hisdf['Close'].plot()
plt.ylabel('BSESN')
###Output
_____no_output_____
###Markdown
Remove Unwanted Characters from the News
###Code
#removing unwanted characters from the News
ndf.replace("[^a-zA-Z']"," ",regex=True,inplace=True)
ndf["News"].head(5)
###Output
_____no_output_____
###Markdown
Historical Analysis Plot the Moving Average
###Code
#Plotting moving average
close = hisdf['Close']
ma = close.rolling(window = 50).mean()
std = close.rolling(window = 50).std()
plt.figure(figsize=(20,10))
hisdf['Close'].plot(color='g',label='Close')
ma.plot(color = 'r',label='Rolling Mean')
std.plot(label = 'Rolling Standard Deviation')
plt.legend()
###Output
_____no_output_____
###Markdown
Plot the Returns
###Code
#Plotting returns
returns = close / close.shift(1) - 1
plt.figure(figsize = (20,10))
returns.plot(label='Return', color = 'g')
plt.title("Returns")
# Train test split
train = hisdf[:1219]
test = hisdf[1219:]
###Output
_____no_output_____
###Markdown
Rolling mean and Standard Deviation
###Code
#Stationarity test
from statsmodels.tsa.stattools import adfuller  # needed for the ADF test below (not imported above)
def test_stationarity(timeseries):
#Determine the rolling statistics
rolmean = timeseries.rolling(20).mean()
rolstd = timeseries.rolling(20).std()
#Plot rolling statistics:
plt.figure(figsize = (20,10))
plt.plot(timeseries, color = 'blue', label = 'original')
plt.plot(rolmean, color = 'r', label = 'rolling mean')
plt.plot(rolstd, color = 'black', label = 'rolling std')
plt.xlabel('Date')
plt.legend()
plt.title('Rolling Mean and Standard Deviation', fontsize = 30)
plt.show(block = False)
    print('Results of Dickey-Fuller test')
result = adfuller(timeseries, autolag = 'AIC')
labels = ['ADF Test Statistic','p-value','#Lags Used','Number of Observations Used']
for value,label in zip(result, labels):
print(label+' : '+str(value) )
if result[1] <= 0.05:
print("Strong evidence against the null hypothesis(Ho), reject the null hypothesis. Data is stationary")
else:
print("Weak evidence against null hypothesis, time series is non-stationary ")
test_stationarity(train['Close'])
train_log = np.log(train['Close'])
test_log = np.log(test['Close'])
mav = train_log.rolling(24).mean()
plt.figure(figsize = (20,10))
plt.plot(train_log)
plt.plot(mav, color = 'red')
train_log.dropna(inplace = True)
test_log.dropna(inplace = True)
test_stationarity(train_log)
train_log_diff = train_log - mav
train_log_diff.dropna(inplace = True)
test_stationarity(train_log_diff)
#Using auto arima to make predictions using log data
from pmdarima import auto_arima
model = auto_arima(train_log, trace = True, error_action = 'ignore', suppress_warnings = True)
model.fit(train_log)
predictions = model.predict(n_periods = len(test))  # pmdarima's predict takes n_periods
predictions = pd.DataFrame(predictions,index = test_log.index,columns=['Prediction'])
plt.plot(train_log, label='Train')
plt.plot(test_log, label='Test')
plt.plot(predictions, label='Prediction')
plt.title('BSESN Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Actual Stock Price')
###Output
_____no_output_____
###Markdown
Error Calculation
###Code
#Calculating error
rms = np.sqrt(mean_squared_error(test_log,predictions))
print("RMSE : ", rms)
###Output
RMSE : 0.026837616405067617
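###Markdown
Since the model was fit on log prices, the predictions can be mapped back to the original price scale with `np.exp`. A short sketch using the variables above:
###Code
predictions_price = np.exp(predictions['Prediction'])
actual_price = np.exp(test_log)
print("RMSE on the price scale:", np.sqrt(mean_squared_error(actual_price, predictions_price)))
###Output
_____no_output_____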
###Markdown
Textual Analysis
###Code
#Functions to get the subjectivity and polarity
def getSubjectivity(text):
return TextBlob(text).sentiment.subjectivity
def getPolarity(text):
return TextBlob(text).sentiment.polarity
#Adding subjectivity and polarity columns
ndf['Subjectivity'] = ndf['News'].apply(getSubjectivity)
ndf['Polarity'] = ndf['News'].apply(getPolarity)
ndf
#Adding sentiment score to df_news
sia = SentimentIntensityAnalyzer()
ndf['Compound'] = [sia.polarity_scores(v)['compound'] for v in ndf['News']]
ndf['Negative'] = [sia.polarity_scores(v)['neg'] for v in ndf['News']]
ndf['Neutral'] = [sia.polarity_scores(v)['neu'] for v in ndf['News']]
ndf['Positive'] = [sia.polarity_scores(v)['pos'] for v in ndf['News']]
ndf
###Output
_____no_output_____
###Markdown
Merge the Historical and Textual Data
###Code
df_merge = pd.merge(hisdf, ndf, how='inner', on='Date')
df_merge
###Output
_____no_output_____
###Markdown
Create Dataset for Model Training
###Code
dfmerge1 = df_merge[['Close','Subjectivity', 'Polarity', 'Compound', 'Negative', 'Neutral', 'Positive']]
dfmerge1
###Output
_____no_output_____
###Markdown
Normalize Data
###Code
scaler = MinMaxScaler()
df = pd.DataFrame(scaler.fit_transform(dfmerge1))
df.columns = dfmerge1.columns
df.index = dfmerge1.index
df.head()
X=df.drop('Close',axis=1)
X
Y=df['Close']
Y
###Output
_____no_output_____
###Markdown
Split the Dataset into Train & Test Data
###Code
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state = 0)
x_train.shape
x_train[:10]
###Output
_____no_output_____
###Markdown
RandomForestRegressor Model
###Code
rf = RandomForestRegressor()
rf.fit(x_train, y_train)
prediction=rf.predict(x_test)
print(prediction[:10])
print(y_test[:10])
print('Mean Squared error: ',mean_squared_error(prediction,y_test))
###Output
[0.59204602 0.77743871 0.46716064 0.73144085 0.25611568 0.33254211
0.63953336 0.683319 0.59969559 0.26583055]
1006 0.966906
1109 0.627192
187 0.299075
896 0.723549
413 0.465435
501 0.578316
546 0.591144
881 0.839169
959 0.903342
268 0.228219
Name: Close, dtype: float64
Mean Squared error: 0.05257968397499098
###Markdown
DecisionTreeRegressor Model
###Code
dtr = DecisionTreeRegressor()
dtr.fit(x_train, y_train)
predictions = dtr.predict(x_test)
print(predictions[:10])
print(y_test[:10])
print('Mean Squared error: ',mean_squared_error(predictions,y_test))
###Output
[0.46121848 0.98284344 0.69232194 0.71547783 0.19176137 0.28527224
0.88757586 0.69698498 0.23569794 0.11570669]
1006 0.966906
1109 0.627192
187 0.299075
896 0.723549
413 0.465435
501 0.578316
546 0.591144
881 0.839169
959 0.903342
268 0.228219
Name: Close, dtype: float64
Mean Squared error: 0.10831900809236311
###Markdown
AdaBoostRegressor Model
###Code
adb = AdaBoostRegressor()
adb.fit(x_train, y_train)
predictions = adb.predict(x_test)
print(mean_squared_error(predictions, y_test))
###Output
0.05492347045438241
###Markdown
LGBMRegressor Model
###Code
gbm = lightgbm.LGBMRegressor()
gbm.fit(x_train, y_train)
predictions = gbm.predict(x_test)
print(mean_squared_error(predictions, y_test))
###Output
0.0583079056070462
###Markdown
XGBRegressor Model
###Code
xgb = xgboost.XGBRegressor()
xgb.fit(x_train, y_train)
predictions = xgb.predict(x_test)
print(mean_squared_error(predictions, y_test))
###Output
0.05968830860645931
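###Markdown
To compare the five regressors side by side, we can print their test MSEs in one loop. A small sketch reusing the fitted models above:
###Code
models = {"Random Forest": rf, "Decision Tree": dtr, "AdaBoost": adb, "LightGBM": gbm, "XGBoost": xgb}
for name, m in models.items():
    print(f"{name}: MSE = {mean_squared_error(m.predict(x_test), y_test):.5f}")
###Output
_____no_output_____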
###Markdown
Data Preprocessing

Feature Scaling
###Code
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0,1))
train_scaled = sc.fit_transform(train)
x_train = []
y_train = []
for i in range(60,1258):
x_train.append(train_scaled[i-60:i,0])
y_train.append(train_scaled[i,0])
x_train = np.array(x_train)
y_train = np.array(y_train)
x_train = np.reshape(x_train,(x_train.shape[0],x_train.shape[1],1))
###Output
_____no_output_____
###Markdown
Building the RNN
###Code
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
regressor = Sequential()
#First LSTM Layer
regressor.add(LSTM(units=50, return_sequences = True, input_shape = (x_train.shape[1],1)))
regressor.add(Dropout(0.2))
#Second LSTM Layer
regressor.add(LSTM(units=50, return_sequences = True))
regressor.add(Dropout(0.2))
#Third LSTM Layer
regressor.add(LSTM(units=50, return_sequences = True))
regressor.add(Dropout(0.2))
#Fourth LSTM Layer
regressor.add(LSTM(units=50))
regressor.add(Dropout(0.2))
#Output Layer
regressor.add(Dense(units=1))
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
regressor.fit(x_train,y_train,epochs=100,batch_size=32)
###Output
Epoch 1/100
38/38 [==============================] - 31s 114ms/step - loss: 0.0738
Epoch 2/100
38/38 [==============================] - 5s 134ms/step - loss: 0.0074
Epoch 3/100
38/38 [==============================] - 5s 134ms/step - loss: 0.0055
Epoch 4/100
38/38 [==============================] - 5s 134ms/step - loss: 0.0056
Epoch 5/100
38/38 [==============================] - 5s 134ms/step - loss: 0.0046
Epoch 6/100
38/38 [==============================] - 5s 134ms/step - loss: 0.0050
Epoch 7/100
38/38 [==============================] - 5s 134ms/step - loss: 0.0053
Epoch 8/100
38/38 [==============================] - 5s 133ms/step - loss: 0.0050
Epoch 9/100
38/38 [==============================] - 5s 133ms/step - loss: 0.0060
Epoch 10/100
38/38 [==============================] - 5s 135ms/step - loss: 0.0042
Epoch 11/100
38/38 [==============================] - 6s 154ms/step - loss: 0.0040
Epoch 12/100
38/38 [==============================] - 5s 136ms/step - loss: 0.0037
Epoch 13/100
38/38 [==============================] - 5s 137ms/step - loss: 0.0038
Epoch 14/100
38/38 [==============================] - 5s 133ms/step - loss: 0.0043
Epoch 15/100
38/38 [==============================] - 5s 134ms/step - loss: 0.0041
Epoch 16/100
38/38 [==============================] - 5s 134ms/step - loss: 0.0035
Epoch 17/100
38/38 [==============================] - 5s 134ms/step - loss: 0.0034
Epoch 18/100
38/38 [==============================] - 5s 134ms/step - loss: 0.0043
Epoch 19/100
38/38 [==============================] - 5s 134ms/step - loss: 0.0031
Epoch 20/100
38/38 [==============================] - 5s 134ms/step - loss: 0.0038
Epoch 21/100
38/38 [==============================] - 5s 134ms/step - loss: 0.0035
Epoch 22/100
38/38 [==============================] - 6s 147ms/step - loss: 0.0034
Epoch 23/100
38/38 [==============================] - 6s 169ms/step - loss: 0.0037
Epoch 24/100
38/38 [==============================] - 6s 152ms/step - loss: 0.0037
Epoch 25/100
38/38 [==============================] - 5s 128ms/step - loss: 0.0029
Epoch 26/100
38/38 [==============================] - 5s 125ms/step - loss: 0.0031
Epoch 27/100
38/38 [==============================] - 5s 126ms/step - loss: 0.0037
Epoch 28/100
38/38 [==============================] - 5s 125ms/step - loss: 0.0030
Epoch 29/100
38/38 [==============================] - 5s 124ms/step - loss: 0.0033
Epoch 30/100
38/38 [==============================] - 5s 128ms/step - loss: 0.0028
Epoch 31/100
38/38 [==============================] - 5s 132ms/step - loss: 0.0030
Epoch 32/100
38/38 [==============================] - 5s 125ms/step - loss: 0.0033
Epoch 33/100
38/38 [==============================] - 5s 125ms/step - loss: 0.0025
Epoch 34/100
38/38 [==============================] - 5s 135ms/step - loss: 0.0027
Epoch 35/100
38/38 [==============================] - 6s 145ms/step - loss: 0.0027
Epoch 36/100
38/38 [==============================] - 5s 140ms/step - loss: 0.0025
Epoch 37/100
38/38 [==============================] - 5s 136ms/step - loss: 0.0030
Epoch 38/100
38/38 [==============================] - 5s 124ms/step - loss: 0.0029
Epoch 39/100
38/38 [==============================] - 5s 125ms/step - loss: 0.0027
Epoch 40/100
38/38 [==============================] - 5s 125ms/step - loss: 0.0027
Epoch 41/100
38/38 [==============================] - 5s 126ms/step - loss: 0.0023
Epoch 42/100
38/38 [==============================] - 6s 151ms/step - loss: 0.0025
Epoch 43/100
38/38 [==============================] - 5s 125ms/step - loss: 0.0025
Epoch 44/100
38/38 [==============================] - 5s 125ms/step - loss: 0.0022
Epoch 45/100
38/38 [==============================] - 5s 124ms/step - loss: 0.0024
Epoch 46/100
38/38 [==============================] - 5s 124ms/step - loss: 0.0024
Epoch 47/100
38/38 [==============================] - 5s 124ms/step - loss: 0.0023
Epoch 48/100
38/38 [==============================] - 5s 124ms/step - loss: 0.0027
Epoch 49/100
38/38 [==============================] - 5s 124ms/step - loss: 0.0022
Epoch 50/100
38/38 [==============================] - 5s 125ms/step - loss: 0.0022
Epoch 51/100
38/38 [==============================] - 5s 126ms/step - loss: 0.0021
Epoch 52/100
38/38 [==============================] - 5s 125ms/step - loss: 0.0023
Epoch 53/100
38/38 [==============================] - 5s 124ms/step - loss: 0.0022
Epoch 54/100
38/38 [==============================] - 6s 155ms/step - loss: 0.0021
Epoch 55/100
38/38 [==============================] - 5s 142ms/step - loss: 0.0022
Epoch 56/100
38/38 [==============================] - 6s 150ms/step - loss: 0.0022
Epoch 57/100
38/38 [==============================] - 5s 139ms/step - loss: 0.0019
Epoch 58/100
38/38 [==============================] - 5s 143ms/step - loss: 0.0017
Epoch 59/100
38/38 [==============================] - 5s 134ms/step - loss: 0.0022
Epoch 60/100
38/38 [==============================] - 6s 146ms/step - loss: 0.0021
Epoch 61/100
38/38 [==============================] - 6s 145ms/step - loss: 0.0018
Epoch 62/100
38/38 [==============================] - 6s 150ms/step - loss: 0.0020
Epoch 63/100
38/38 [==============================] - 5s 123ms/step - loss: 0.0021
Epoch 64/100
38/38 [==============================] - 5s 122ms/step - loss: 0.0019
Epoch 65/100
38/38 [==============================] - 5s 122ms/step - loss: 0.0017
Epoch 66/100
38/38 [==============================] - 5s 123ms/step - loss: 0.0022
Epoch 67/100
38/38 [==============================] - 5s 122ms/step - loss: 0.0021
Epoch 68/100
38/38 [==============================] - 5s 124ms/step - loss: 0.0017
Epoch 69/100
38/38 [==============================] - 5s 123ms/step - loss: 0.0016
Epoch 70/100
38/38 [==============================] - 5s 123ms/step - loss: 0.0019
Epoch 71/100
38/38 [==============================] - 5s 123ms/step - loss: 0.0017
Epoch 72/100
38/38 [==============================] - 5s 123ms/step - loss: 0.0019
Epoch 73/100
38/38 [==============================] - 5s 125ms/step - loss: 0.0016
Epoch 74/100
38/38 [==============================] - 5s 123ms/step - loss: 0.0018
Epoch 75/100
38/38 [==============================] - 5s 125ms/step - loss: 0.0019
Epoch 76/100
38/38 [==============================] - 5s 124ms/step - loss: 0.0018
Epoch 77/100
38/38 [==============================] - 5s 132ms/step - loss: 0.0017
Epoch 78/100
38/38 [==============================] - 5s 131ms/step - loss: 0.0016
Epoch 79/100
38/38 [==============================] - 5s 133ms/step - loss: 0.0019
Epoch 80/100
38/38 [==============================] - 5s 132ms/step - loss: 0.0018
Epoch 81/100
38/38 [==============================] - 5s 127ms/step - loss: 0.0017
Epoch 82/100
38/38 [==============================] - 5s 124ms/step - loss: 0.0016
Epoch 83/100
38/38 [==============================] - 5s 134ms/step - loss: 0.0016
Epoch 84/100
38/38 [==============================] - 5s 123ms/step - loss: 0.0016
Epoch 85/100
38/38 [==============================] - 5s 129ms/step - loss: 0.0015
Epoch 86/100
38/38 [==============================] - 5s 124ms/step - loss: 0.0016
Epoch 87/100
38/38 [==============================] - 5s 122ms/step - loss: 0.0014
Epoch 88/100
38/38 [==============================] - 5s 123ms/step - loss: 0.0017
Epoch 89/100
38/38 [==============================] - 5s 124ms/step - loss: 0.0014
Epoch 90/100
38/38 [==============================] - 5s 123ms/step - loss: 0.0016
Epoch 91/100
38/38 [==============================] - 5s 126ms/step - loss: 0.0014
Epoch 92/100
38/38 [==============================] - 5s 123ms/step - loss: 0.0014
Epoch 93/100
38/38 [==============================] - 5s 123ms/step - loss: 0.0013
Epoch 94/100
38/38 [==============================] - 5s 123ms/step - loss: 0.0016
Epoch 95/100
38/38 [==============================] - 5s 123ms/step - loss: 0.0016
Epoch 96/100
38/38 [==============================] - 5s 127ms/step - loss: 0.0015
Epoch 97/100
38/38 [==============================] - 5s 131ms/step - loss: 0.0014
Epoch 98/100
38/38 [==============================] - 5s 132ms/step - loss: 0.0015
Epoch 99/100
###Markdown
Now, making predictions using the test dataset
###Code
stock_test = pd.read_csv('Google_Stock_Price_Test.csv',index_col = "Date", parse_dates = True)
actual_stock = stock_test.iloc[:,1:2].values
stock_test.head()
stock_test.info()
stock_test['Volume'] = stock_test['Volume'].str.replace(',','').astype(float)
test = pd.DataFrame(stock_test['Open'])
stock_total = pd.concat((stock['Open'],stock_test['Open']), axis = 0)
inputs = stock_total[len(stock_total) - len(stock_test) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
x_test = []
for i in range(60,80):
x_test.append(inputs[i-60:i,0])
x_test = np.array(x_test)
x_test = np.reshape(x_test,(x_test.shape[0],x_test.shape[1],1))
stock_prediction = regressor.predict(x_test)
stock_prediction = sc.inverse_transform(stock_prediction)
stock_prediction = pd.DataFrame(stock_prediction)
###Output
_____no_output_____
###Markdown
Visualizing the Results
###Code
plt.figure(figsize = (12,6))
plt.title("Google Stock Price Prediction")
plt.xlabel("Time")
plt.ylabel("Google Stock Price")
plt.plot(actual_stock, color = 'blue',label = 'Actual Google Stock Price')
plt.plot(stock_prediction,color = 'red', label = 'Predicted Google Stock Price')
plt.legend()
plt.show()
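# Hedged addition (not part of the original notebook): quantify the fit with
# RMSE over the 20 test days; assumes scikit-learn is available in this environment.
from sklearn.metrics import mean_squared_error
rmse = np.sqrt(mean_squared_error(actual_stock, stock_prediction))
print(f"Test RMSE: {rmse:.2f}")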
###Output
_____no_output_____ |
Files/Files.ipynb | ###Markdown
Python Reference - Files. **Author:** Robert Bantele. Definition: some basic snippets for working with files. Links: https://docs.python.org/3/library/functions.html#open , https://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files , https://stackabuse.com/file-handling-in-python/ . Operating system: file systems are a bit different depending on the OS; use the **platform.system** method to find out which OS you are on
###Code
import platform
cur_plat = platform.system()
print(cur_plat)
###Output
Windows
###Markdown
file paths: use **os.path.join** to join correct file paths for your OS. Putting a dot as the first argument will build the path from the script directory
###Code
import os
file_name: str = "File.txt"
file_path = os.path.join(".", file_name)
print(file_path)
###Output
.\File.txt
###Markdown
directory contents: use **os.listdir** to get all files in a directory into a list
###Code
import os
contents = os.listdir(".")
print(contents)
###Output
['.ipynb_checkpoints', 'CSV.csv', 'CSV.ipynb', 'Files.ipynb', 'new folder']
###Markdown
current working directory: use **os.getcwd()** to get the path to the working directory
###Code
import os
current_working_directory = os.getcwd()
print(current_working_directory)
###Output
E:\Develop\Python\40_CodeSnippets\PythonReference\Files
###Markdown
current script directory: use **\_\_file\_\_** to get the location of the current script. **works in .py files - does not work in Jupyter Notebook**
###Code
import os
print(__file__)
print(os.path.dirname(__file__))
###Output
_____no_output_____
###Markdown
create directory: use **os.makedirs** to create a directory.
###Code
import os
new_dir = os.path.join(current_working_directory, "new folder")
if not os.path.exists(new_dir):
print(f"creating directory {new_dir}")
os.makedirs(new_dir)
###Output
_____no_output_____
###Markdown
check if file is directory
###Code
print(f"\"{new_dir}\" is directory -> {os.path.isdir(new_dir)}")
###Output
"E:\Develop\Python\40_CodeSnippets\PythonReference\Files\new folder" is directory -> True
###Markdown
delete directory
###Code
if os.path.exists(new_dir):
print(f"deleting directory {new_dir}")
    os.rmdir(new_dir)  # os.remove() raises on directories; os.rmdir() removes an empty directory
###Output
_____no_output_____
###Markdown
file open modes (source: https://stackabuse.com/file-handling-in-python/)

| mode | description |
|:-----|:------------|
| r | Opens the file in read-only mode. Starts reading from the beginning of the file and is the **default** mode for the **open()** function. |
| rb | Opens the file as read-only in binary format and starts reading from the beginning of the file. While binary format can be used for different purposes, it is usually used when dealing with things like images, videos, etc. |
| r+ | Opens a file for reading and writing, placing the pointer at the beginning of the file. |
| w | Opens in write-only mode. The pointer is placed at the beginning of the file and this will overwrite any existing file with the same name. It will create a new file if one with the same name doesn't exist. |
| wb | Opens a write-only file in binary mode. |
| w+ | Opens a file for writing and reading. |
| wb+ | Opens a file for writing and reading in binary mode. |
| a | Opens a file for appending new information to it. The pointer is placed at the end of the file. A new file is created if one with the same name doesn't exist. |
| ab | Opens a file for appending in binary mode. |
| a+ | Opens a file for both appending and reading. |
| ab+ | Opens a file for both appending and reading in binary mode. |

create file: use **open** in mode **wt** to open a file for writing
###Code
with open(file=file_path, mode="wt", encoding="utf8") as file:
print(file)
###Output
<_io.TextIOWrapper name='.\\File.txt' mode='wt' encoding='utf8'>
###Markdown
write to file: use **file.write** to write to a file. Append \n for a line break
###Code
with open(file=file_path, mode="w+", encoding="utf8") as file:
for r in range(1,10):
file.write(f"this is line {r}\n")
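# Hedged extra: writelines() writes any iterable of strings in one call;
# note that it does not append newlines for you.
with open(file=file_path, mode="a", encoding="utf8") as file:
    file.writelines(f"this is appended line {r}\n" for r in range(1, 3))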
###Output
_____no_output_____
###Markdown
close file: use **file.close** to close a file - although the right way to work with files is using **with open**, and then there is no need to call **file.close**
###Code
file.close()
###Output
_____no_output_____
###Markdown
copy file: use **copyfile** from the **shutil** library to copy files
###Code
from shutil import copyfile
dst: str = "CopiedFile.txt"
copyfile(file_name, dst)
###Output
_____no_output_____
###Markdown
read file: use **with open** to open a file and automatically close it when finished working with it
###Code
with open(file=dst, mode="rt", encoding="utf8") as copied_file:
for line in copied_file:
print(line)
###Output
this is line 1
this is line 2
this is line 3
this is line 4
this is line 5
this is line 6
this is line 7
this is line 8
this is line 9
this is line 1
this is line 2
this is line 3
this is line 4
this is line 5
this is line 6
this is line 7
this is line 8
this is line 9
###Markdown
delete file: use **os.remove** to delete a file
###Code
import os
dst_path = os.path.join(".", dst)
os.remove(dst_path)
os.remove(file_path)
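# Hedged safety pattern: guard removals so a missing file does not raise, e.g.
# if os.path.exists(p): os.remove(p), or catch FileNotFoundError.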
###Output
_____no_output_____
###Markdown
move file: copied from stackoverflow: https://stackoverflow.com/a/8858026/9351796 . Use **os.rename()**, **shutil.move()**, or **os.replace()**. All employ the same syntax:
###Code
import os
import shutil
os.rename("path/to/current/file.foo", "path/to/new/destination/for/file.foo")
shutil.move("path/to/current/file.foo", "path/to/new/destination/for/file.foo")
os.replace("path/to/current/file.foo", "path/to/new/destination/for/file.foo")
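# Hedged note: the three calls above are alternatives with the same syntax;
# running them in sequence would fail, because the source path no longer
# exists after the first successful move.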
###Output
_____no_output_____ |
4-assets/BOOKS/Jupyter-Notebooks/Overflow/Connecting_with_the_Qt_Console.ipynb | ###Markdown
Connecting to an existing IPython kernel using the Qt Console

The Frontend/Kernel Model

The traditional IPython (`ipython`) consists of a single process that combines a terminal based UI with the process that runs the users code. While this traditional application still exists, the modern Jupyter consists of two processes:

* Kernel: this is the process that runs the users code.
* Frontend: this is the process that provides the user interface where the user types code and sees results.

Jupyter currently has 3 frontends:

* Terminal Console (`jupyter console`)
* Qt Console (`jupyter qtconsole`)
* Notebook (`jupyter notebook`)

The Kernel and Frontend communicate over a ZeroMQ/JSON based messaging protocol, which allows multiple Frontends (even of different types) to communicate with a single Kernel. This opens the door for all sorts of interesting things, such as connecting a Console or Qt Console to a Notebook's Kernel. For example, you may want to connect a Qt console to your Notebook's Kernel and use it as a help browser, calling `??` on objects in the Qt console (whose pager is more flexible than the one in the notebook). This Notebook describes how you would connect another Frontend to an IPython Kernel that is associated with a Notebook. The commands currently given here are specific to the IPython kernel.

Manual connection

To connect another Frontend to a Kernel manually, you first need to find out the connection information for the Kernel using the `%connect_info` magic:
###Code
%connect_info
###Output
_____no_output_____
###Markdown
You can see that this magic displays everything you need to connect to this Notebook's Kernel.

Automatic connection using a new Qt Console

You can also start a new Qt Console connected to your current Kernel by using the `%qtconsole` magic. This will detect the necessary connection information and start the Qt Console for you automatically.
###Code
a = 10
%qtconsole
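# Hedged note: from a system shell, `jupyter qtconsole --existing` (or
# `jupyter console --existing`) attaches a new frontend to the most recently
# started kernel; pass the kernel-*.json file from %connect_info to pick one.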
###Output
_____no_output_____ |
Aggregation and grouping.ipynb | ###Markdown
AGGREGATION AND GROUPING
* count()
* first()
* last()
* mean()
* median()
* min()
* max()
* std()
* var()
* sum()
###Code
import seaborn as sn
import pandas as pd
import numpy as np
df= sn.load_dataset("planets")
df
?sn.load_dataset
df.head()
df.distance
df.shape
df.tail()
df.mean()
df.median()
df.describe()
# If we want to do this for a specific variable, first select the variable,
# then chain the aggregation function onto it
df["distance"].describe().T
df["distance"].var() # min, std, sum etc. can all be applied the same way
df.describe().T # the trailing .T transposes the result, swapping observations and variables
# To drop the missing values in the frame (which gives us cleaner statistics) we use the dropna function
df.dropna().describe().T # comparing the results with the table above shows the differences
h= df.groupby("method")["orbital_period"].describe()
h
h.shape
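# Hedged extra: .agg() requests several aggregations at once, which is often
# more readable than a full describe():
df.groupby("method")["orbital_period"].agg(["count", "mean", "median"])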
###Output
_____no_output_____ |
notebooks/04_FSharp.ipynb | ###Markdown
Use DwC-A_dotnet with F#

DwC-A_dotnet can be used with F# as well as C#. The NuGet library installation and ArchiveReader/FileReader formatters work in the same way as they do for C#. Here we'll use the Papilionidae dataset to demonstrate reading latitude and longitude information from the occurrence data file and plot it on a map of Texas with Plotly.

**Important Note:** If you are using this notebook from Binder make sure to select **Kernel -> Change Kernel -> .NET (F#)** before running any of the cells below.
###Code
#r "nuget:Plotly.NET,2.0.0-preview.15"
#r "nuget:Plotly.NET.Interactive,2.0.0-preview.15"
#r "nuget:FSharp.Data,4.2.5"
#r "nuget:DwC-A_dotnet,0.6.0"
#r "nuget:DwC-A_dotnet.Interactive,0.1.9-Pre"
open DwC_A
open DwC_A.Terms
open DwC_A.Factories
open DwC_A.Config
open System.IO
let archiveFile = "./data/Papilionidae.zip"
let factory = new DefaultFactory(fun cfg ->
cfg.Add<ArchiveFolderConfiguration>( fun cfg ->
cfg.OutputPath <- "./Papilionidae"
if(Directory.Exists(cfg.OutputPath)) then
Directory.Delete(cfg.OutputPath, true) ))
let archive = new ArchiveReader(archiveFile, factory);
let occurrence = archive.CoreFile;
occurrence
open System.Linq
open Plotly.NET
let lonlat = occurrence.DataRows
.Where(fun row -> row.[Terms.decimalLongitude] <> null && row.[Terms.decimalLatitude] <> null)
.Select(fun row -> ( $"{row.[Terms.genus]} {row.[Terms.specificEpithet]}",
row.[Terms.decimalLongitude] |> double,
row.[Terms.decimalLatitude] |> double) )
.GroupBy(fun row -> match row with (a, b, c) -> a)
.Select(fun group -> (group.Key, group.Select(fun row -> match row with (a, b, c) -> (b, c))))
let geo = lonlat.Select(fun row ->
match row with (a, b) ->
Chart.ScatterGeo(b,
mode=StyleParam.Mode.Markers,
ShowLegend = true)
|> Chart.withMarkerStyle(Size = 2)
|> Chart.withTraceName(a))
|> Chart.combine
let map = geo |> Chart.withGeoStyle(
FitBounds = StyleParam.GeoFitBounds.GeoJson,
Scope = StyleParam.GeoScope.Usa,
ShowLakes = true,
ShowRivers = true,
ShowLand = true,
LandColor = Color.fromHex("#f1f1f1")
)
|> Chart.withSize(height = 500.0, width = 800.0)
|> Chart.withTitle(title = "Papilionidae of Texas")
map
###Output
_____no_output_____ |
battery-state-estimation/results/lg/lstm_soc_percentage_lg_result.ipynb | ###Markdown
Main notebook for battery state estimation
###Code
import numpy as np
import pandas as pd
import scipy.io
import math
import os
import ntpath
import sys
import logging
import time
from importlib import reload
import plotly.graph_objects as go
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam
from keras.utils import np_utils
from keras.layers import LSTM, Embedding, RepeatVector, TimeDistributed, Masking
from keras.callbacks import EarlyStopping, ModelCheckpoint, LambdaCallback
IS_COLAB = False
if IS_COLAB:
from google.colab import drive
drive.mount('/content/drive')
data_path = "/content/drive/My Drive/battery-state-estimation/battery-state-estimation/"
else:
data_path = "../../"
sys.path.append(data_path)
from data_processing.lg_dataset import LgData
###Output
Using TensorFlow backend.
###Markdown
Config logging
###Code
reload(logging)
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', level=logging.DEBUG, datefmt='%Y/%m/%d %H:%M:%S')
###Output
_____no_output_____
###Markdown
Load Data
###Code
train_names = [
'25degC/551_LA92',
'25degC/551_Mixed1',
'25degC/551_Mixed2',
'25degC/551_UDDS',
'25degC/551_US06',
'25degC/552_Mixed3',
'25degC/552_Mixed7',
'25degC/552_Mixed8',
]
test_names = [
'25degC/552_Mixed4',
'25degC/552_Mixed5',
'25degC/552_Mixed6',
]
steps = 300
lg_data = LgData(data_path)
cycles = lg_data.get_discharge_whole_cycle(train_names, test_names, output_capacity=False)
train_x, train_y, test_x, test_y = lg_data.get_discharge_multiple_step(cycles, steps)
train_y = lg_data.keep_only_y_end(train_y, steps)
test_y = lg_data.keep_only_y_end(test_y, steps)
# Model definition
#opt = tf.keras.optimizers.Adam(lr=0.00001)
#model = Sequential()
#model.add(LSTM(256, activation='selu',
# return_sequences=True,
# input_shape=(train_x.shape[1], train_x.shape[2])))
#model.add(LSTM(256, activation='selu', return_sequences=False))
#model.add(Dense(256, activation='selu'))
#model.add(Dense(128, activation='selu'))
#model.add(Dense(1, activation='linear'))
#model.summary()
#model.compile(optimizer=opt, loss='huber', metrics=['mse', 'mae', 'mape', tf.keras.metrics.RootMeanSquaredError(name='rmse')])
experiment_name = '2020-12-06-19-44-14_lstm_soc_lg'
history = pd.read_csv(data_path + 'results/trained_model/%s_history.csv' % experiment_name)
model = keras.models.load_model(data_path + 'results/trained_model/%s.h5' % experiment_name)
model.summary()
print(history)
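# Hedged extra: locate the best epoch by validation loss in the loaded history
print('best epoch:', history['val_loss'].idxmin(),
      'val_loss:', history['val_loss'].min())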
###Output
Unnamed: 0 loss mse mae mape rmse val_loss \
0 0 0.007985 0.015969 0.086286 52.072884 0.126369 0.000560
1 1 0.000647 0.001295 0.028370 12.785918 0.035986 0.000469
2 2 0.000446 0.000893 0.022999 10.227242 0.029876 0.000361
3 3 0.000401 0.000802 0.021868 9.565425 0.028311 0.000369
4 4 0.000347 0.000694 0.020349 8.510178 0.026346 0.000330
.. ... ... ... ... ... ... ...
95 95 0.000154 0.000308 0.014032 7.002544 0.017540 0.000262
96 96 0.000151 0.000302 0.013452 6.005038 0.017375 0.000264
97 97 0.000122 0.000243 0.012013 5.263407 0.015600 0.000142
98 98 0.000097 0.000193 0.010652 4.854163 0.013894 0.000136
99 99 0.000131 0.000263 0.012581 5.561449 0.016216 0.000188
val_mse val_mae val_mape val_rmse
0 0.001120 0.026847 11.940112 0.033467
1 0.000939 0.024750 11.601175 0.030635
2 0.000721 0.020993 9.335139 0.026856
3 0.000737 0.022172 9.051208 0.027151
4 0.000659 0.020589 8.731447 0.025679
.. ... ... ... ...
95 0.000524 0.019101 7.324101 0.022892
96 0.000528 0.018558 7.422750 0.022979
97 0.000283 0.013665 5.991758 0.016826
98 0.000271 0.013495 5.336795 0.016472
99 0.000376 0.015249 7.439945 0.019386
[100 rows x 11 columns]
###Markdown
Testing
###Code
results = model.evaluate(test_x, test_y)
print(results)
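# Hedged convenience: pair each value with its metric name (metrics_names is
# standard Keras API) so the flat list above is easier to read.
print(dict(zip(model.metrics_names, results)))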
###Output
23/23 [==============================] - 7s 319ms/step - loss: 2.1433e-04 - mean_squared_error: 4.2867e-04 - mean_absolute_error: 0.0156 - mean_absolute_percentage_error: 7.9237 - rmse: 0.0207
[0.00021433422807604074, 0.0004286684561520815, 0.015640864148736, 7.92370080947876, 0.02070431038737297]
###Markdown
Data Visualization
###Code
fig = go.Figure()
fig.add_trace(go.Scatter(y=history['loss'],
mode='lines', name='train'))
fig.add_trace(go.Scatter(y=history['val_loss'],
mode='lines', name='validation'))
fig.update_layout(title='Loss trend',
xaxis_title='epoch',
yaxis_title='loss',
width=1400,
height=600)
fig.show()
train_predictions = model.predict(train_x)
cycle_num = 0
steps_num = 8000
step_index = np.arange(cycle_num*steps_num, (cycle_num+1)*steps_num)
fig = go.Figure()
fig.add_trace(go.Scatter(x=step_index, y=train_predictions.flatten()[cycle_num*steps_num:(cycle_num+1)*steps_num],
mode='lines', name='SoC predicted'))
fig.add_trace(go.Scatter(x=step_index, y=train_y.flatten()[cycle_num*steps_num:(cycle_num+1)*steps_num],
mode='lines', name='SoC actual'))
fig.update_layout(title='Results on training',
xaxis_title='Step',
yaxis_title='SoC percentage',
width=1400,
height=600)
fig.show()
test_predictions = model.predict(test_x)
cycle_num = 0
steps_num = 8000
step_index = np.arange(cycle_num*steps_num, (cycle_num+1)*steps_num)
fig = go.Figure()
fig.add_trace(go.Scatter(x=step_index, y=test_predictions.flatten()[cycle_num*steps_num:(cycle_num+1)*steps_num],
mode='lines', name='SoC predicted'))
fig.add_trace(go.Scatter(x=step_index, y=test_y.flatten()[cycle_num*steps_num:(cycle_num+1)*steps_num],
mode='lines', name='SoC actual'))
fig.update_layout(title='Results on testing',
xaxis_title='Step',
yaxis_title='SoC percentage',
width=1400,
height=600)
fig.show()
###Output
_____no_output_____ |
examples/sdf_parser_physics_engines.ipynb | ###Markdown
Physics engines

The description of the physics engine's parameters is one of the most important parts of the world description in Gazebo. A `<world>` can only have one physics element. It can use the following engines:

* ODE (`ode`)
* Bullet (`bullet`)
* Simbody (`simbody`)
* DART (`dart`)

and a specific SDF block is available to describe each engine.
###Code
# Import the element creator
from pcg_gazebo.parsers.sdf import create_sdf_element
# Create first the global physics block
physics = create_sdf_element('physics')
print(physics)
# The physics engine's configuration modes are named after the
# engine being used, the default being `ode`
physics.reset(mode='ode', with_optional_elements=True)
print(physics)
physics.reset(mode='bullet', with_optional_elements=True)
print(physics)
physics.reset(mode='simbody', with_optional_elements=True)
print(physics)
###Output
<physics name="default_physics" default="1" type="simbody">
<max_step_size>0.001</max_step_size>
<real_time_factor>1</real_time_factor>
<real_time_update_rate>1000</real_time_update_rate>
<max_contacts>20</max_contacts>
<simbody>
<min_step_size>0.0001</min_step_size>
<accuracy>0.001</accuracy>
<max_transient_velocity>0.01</max_transient_velocity>
<contact>
<stiffness>100000000.0</stiffness>
<dissipation>100</dissipation>
<plastic_coef_restitution>0.5</plastic_coef_restitution>
<plastic_impact_velocity>0.5</plastic_impact_velocity>
<static_friction>0.9</static_friction>
<dynamic_friction>0.9</dynamic_friction>
<viscous_friction>0</viscous_friction>
<override_impact_capture_velocity>0.001</override_impact_capture_velocity>
<override_stiction_transition_velocity>0.001</override_stiction_transition_velocity>
</contact>
</simbody>
</physics>
|
benchmarking/Benchmarking_python.ipynb | ###Markdown
Load CSV
###Code
import pandas as pd
from sqlalchemy import create_engine  # used by the database cells below

df = pd.read_csv("/home/sahil/Documents/code/store_project/scripts/Names.csv", header=0)
df.head(5)
###Output
_____no_output_____
###Markdown
Load CSV into Local Postgres Database
###Code
POSTGRES_USERNAME = "sahil"
POSTGRES_PASSWORD = "zxcvbnm"
POSTGRES_DBNAME = "postgres"
POSTGRES_HOST = "localhost"
url = 'postgresql://{}:{}@{}:{}/{}'.format(POSTGRES_USERNAME, POSTGRES_PASSWORD, POSTGRES_HOST, 5432, POSTGRES_DBNAME)
print(url)
engine = create_engine(url)
# %%timeit -r 3
df.to_sql('transaction_data',engine, if_exists='append')
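# Hedged tip: for large frames, to_sql's chunksize and method='multi'
# options usually speed up the insert considerably, e.g.:
# df.to_sql('transaction_data', engine, if_exists='append',
#           chunksize=10000, method='multi')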
###Output
_____no_output_____
###Markdown
Load CSV into AWS Postgres Database. Bear in mind, this timing is affected by your internet speed and RDS instance type.
###Code
POSTGRES_USERNAME = "sahil"
POSTGRES_PASSWORD = "Asdfg1234!"
POSTGRES_DBNAME = "postgres"
POSTGRES_HOST = "postgresdb2.cznthudneeub.eu-west-1.rds.amazonaws.com"
url = 'postgresql://{}:{}@{}:{}/{}'.format(POSTGRES_USERNAME, POSTGRES_PASSWORD, POSTGRES_HOST, 5432, POSTGRES_DBNAME)
print(url)
engine = create_engine(url)
df.head(1000).to_sql('cfs_python',engine, if_exists='replace')
###Output
_____no_output_____
###Markdown
Load CSV into Local MySQL Database
###Code
POSTGRES_USERNAME = "root"
POSTGRES_PASSWORD = "zxcvbnm"
POSTGRES_DBNAME = "mysql"
POSTGRES_HOST = "localhost"
url = 'mysql+pymysql://{}:{}@{}:{}/{}'.format(POSTGRES_USERNAME, POSTGRES_PASSWORD, POSTGRES_HOST, 3306, POSTGRES_DBNAME)
print(url)
engine = create_engine(url)
%%timeit -r 3
df.head(100000).to_sql('cfs_python',engine, if_exists='replace')
###Output
_____no_output_____
###Markdown
Load CSV into AWS MySQL Database
###Code
POSTGRES_USERNAME = "sahil"
POSTGRES_PASSWORD = "Asdfg1234!"
POSTGRES_DBNAME = "sahil"
POSTGRES_HOST = "mysql2.cznthudneeub.eu-west-1.rds.amazonaws.com"
url = 'mysql+pymysql://{}:{}@{}:{}/{}'.format(POSTGRES_USERNAME, POSTGRES_PASSWORD, POSTGRES_HOST, 3306, POSTGRES_DBNAME)
print(url)
engine = create_engine(url)
# %%timeit -r 3
df.head(10000).to_sql('cfs_python',engine, if_exists='replace')
###Output
_____no_output_____
###Markdown
Load CSV into Local SQL Server Database [Install SQL Server](https://docs.microsoft.com/en-us/sql/linux/quickstart-install-connect-ubuntu?view=sql-server-2017)
###Code
import pymssql  # the mssql+pymssql URL below is served by pymssql, not pymysql
POSTGRES_USERNAME = "SA"
POSTGRES_PASSWORD = "Asdfg1234!"
POSTGRES_DBNAME = "sparkdemodb"
POSTGRES_HOST = "localhost"
url = 'mssql+pymssql://{}:{}@{}:{}/{}'.format(POSTGRES_USERNAME, POSTGRES_PASSWORD, POSTGRES_HOST, 1433, POSTGRES_DBNAME)  # SQL Server listens on 1433 by default (3306 is MySQL's port)
print(url)
engine = create_engine(url)
engine.table_names()
engine.execute("CREATE DATABASE TestDB1")
###Output
_____no_output_____ |
0.8.0/_downloads/508c2f401de5e085afa802c76e7532f4/plot_display.ipynb | ###Markdown
In this example, notice that we used 'time' for both axis labels. In general, any of the supported modes can be used for either axis. For example, we could also plot the chroma covariance plot with chroma decorations on each axis:
###Code
ccov = np.cov(chroma)
fig, ax = plt.subplots()
img = librosa.display.specshow(ccov, y_axis='chroma', x_axis='chroma',
key='Eb:maj', ax=ax)
ax.set(title='Chroma covariance')
fig.colorbar(img, ax=ax)
###Output
_____no_output_____
###Markdown
Color maps

You may have noticed that the color mappings for the images above were selected automatically by `specshow`. This is done by `librosa.display.cmap` according to the following heuristic:

- If the data is boolean, use black-and-white
- If the data is (mostly) positive or (mostly) negative, use a sequential colormap
- If the data contains both positive and negative values, use a diverging colormap

The default sequential colormap is 'magma', which is perceptually uniform and converts gracefully to grayscale. You can always override this automatic colormap selection by setting an explicit `cmap`:
###Code
fig, ax = plt.subplots()
img = librosa.display.specshow(S_db, cmap='gray_r', y_axis='log', x_axis='time', ax=ax)
ax.set(title='Inverted grayscale')
fig.colorbar(img, ax=ax, format="%+2.f dB")
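# Hedged check of the automatic selection described above: librosa exposes
# the heuristic directly, and for the (mostly negative) dB data it should
# return a sequential colormap.
print(librosa.display.cmap(S_db))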
###Output
_____no_output_____
###Markdown
`specshow` uses `matplotlib.pyplot.pcolormesh` to generate the underlying image. Any parameters to `pcolormesh` can be passed through from `specshow`, for example, to set explicit bounds on the minimum and maximum ranges for colors. This can be helpful when centering divergent colormaps around 0 (or some other reference point).
###Code
max_var = np.max(np.abs(ccov))
fig, ax = plt.subplots()
img = librosa.display.specshow(ccov, vmin=-max_var, vmax=max_var,
y_axis='chroma', x_axis='chroma',
key='Eb:maj', ax=ax)
ax.set(title='Chroma covariance')
fig.colorbar(img, ax=ax)
###Output
_____no_output_____
###Markdown
Multiple plots

Often, we'll want to show multiple synchronized features simultaneously. This can be done using matplotlib's `subplot` mechanism and sharing axes. There are many examples of this throughout the librosa documentation, but here we'll go through it step by step.
###Code
# Construct a subplot grid with 3 rows and 1 column, sharing the x-axis)
fig, ax = plt.subplots(nrows=3, ncols=1, sharex=True)
# On the first subplot, show the original spectrogram
img1 = librosa.display.specshow(S_db, x_axis='time', y_axis='log', ax=ax[0])
ax[0].set(title='STFT (log scale)')
# On the second subplot, show the mel spectrogram
img2 = librosa.display.specshow(M_db, x_axis='time', y_axis='mel', ax=ax[1])
ax[1].set(title='Mel')
# On the third subplot, show the chroma features
img3 = librosa.display.specshow(chroma, x_axis='time', y_axis='chroma',
key='Eb:maj', ax=ax[2])
ax[2].set(title='Chroma')
# To eliminate redundant axis labels, we'll use "label_outer" on all subplots:
for ax_i in ax:
ax_i.label_outer()
# And we can share colorbars:
fig.colorbar(img1, ax=[ax[0], ax[1]])
# Or have individual colorbars:
fig.colorbar(img3, ax=[ax[2]])
# We can then even do fancy things like zoom into a particular time and frequency
# region. Since the axes are shared, this will apply to all three subplots at once.
ax[0].set(xlim=[1, 3]) # Zoom to seconds 1-3
###Output
_____no_output_____
###Markdown
Non-uniform axes

All of the examples so far have used either uniformly, linearly, or geometrically spaced axes. But sometimes, we have non-uniform sampling of data, and we'd like to plot it in natural coordinates. One example of this is when using beat-synchronous features in the common case where the tempo is not exactly fixed. To demonstrate this, we'll use a longer example clip. To specify non-uniform axis sampling, you will need to provide the `x_coords` (or `y_coords`) array indicating the position of each sample, as demonstrated below.
###Code
y, sr = librosa.load(librosa.ex('nutcracker'))
chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
# beats contains the frame indices of each detected beat
# for synchronization and visualization, we'll need to expand this
# to cover the limits of the data. This can be done as follows:
beats = librosa.util.fix_frames(beats, x_min=0, x_max=chroma.shape[1])
# Now beat-synchronize the chroma features
chroma_sync = librosa.util.sync(chroma, beats, aggregate=np.median)
# For visualization, we can convert to time (in seconds)
beat_times = librosa.frames_to_time(beats)
# We'll plot the synchronized and unsynchronized features next
# to each other
fig, ax = plt.subplots(nrows=2, sharex=True)
img = librosa.display.specshow(chroma, y_axis='chroma', x_axis='time', ax=ax[0],
key='Eb:maj')
ax[0].set(title='Uniform time sampling')
ax[0].label_outer()
librosa.display.specshow(chroma_sync, y_axis='chroma', x_axis='time',
x_coords=beat_times, ax=ax[1], key='Eb:maj')
ax[1].set(title='Beat-synchronous sampling')
fig.colorbar(img, ax=ax)
# For clarity, we'll zoom in on a 15-second patch
ax[1].set(xlim=[10, 25])
###Output
_____no_output_____
###Markdown
Using display.specshow

This notebook gives a more in-depth demonstration of all things that `specshow` can do to help generate beautiful visualizations of spectro-temporal data.
###Code
# Code source: Brian McFee
# License: ISC
# sphinx_gallery_thumbnail_number = 15
###Output
_____no_output_____
###Markdown
All of librosa's plotting functions rely on matplotlib. To demonstrate everything we can do, it will help to import matplotlib's pyplot API here.
###Code
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display
###Output
_____no_output_____
###Markdown
First, we'll load in a demo track
###Code
y, sr = librosa.load(librosa.ex('trumpet'))
###Output
_____no_output_____
###Markdown
The first thing we might want to do is display an ordinary (linear) spectrogram. We'll do this by first computing the short-time Fourier transform, and then mapping the magnitudes to a decibel scale.
###Code
D = librosa.stft(y) # STFT of y
S_db = librosa.amplitude_to_db(np.abs(D), ref=np.max)
###Output
_____no_output_____
###Markdown
If you're familiar with matplotlib already, you may know that there are two ways of using it: the `pyplot` interface and the object-oriented interface. Both are supported by librosa, as we'll show here. First, the pyplot interface:
###Code
plt.figure()
librosa.display.specshow(S_db)
plt.colorbar()
###Output
_____no_output_____
###Markdown
And now the object-oriented interface
###Code
fig, ax = plt.subplots()
img = librosa.display.specshow(S_db, ax=ax)
fig.colorbar(img, ax=ax)
###Output
_____no_output_____
###Markdown
Both figures are identical, but they use different programming interfaces to construct. Most people find the pyplot interface to be quicker to learn, but the object-oriented interface can be a little more flexible for complex figures. For the remainder of this example, we'll use the object-oriented interface.

Decorating your plot

The figure above conveys the basic content of the spectrogram, but it's missing axis labels. Without that information, it's impossible for a reader to know how to interpret the visualization. specshow provides many helpers to automatically decorate the axes of your plot. For the plot above, our x-axis corresponds to time, and our y-axis corresponds to linearly spaced frequencies produced by the discrete Fourier transform. We can tell specshow to decorate the axes accordingly:
###Code
fig, ax = plt.subplots()
img = librosa.display.specshow(S_db, x_axis='time', y_axis='linear', ax=ax)
ax.set(title='Now with labeled axes!')
fig.colorbar(img, ax=ax, format="%+2.f dB")
###Output
_____no_output_____
###Markdown
This is much better already! Note that we also added a format string to the colorbar, so readers know how to read the color scale.

Changing axis scales

The linear frequency scale is sometimes helpful, but often it can be difficult to read. Alternatively, it is common to use a logarithmic frequency axis. This has the benefit that every octave occupies a constant vertical extent. We can tell specshow to use log-scaled frequency axes just as above:
###Code
fig, ax = plt.subplots()
img = librosa.display.specshow(S_db, x_axis='time', y_axis='log', ax=ax)
ax.set(title='Using a logarithmic frequency axis')
fig.colorbar(img, ax=ax, format="%+2.f dB")
###Output
_____no_output_____
###Markdown
Changing the analysis parameters

The default parameter settings used by librosa (e.g., `sr=22050`, `hop_length=512`, etc.) may not be appropriate for every signal. If you change a parameter from its default value, e.g. when computing an STFT, you can pass that same parameter to `specshow`. This ensures that axis scales (e.g. time or frequency) are computed correctly.
###Code
fig, ax = plt.subplots()
D_highres = librosa.stft(y, hop_length=256, n_fft=4096)
S_db_hr = librosa.amplitude_to_db(np.abs(D_highres), ref=np.max)
img = librosa.display.specshow(S_db_hr, hop_length=256, x_axis='time', y_axis='log',
ax=ax)
ax.set(title='Higher time and frequency resolution')
fig.colorbar(img, ax=ax, format="%+2.f dB")
###Output
_____no_output_____
###Markdown
Note that only the parameters which are strictly necessary are supported by `specshow`. For example, without the `hop_length`, we wouldn't know how to translate frame indices to time indices. However, `n_fft` is *not* needed, because it can be inferred from the shape of the input spectrogram. A full list of the supported parameters is provided in the `librosa.display.specshow` documentation.

Other types of spectral data

The examples above illustrate how to plot linear spectrograms, but librosa provides many kinds of spectral representations: Mel-scaled, constant-Q, variable-Q, chromagrams, tempograms, etc. specshow can plot these just as well. For example, a Mel spectrogram can be displayed as follows:
###Code
fig, ax = plt.subplots()
M = librosa.feature.melspectrogram(y=y, sr=sr)
M_db = librosa.power_to_db(M, ref=np.max)
img = librosa.display.specshow(M_db, y_axis='mel', x_axis='time', ax=ax)
ax.set(title='Mel spectrogram display')
fig.colorbar(img, ax=ax, format="%+2.f dB")
###Output
_____no_output_____
###Markdown
Constant-Q plots, and other logarithmically scaled frequency representations such as Variable-Q or `iirt`, can be decorated using either the frequencies (Hz) or their note names in scientific pitch notation:
###Code
C = librosa.cqt(y=y, sr=sr)
C_db = librosa.amplitude_to_db(np.abs(C), ref=np.max)
fig, ax = plt.subplots()
librosa.display.specshow(C_db, y_axis='cqt_hz', x_axis='time', ax=ax)
ax.set(title='Frequency (Hz) axis decoration')
fig, ax = plt.subplots()
librosa.display.specshow(C_db, y_axis='cqt_note', x_axis='time', ax=ax)
ax.set(title='Pitch axis decoration')
###Output
_____no_output_____
###Markdown
In the latter case, the underlying data representation is still measured in Hz; only the tick labels are changed. Chroma representations don't have a fixed frequency axis, and instead aggregate information across all frequencies corresponding to a given pitch class. specshow can plot these too:
###Code
chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
fig, ax = plt.subplots()
img = librosa.display.specshow(chroma, y_axis='chroma', x_axis='time', ax=ax)
ax.set(title='Chromagram demonstration')
fig.colorbar(img, ax=ax)
###Output
_____no_output_____
###Markdown
If you also happen to know the key of the piece being analyzed, you can pass this to specshow and it will spell the notes properly:
###Code
fig, ax = plt.subplots()
img = librosa.display.specshow(chroma, y_axis='chroma', x_axis='time',
key='Eb:maj', ax=ax)
ax.set(title='Chromagram explicitly in Eb:maj')
fig.colorbar(img, ax=ax)
###Output
_____no_output_____
###Markdown
This will also work for 'cqt_note' mode. Indian notation systems
###Code
# The examples above use Western music notation to identify pitch classes, but we can
# also decorate axes with either Hindustani or Carnatic svara classes.
#
# These are specified by using `y_axis='chroma_h'` or `'chroma_c'`, respectively.
#
# Just as with key identification in the chroma example above, you can specify the
# thaat (Hindustani) or melakarta number or name (Carnatic) to notate the plot.
###Output
_____no_output_____
###Markdown
For example, the example above is in Eb:maj (or, more accurately, F:dorian), which we can also represent in Hindustani notation as Sa=5 (F) and 'kafi' thaat:
###Code
fig, ax = plt.subplots()
img = librosa.display.specshow(chroma, y_axis='chroma_h', x_axis='time',
Sa=5, thaat='kafi', ax=ax)
ax.set(title='Chromagram with Hindustani notation')
fig.colorbar(img, ax=ax)
###Output
_____no_output_____
###Markdown
In Carnatic notation, we would use melakarta 22. Note: `thaat` is optional for Hindustani notation, but `mela` is required for Carnatic.
###Code
fig, ax = plt.subplots()
img = librosa.display.specshow(chroma, y_axis='chroma_c', x_axis='time',
Sa=5, mela=22, ax=ax)
ax.set(title='Chromagram with Carnatic notation')
fig.colorbar(img, ax=ax)
###Output
_____no_output_____
###Markdown
These notation schemes can also be used in cqt plots by specifying `y_axis='cqt_svara'`. In this mode, `Sa` must be specified in Hz. Carnatic notation is used if `mela` is provided, and Hindustani is used if not. Individual svara are only notated if the display range is sufficiently small, so we'll zoom into a single octave for this example.
###Code
Sa = librosa.note_to_hz('F4')
fig, ax = plt.subplots()
librosa.display.specshow(C_db, y_axis='cqt_svara', Sa=Sa, x_axis='time', ax=ax)
ax.set(title='Hindustani decoration',
ylim=[Sa, 2*Sa])
fig, ax = plt.subplots()
librosa.display.specshow(C_db, y_axis='cqt_svara', Sa=Sa, mela=22, x_axis='time', ax=ax)
ax.set(title='Carnatic decoration',
ylim=[Sa, 2*Sa])
###Output
_____no_output_____
###Markdown
Non-spectral data

specshow can also be used for data that isn't exactly spectro-temporal. One common application is recurrence (self-similarity) plots, which are time-by-time, as illustrated below.
###Code
R = librosa.segment.recurrence_matrix(chroma, mode='affinity')
fig, ax = plt.subplots()
img = librosa.display.specshow(R, y_axis='time', x_axis='time', ax=ax)
ax.set(title='Recurrence / self-similarity')
fig.colorbar(img, ax=ax)
###Output
_____no_output_____ |
members/keve/6.HF/course_7_initial_task.ipynb | ###Markdown
find the number of strings in a list of strings that contain a given letter, upper or lower case:
> ```["abfg", "Bcd", "Ijk"], "b" -> 2```
###Code
from jkg_evaluators import letter_occurrences
list_of_strings = ["abfg", "Bcd", "Ijk"]
letter = "b"

def find_letter_occurrences(list_of_strings, letter):
    # flags holds one entry per input string: 1 if that string contains the letter, 0 otherwise
    flags = []
    for q in list_of_strings:
        flags.append(0)
    # walk over the strings of list_of_strings by index i
    for i in range(len(list_of_strings)):
        # inspect every character f of the i-th string
        for f in list_of_strings[i]:
            # case-insensitive comparison of the current character with the searched letter
            if f.upper() == letter.upper():
                # mark the i-th string as containing the letter
                flags[i] = 1
    # sum up the 0/1 flags to count the matching strings
    count = 0
    for l in flags:
        count += l
    # return the number of strings that contain the letter
    return count

print(find_letter_occurrences(list_of_strings, letter))
letter_occurrences.evaluate(find_letter_occurrences)
###Output
- success rate: 495/495 (100.0%)
- error count: 0
- best performance: 1
- worst performance: 1
- mean performance: 1.0
|
6- Data Visualization/Top 5 Data Visualization Libraries Tutorial.ipynb | ###Markdown
Top 5 Data Visualization Libraries Tutorial

last update: 25/01/2019

> You may be interested to have a look at 10 Steps to Become a Data Scientist:

1. [Learn Python](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-1)
2. [Python Packages](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-2)
3. [Mathematics and Linear Algebra](https://www.kaggle.com/mjbahmani/linear-algebra-for-data-scientists)
4. [Programming & Analysis Tools](https://www.kaggle.com/mjbahmani/20-ml-algorithms-15-plot-for-beginners)
5. [Big Data](https://www.kaggle.com/mjbahmani/a-data-science-framework-for-quora)
6. You are in the sixth step
7. [Data Cleaning](https://www.kaggle.com/mjbahmani/machine-learning-workflow-for-house-prices)
8. [How to solve a Problem?](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-2)
9. [Machine Learning](https://www.kaggle.com/mjbahmani/a-comprehensive-ml-workflow-with-python)
10. [Deep Learning](https://www.kaggle.com/mjbahmani/top-5-deep-learning-frameworks-tutorial)

---

You can Fork and Run this kernel on GitHub: [GitHub](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist)

---

**I hope you find this kernel helpful and some UPVOTES would be very much appreciated**

Notebook Content

1. [Introduction](#1)
1. [Loading Packages](#2)
    1. [version](#21)
    1. [Setup](#22)
    1. [Data Collection](#23)
1. [Data Visualization Libraries](#4)
1. [Matplotlib](#4)
    1. [Scatterplots](#41)
    1. [Line Plots](#42)
    1. [Bar Charts](#43)
    1. [Histograms](#44)
    1. [Box and Whisker Plots](#45)
    1. [Heatmaps](#46)
    1. [Animations](#47)
    1. [Interactivity](#48)
    1. [DataFrame.plot](#49)
1. [Seaborn](#5)
    1. [Seaborn Vs Matplotlib](#51)
    1. [Useful Python Data Visualization Libraries](#52)
1. [Plotly](#6)
    1. [New to Plotly?](#61)
    1. [Plotly Offline from Command Line](#62)
1. [Bokeh](#7)
1. [networkx](#8)
1. [Read more](#9)
    1. [Courses](#91)
    1. [Ebooks](#92)
    1. [Cheat sheet](#93)
1. [Conclusion](#10)
    1. [References](#11)

1- Introduction

If you've followed my other kernels so far, you have noticed that for those who are beginners, I've introduced a course "10 Steps to Become a Data Scientist". In this kernel we will start another step with each other. There are plenty of Kernels that can help you learn Python's libraries from scratch, but here in Kaggle I want to analyse Meta Kaggle, a popular dataset. After reading, you can use it to analyse other real datasets and use it as a template to deal with ML problems. It is clear that everyone in this community is familiar with the Meta Kaggle dataset, but if you need to review your information about the dataset please visit [meta-kaggle](https://www.kaggle.com/kaggle/meta-kaggle). I am open to getting your feedback for improving this **kernel** together.

2- Loading Packages

In this kernel we are using the following packages:
###Code
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from bokeh.io import push_notebook, show, output_notebook
import mpl_toolkits.axes_grid1.inset_locator as mpl_il
from bokeh.plotting import figure, output_file, show
from bokeh.io import show, output_notebook
import matplotlib.animation as animation
from matplotlib.figure import Figure
from sklearn.cluster import KMeans
import plotly.figure_factory as ff
import matplotlib.pylab as pylab
from ipywidgets import interact
import plotly.graph_objs as go
import matplotlib.pyplot as plt
from bokeh.plotting import figure
from sklearn import datasets
import plotly.offline as py
from random import randint
from plotly import tools
import matplotlib as mpl
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib
import warnings
import string
import numpy
import csv
import os
###Output
_____no_output_____
###Markdown
2-1 version
###Code
print('matplotlib: {}'.format(matplotlib.__version__))
print('seaborn: {}'.format(sns.__version__))
print('pandas: {}'.format(pd.__version__))
print('numpy: {}'.format(np.__version__))
#print('wordcloud: {}'.format(wordcloud.version))
###Output
_____no_output_____
###Markdown
2-2 Setup: a few tiny adjustments for better **code readability**
###Code
sns.set(style='white', context='notebook', palette='deep')
pylab.rcParams['figure.figsize'] = 12,8
warnings.filterwarnings('ignore')
mpl.style.use('ggplot')
sns.set_style('white')
%matplotlib inline
###Output
_____no_output_____
###Markdown
2-3 Data Collection

**Data collection** is the process of gathering and measuring data, information or any variables of interest in a standardized and established manner that enables the collector to answer or test hypotheses and evaluate outcomes of the particular collection. [techopedia]

I start data collection by loading the Users and Kernels datasets into **Pandas DataFrames**.
###Code
# import kernels and users to play with it (MJ Bahmani)
#command--> 1
users = pd.read_csv("../input/Users.csv")
kernels = pd.read_csv("../input/Kernels.csv")
messages = pd.read_csv("../input/ForumMessages.csv")
###Output
_____no_output_____
###Markdown
> * Each row is an observation (also known as: sample, example, instance, record)
> * Each column is a feature (also known as: predictor, attribute, independent variable, input, regressor, covariate)

[Go to top](#top)
###Code
#command--> 2
users.sample(1)
###Output
_____no_output_____
###Markdown
Please **replace** your username and find your userid. We suppose that userid==authoruserid and use userid for both the kernels and users datasets.
###Code
username="mjbahmani"
userid = int(users[users['UserName'] == username].Id)  # use the username variable set above
userid
###Output
_____no_output_____
###Markdown
We can just use **dropna()** (be careful, sometimes you should not do this!)
###Code
# remove rows that have NA's
print('Before Dropping', messages.shape)
#command--> 3
messages = messages.dropna()
print('After Dropping', messages.shape)
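# Hedged alternative: instead of dropping rows you could fill the gaps,
# e.g. messages = messages.fillna("")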
###Output
_____no_output_____
###Markdown
2-3-1 Features

Features can be of the following types:
1. numeric
1. categorical
1. ordinal
1. datetime
1. coordinates

Find the type of features in **Meta Kaggle**?! For getting some information about the dataset you can use the **info()** command. [Go to top](#top)
###Code
#command--> 4
print(users.info())
###Output
_____no_output_____
###Markdown
2-3-2 Explore the Dataset

1. Dimensions of the dataset.
1. Peek at the data itself.
1. Statistical summary of all attributes.
1. Breakdown of the data by the class variable.

Don't worry, each look at the data is **one command**. These are useful commands that you can use again and again on future projects. [Go to top](#top)
###Code
# shape
#command--> 5
print(users.shape)
#columns*rows
#command--> 6
users.size
###Output
_____no_output_____
###Markdown
We can get a quick idea of how many instances (rows) and how many attributes (columns) the data contains with the shape property. You can see the number of unique items for a column, e.g. Medal, with the command below:
###Code
#command--> 7
kernels['Medal'].unique()
#command--> 8
kernels["Medal"].value_counts()
###Output
_____no_output_____
###Markdown
To check the first 5 rows of the data set, we can use head(5).
###Code
kernels.head(5)
###Output
_____no_output_____
###Markdown
To check out the last 5 rows of the data set, we use the tail() function
###Code
#command--> 9
users.tail()
###Output
_____no_output_____
###Markdown
To pop up 5 random rows from the data set, we can use the **sample(5)** function
###Code
kernels.sample(5)
###Output
_____no_output_____
###Markdown
To give a statistical summary about the dataset, we can use **describe()**
###Code
kernels.describe()
###Output
_____no_output_____
###Markdown
2-3-5 Find yourself in the Users dataset
###Code
#command--> 12
users[users['Id']==userid]
###Output
_____no_output_____
###Markdown
2-3-6 Find your kernels in Kernels dataset
###Code
#command--> 13
yourkernels=kernels[kernels['AuthorUserId']==userid]
yourkernels.head(2)
###Output
_____no_output_____
###Markdown
3- Data Visualization Libraries

Before you start learning, here is an overview of 10 interdisciplinary **Python data visualization libraries**, from the well-known to the obscure.

* 1- matplotlib: matplotlib is the O.G. of Python data visualization libraries. Despite being over a decade old, it's still the most widely used library for plotting in the Python community. It was designed to closely resemble MATLAB, a proprietary programming language developed in the 1980s.
* 2- Seaborn: Seaborn harnesses the power of matplotlib to create beautiful charts in a few lines of code. The key difference is Seaborn's default styles and color palettes, which are designed to be more aesthetically pleasing and modern. Since Seaborn is built on top of matplotlib, you'll need to know matplotlib to tweak Seaborn's defaults.
* 3- ggplot: ggplot is based on ggplot2, an R plotting system, and concepts from The Grammar of Graphics. ggplot operates differently than matplotlib: it lets you layer components to create a complete plot. For instance, you can start with axes, then add points, then a line, a trendline, etc. Although The Grammar of Graphics has been praised as an "intuitive" method for plotting, seasoned matplotlib users might need time to adjust to this new mindset.
* 4- Bokeh: Like ggplot, Bokeh is based on The Grammar of Graphics, but unlike ggplot, it's native to Python, not ported over from R. Its strength lies in the ability to create interactive, web-ready plots, which can be easily outputted as JSON objects, HTML documents, or interactive web applications. Bokeh also supports streaming and real-time data.
* 5- pygal: Like Bokeh and Plotly, pygal offers interactive plots that can be embedded in the web browser. Its prime differentiator is the ability to output charts as SVGs. As long as you're working with smaller datasets, SVGs will do you just fine. But if you're making charts with hundreds of thousands of data points, they'll have trouble rendering and become sluggish.
* 6- Plotly: You might know Plotly as an online platform for data visualization, but did you also know you can access its capabilities from a Python notebook? Like Bokeh, Plotly's forte is making interactive plots, but it offers some charts you won't find in most libraries, like contour plots, dendograms, and 3D charts.
* 7- geoplotlib: geoplotlib is a toolbox for creating maps and plotting geographical data. You can use it to create a variety of map-types, like choropleths, heatmaps, and dot density maps. You must have Pyglet (an object-oriented programming interface) installed to use geoplotlib. Nonetheless, since most Python data visualization libraries don't offer maps, it's nice to have a library dedicated solely to them.
* 8- Gleam: Gleam is inspired by R's Shiny package. It allows you to turn analyses into interactive web apps using only Python scripts, so you don't have to know any other languages like HTML, CSS, or JavaScript. Gleam works with any Python data visualization library. Once you've created a plot, you can build fields on top of it so users can filter and sort data.
* 9- missingno: Dealing with missing data is a pain. missingno allows you to quickly gauge the completeness of a dataset with a visual summary, instead of trudging through a table. You can filter and sort data based on completion or spot correlations with a heatmap or a dendrogram.
* 10- Leather: Leather's creator, Christopher Groskopf, puts it best: "Leather is the Python charting library for those who need charts now and don't care if they're perfect." It's designed to work with all data types and produces charts as SVGs, so you can scale them without losing image quality. Since this library is relatively new, some of the documentation is still in progress. The charts you can make are pretty basic, but that's the intention.
* 11- Chartify: Chartify is a Python library that makes it easy for data scientists to create charts. Why use Chartify?
    1. Consistent input data format: spend less time transforming data to get your charts to work. All plotting functions use a consistent tidy input data format.
    1. Smart default styles: create pretty charts with very little customization required.
    1. Simple API: we've attempted to make the API as intuitive and easy to learn as possible.
    1. Flexibility: Chartify is built on top of Bokeh, so if you do need more control you can always fall back on Bokeh's API.

Link: https://github.com/mjbahmani/Machine-Learning-Workflow-with-Python

At the end, a nice cheat sheet on how to best visualize your data. I think I will print it out as a good reminder of "best practices". Check out the link for the complete cheat sheet, also as a PDF.

![cheatsheet][1] [Reference][2]

[1]: http://s8.picofile.com/file/8340669884/53f6a826_d7df_4b55_81e6_7c23b3fff0a3_original.png
[2]: https://blog.modeanalytics.com/python-data-visualization-libraries/

4- Matplotlib

This Matplotlib tutorial takes you through the basics of Python data visualization:
1. the anatomy of a plot
1. pyplot
1. pylab
1. and much more

[Go to top](#top)

You can show matplotlib figures directly in the notebook by using the `%matplotlib notebook` and `%matplotlib inline` magic commands. `%matplotlib notebook` provides an interactive environment. We can use html cell magic to display the image.
###Code
#import matplotlib.pyplot as plt
plt.plot([1, 2, 3, 4], [10, 20, 25, 30], color='lightblue', linewidth=3)
plt.scatter([0.4, 3.8, 1.2, 2.5], [15, 25, 9, 26], color='darkgreen', marker='o')
plt.xlim(0.5, 4.5)
plt.show()
###Output
_____no_output_____
###Markdown
Simple and powerful visualizations can be generated using the **Matplotlib Python** Library. More than a decade old, it is the most widely-used library for plotting in the Python community. A wide range of graphs from histograms to heat plots to line plots can be plotted using Matplotlib. Many other libraries are built on top of Matplotlib and are designed to work in conjunction with analysis, it being the first Python data visualization library. Libraries like pandas and matplotlib offer "wrappers" over Matplotlib, allowing access to a number of Matplotlib's methods with less code. [7]

4-1 Scatterplots
###Code
x = np.array([1,2,3,4,5,6,7,8])
y = x
plt.figure()
plt.scatter(x, y) # similar to plt.plot(x, y, '.'), but the underlying child objects in the axes are not Line2D
x = np.array([1,2,3,4,5,6,7,8])
y = x
# create a list of colors for each point to have
# ['green', 'green', 'green', 'green', 'green', 'green', 'green', 'red']
colors = ['green']*(len(x)-1)
colors.append('red')
plt.figure()
# plot the point with size 100 and chosen colors
plt.scatter(x, y, s=100, c=colors)
plt.figure()
# plot a data series 'Tall students' in red using the first two elements of x and y
plt.scatter(x[:2], y[:2], s=100, c='red', label='Tall students')
# plot a second data series 'Short students' in blue using the last three elements of x and y
plt.scatter(x[2:], y[2:], s=100, c='blue', label='Short students')
x = np.random.randint(low=1, high=11, size=50)
y = x + np.random.randint(1, 5, size=x.size)
data = np.column_stack((x, y))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2,
figsize=(8, 4))
ax1.scatter(x=x, y=y, marker='o', c='r', edgecolor='b')
ax1.set_title('Scatter: $x$ versus $y$')
ax1.set_xlabel('$x$')
ax1.set_ylabel('$y$')
ax2.hist(data, bins=np.arange(data.min(), data.max()),
label=('x', 'y'))
ax2.legend(loc=(0.65, 0.8))
ax2.set_title('Frequencies of $x$ and $y$')
ax2.yaxis.tick_right()
# scatter plot of TotalVotes against TotalViews for your kernels
#command--> 19
x = yourkernels["TotalVotes"]
y = yourkernels["TotalViews"]
plt.scatter(x, y, label='your kernels')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
4-2 Line Plots
###Code
linear_data = np.array([1,2,3,4,5,6,7,8])
exponential_data = linear_data**2
plt.figure()
# plot the linear data and the exponential data
plt.plot(linear_data, '-o', exponential_data, '-o')
# plot another series with a dashed red line
plt.plot([22,44,55], '--r')
###Output
_____no_output_____
###Markdown
4-3 Bar Charts
###Code
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3)
new_xvals = []
# plot another set of bars, adjusting the new xvals to make up for the first set of bars plotted
for item in xvals:
new_xvals.append(item+0.3)
plt.bar(new_xvals, exponential_data, width = 0.3 ,color='red')
from random import randint
linear_err = [randint(0,15) for x in range(len(linear_data))]
# This will plot a new set of bars with errorbars using the list of random error values
plt.bar(xvals, linear_data, width = 0.3, yerr=linear_err)
# stacked bar charts are also possible
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3, color='b')
plt.bar(xvals, exponential_data, width = 0.3, bottom=linear_data, color='r')
# or use barh for horizontal bar charts
plt.figure()
xvals = range(len(linear_data))
plt.barh(xvals, linear_data, height = 0.3, color='b')
plt.barh(xvals, exponential_data, height = 0.3, left=linear_data, color='r')
# Initialize the plot
fig = plt.figure(figsize=(20,10))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
# or replace the three lines of code above by the following line:
#fig, (ax1, ax2) = plt.subplots(1,2, figsize=(20,10))
# Plot the data
ax1.bar([1,2,3],[3,4,5])
ax2.barh([0.5,1,2.5],[0,1,2])
# Show the plot
plt.show()
plt.figure()
# subplot with 1 row, 2 columns, and current axis is 1st subplot axes
plt.subplot(1, 2, 1)
linear_data = np.array([1,2,3,4,5,6,7,8])
plt.plot(linear_data, '-o')
exponential_data = linear_data**2
# subplot with 1 row, 2 columns, and current axis is 2nd subplot axes
plt.subplot(1, 2, 2)
plt.plot(exponential_data, '-o')
# plot exponential data on 1st subplot axes
plt.subplot(1, 2, 1)
plt.plot(exponential_data, '-x')
plt.figure()
ax1 = plt.subplot(1, 2, 1)
plt.plot(linear_data, '-o')
# pass sharey=ax1 to ensure the two subplots share the same y axis
ax2 = plt.subplot(1, 2, 2, sharey=ax1)
plt.plot(exponential_data, '-x')
###Output
_____no_output_____
###Markdown
4-4 Histograms
###Code
# create 2x2 grid of axis subplots
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
# draw n = 10, 100, 1000, and 10000 samples from the normal distribution and plot corresponding histograms
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample)
axs[n].set_title('n={}'.format(sample_size))
# repeat with number of bins set to 100
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample, bins=100)
axs[n].set_title('n={}'.format(sample_size))
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
plt.scatter(X,Y)
###Output
_____no_output_____
###Markdown
It looks like perhaps two of the input variables have a Gaussian distribution. This is useful to note as we can use algorithms that can exploit this assumption.
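 One way to firm up that visual impression is a normality test. A minimal hedged sketch using scipy (scipy is not otherwise used in this kernel):
###Code
from scipy import stats
# D'Agostino-Pearson test on a sample that is Gaussian by construction
stat, p = stats.normaltest(np.random.normal(size=10000))
print('normaltest statistic={:.2f}, p-value={:.3f}'.format(stat, p))
# a large p-value means there is no evidence against normality
###Output
_____no_output_____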
###Code
yourkernels["TotalViews"].hist();
yourkernels["TotalComments"].hist();
sns.factorplot('TotalViews','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
4-5 Box and Whisker PlotsIn descriptive statistics, a **box plot** or boxplot is a method for graphically depicting groups of numerical data through their quartiles. Box plots may also have lines extending vertically from the boxes (whiskers) indicating variability outside the upper and lower quartiles, hence the terms box-and-whisker plot and box-and-whisker diagram.[wikipedia]
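 A quick numeric sketch of those quartile terms, using NumPy only (the values are random):
###Code
data = np.random.normal(size=1000)
q1, med, q3 = np.percentile(data, [25, 50, 75])
iqr = q3 - q1   # the interquartile range spanned by the box
print('Q1={:.2f} median={:.2f} Q3={:.2f} IQR={:.2f}'.format(q1, med, q3, iqr))
# by default, matplotlib whiskers reach the last datum within 1.5*IQR of the box
###Output
_____no_output_____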
###Code
normal_sample = np.random.normal(loc=0.0, scale=1.0, size=10000)
random_sample = np.random.random(size=10000)
gamma_sample = np.random.gamma(2, size=10000)
df = pd.DataFrame({'normal': normal_sample,
'random': random_sample,
'gamma': gamma_sample})
plt.figure()
# create a boxplot of the normal data, assign the output to a variable to suppress output
_ = plt.boxplot(df['normal'], whis='range')
# clear the current figure
plt.clf()
# plot boxplots for all three of df's columns
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis='range')
plt.figure()
_ = plt.hist(df['gamma'], bins=100)
plt.figure()
plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis='range')
import mpl_toolkits.axes_grid1.inset_locator as mpl_il
# overlay an inset axis on top of the current figure
ax2 = mpl_il.inset_axes(plt.gca(), width='60%', height='40%', loc=2)
ax2.hist(df['gamma'], bins=100)
ax2.margins(x=0.5)
# switch the y axis ticks for ax2 to the right side
ax2.yaxis.tick_right()
# if the `whis` argument isn't passed, boxplot defaults to whiskers at 1.5*IQR (interquartile range), with points beyond them drawn as outliers
plt.figure()
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ] )
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
4-6 Heatmaps
###Code
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
_ = plt.hist2d(X, Y, bins=25)
plt.figure()
_ = plt.hist2d(X, Y, bins=100)
###Output
_____no_output_____
###Markdown
4-7 Animations
###Code
import matplotlib.animation as animation
n = 100
x = np.random.randn(n)
# create the function that will do the plotting, where curr is the current frame
def update(curr):
    # check if animation is at the last frame, and if so, stop the animation
if curr == n:
a.event_source.stop()
plt.cla()
bins = np.arange(-4, 4, 0.5)
plt.hist(x[:curr], bins=bins)
plt.axis([-4,4,0,30])
plt.gca().set_title('Sampling the Normal Distribution')
plt.gca().set_ylabel('Frequency')
plt.gca().set_xlabel('Value')
plt.annotate('n = {}'.format(curr), [3,27])
fig = plt.figure()
a = animation.FuncAnimation(fig, update, interval=100)
###Output
_____no_output_____
###Markdown
4-8 Interactivity
###Code
plt.figure()
data = np.random.rand(10)
plt.plot(data)
def onclick(event):
plt.cla()
plt.plot(data)
plt.gca().set_title('Event at pixels {},{} \nand data {},{}'.format(event.x, event.y, event.xdata, event.ydata))
# tell mpl_connect we want to pass a 'button_press_event' into onclick when the event is detected
plt.gcf().canvas.mpl_connect('button_press_event', onclick)
from random import shuffle
origins = ['China', 'Brazil', 'India', 'USA', 'Canada', 'UK', 'Germany', 'Iraq', 'Chile', 'Mexico']
shuffle(origins)
df = pd.DataFrame({'height': np.random.rand(10),
'weight': np.random.rand(10),
'origin': origins})
df
plt.figure()
# picker=5 means the mouse doesn't have to click directly on an event, but can be up to 5 pixels away
plt.scatter(df['height'], df['weight'], picker=5)
plt.gca().set_ylabel('Weight')
plt.gca().set_xlabel('Height')
def onpick(event):
origin = df.iloc[event.ind[0]]['origin']
plt.gca().set_title('Selected item came from {}'.format(origin))
# tell mpl_connect we want to pass a 'pick_event' into onpick when the event is detected
plt.gcf().canvas.mpl_connect('pick_event', onpick)
# use the 'seaborn-colorblind' style
plt.style.use('seaborn-colorblind')
###Output
_____no_output_____
###Markdown
4-9 DataFrame.plot
###Code
np.random.seed(123)
df = pd.DataFrame({'A': np.random.randn(365).cumsum(0),
'B': np.random.randn(365).cumsum(0) + 20,
'C': np.random.randn(365).cumsum(0) - 20},
index=pd.date_range('1/1/2017', periods=365))
df.head()
df.plot('A','B', kind = 'scatter');
###Output
_____no_output_____
###Markdown
You can also choose the plot kind by using the `DataFrame.plot.kind` methods instead of providing the `kind` keyword argument.`kind` :- `'line'` : line plot (default)- `'bar'` : vertical bar plot- `'barh'` : horizontal bar plot- `'hist'` : histogram- `'box'` : boxplot- `'kde'` : Kernel Density Estimation plot- `'density'` : same as 'kde'- `'area'` : area plot- `'pie'` : pie plot- `'scatter'` : scatter plot- `'hexbin'` : hexbin plot [Go to top](top)
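 A tiny sketch of the two equivalent spellings, reusing the df defined above:
###Code
df.plot(kind='line');   # keyword-argument form
df.plot.line();         # accessor form, same chart
###Output
_____no_output_____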
###Code
# create a scatter plot of columns 'A' and 'C', with changing color (c) and size (s) based on column 'B'
df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
ax = df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
ax.set_aspect('equal')
df.plot.box();
df.plot.hist(alpha=0.7);
###Output
_____no_output_____
###Markdown
[Kernel density estimation plots](https://en.wikipedia.org/wiki/Kernel_density_estimation) are useful for deriving a smooth continuous function from a given sample.
###Code
df.plot.kde();
###Output
_____no_output_____
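###Markdown
 The smoothness of the estimate is controlled by the bandwidth. A short sketch (bw_method is the pandas/scipy bandwidth scaling factor):
###Code
df.plot.kde(bw_method=0.3);   # narrow bandwidth: bumpier estimate
df.plot.kde(bw_method=1.0);   # wide bandwidth: smoother estimate
###Output
_____no_output_____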
###Markdown
 5- SeabornSeaborn is an open source, BSD-licensed Python library providing a high-level API for visualizing data using the Python programming language.[9] It is built on top of Matplotlib and closely integrated with pandas data structures. Here is some of the functionality that Seaborn offers: a dataset-oriented API for examining relationships between multiple variables; specialized support for using categorical variables to show observations or aggregate statistics; options for visualizing univariate or bivariate distributions and for comparing them between subsets of data; automatic estimation and plotting of linear regression models; convenient views onto the overall structure of complex datasets; high-level abstractions for structuring multi-plot grids; concise control over Matplotlib figure styling with several built-in themes; and tools for choosing color palettes that faithfully reveal patterns in your data.[Go to top](top) 5-1 Seaborn Vs MatplotlibIt is summarized that if Matplotlib “tries to make easy things easy and hard things possible”, Seaborn tries to make a well-defined set of hard things easy too.Seaborn helps resolve the two major problems faced by Matplotlib; the problems are* Default Matplotlib parameters* Working with data framesAs Seaborn complements and extends Matplotlib, the learning curve is quite gradual. If you know Matplotlib, you are already half way through Seaborn.Important Features of SeabornSeaborn is built on top of Python’s core visualization library Matplotlib. It is meant to serve as a complement, and not a replacement. The features help in:* Built-in themes for styling Matplotlib graphics* Visualizing univariate and bivariate data* Fitting and visualizing linear regression models* Plotting statistical time series data* Working well with NumPy and pandas data structuresIn most cases, you will still use Matplotlib for simple plotting. Knowledge of Matplotlib is recommended to tweak Seaborn’s default plots.[9][Go to top](top)
###Code
def sinplot(flip = 1):
x = np.linspace(0, 14, 100)
for i in range(1, 5):
plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip)
sinplot()
plt.show()
def sinplot(flip = 1):
x = np.linspace(0, 14, 100)
for i in range(1, 5):
plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip)
sns.set()
sinplot()
plt.show()
np.random.seed(1234)
v1 = pd.Series(np.random.normal(0,10,1000), name='v1')
v2 = pd.Series(2*v1 + np.random.normal(60,15,1000), name='v2')
plt.figure()
plt.hist(v1, alpha=0.7, bins=np.arange(-50,150,5), label='v1');
plt.hist(v2, alpha=0.7, bins=np.arange(-50,150,5), label='v2');
plt.legend();
plt.figure()
# we can pass keyword arguments for each individual component of the plot
sns.distplot(v2, hist_kws={'color': 'Teal'}, kde_kws={'color': 'Navy'});
sns.jointplot(v1, v2, alpha=0.4);
grid = sns.jointplot(v1, v2, alpha=0.4);
grid.ax_joint.set_aspect('equal')
sns.jointplot(v1, v2, kind='hex');
# set the seaborn style for all the following plots
sns.set_style('white')
sns.jointplot(v1, v2, kind='kde', space=0);
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
# violin plot of TotalVotes against TotalViews
#command--> 24
sns.violinplot(data=yourkernels,x="TotalViews", y="TotalVotes")
# violin plot of TotalVotes against TotalComments
sns.violinplot(data=yourkernels,x="TotalComments", y="TotalVotes")
sns.violinplot(data=yourkernels,x="Medal", y="TotalVotes")
sns.violinplot(data=yourkernels,x="Medal", y="TotalComments")
###Output
_____no_output_____
###Markdown
 5-2 kdeplot
###Code
# seaborn's kdeplot, plots univariate or bivariate density estimates.
#Size can be changed by tweaking the value used
#command--> 25
sns.FacetGrid(yourkernels, hue="Medal", size=5).map(sns.kdeplot, "TotalComments").add_legend()
plt.show()
sns.FacetGrid(yourkernels, hue="Medal", size=5).map(sns.kdeplot, "TotalVotes").add_legend()
plt.show()
f,ax=plt.subplots(1,3,figsize=(20,8))
sns.distplot(yourkernels[yourkernels['Medal']==1].TotalVotes,ax=ax[0])
ax[0].set_title('TotalVotes in Medal 1')
sns.distplot(yourkernels[yourkernels['Medal']==2].TotalVotes,ax=ax[1])
ax[1].set_title('TotalVotes in Medal 2')
sns.distplot(yourkernels[yourkernels['Medal']==3].TotalVotes,ax=ax[2])
ax[2].set_title('TotalVotes in Medal 3')
plt.show()
###Output
_____no_output_____
###Markdown
5-3 jointplot
###Code
# Use seaborn's jointplot to make a hexagonal bin plot
#Set desired size and ratio and choose a color.
#command--> 25
sns.jointplot(x="TotalVotes", y="TotalViews", data=yourkernels, size=10,ratio=10, kind='hex',color='green')
plt.show()
###Output
_____no_output_____
###Markdown
 5-4 jointplot (kde)
###Code
# we will use seaborn's jointplot, which shows bivariate scatterplots and univariate histograms with kernel density
# estimation in the same figure
sns.jointplot(x="TotalVotes", y="TotalViews", data=yourkernels, size=6, kind='kde', color='#800000', space=0)
###Output
_____no_output_____
###Markdown
5-5 Heatmap
###Code
#command--> 26
plt.figure(figsize=(10,7))
sns.heatmap(yourkernels.corr(),annot=True,cmap='cubehelix_r') #draws a heatmap with the correlation matrix calculated by yourkernels.corr() as input
plt.show()
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
5-6 distplot
###Code
sns.distplot(yourkernels['TotalVotes']);
###Output
_____no_output_____
###Markdown
6- PlotlyHow to use **Plotly** offline inside IPython notebooks. 6-1 New to Plotly?Plotly, also known by its URL, Plot.ly, is a technical computing company headquartered in Montreal, Quebec, that develops online data analytics and visualization tools. Plotly provides online graphing, analytics, and statistics tools for individuals and collaboration, as well as scientific graphing libraries for Python, R, MATLAB, Perl, Julia, Arduino, and REST.[Go to top](top)
###Code
# example for plotly
import plotly.offline as py
import plotly.graph_objs as go
from sklearn import datasets
py.init_notebook_mode(connected=True)
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
trace = go.Scatter(x=X[:, 0],
y=X[:, 1],
mode='markers',
marker=dict(color=np.random.randn(150),
size=10,
colorscale='Viridis',
showscale=False))
layout = go.Layout(title='Training Points',
xaxis=dict(title='Sepal length',
showgrid=False),
yaxis=dict(title='Sepal width',
showgrid=False),
)
fig = go.Figure(data=[trace], layout=layout)
py.iplot(fig)
from sklearn.decomposition import PCA
X_reduced = PCA(n_components=3).fit_transform(iris.data)
trace = go.Scatter3d(x=X_reduced[:, 0],
y=X_reduced[:, 1],
z=X_reduced[:, 2],
mode='markers',
marker=dict(
size=6,
color=np.random.randn(150),
colorscale='Viridis',
opacity=0.8)
)
layout=go.Layout(title='First three PCA directions',
scene=dict(
xaxis=dict(title='1st eigenvector'),
yaxis=dict(title='2nd eigenvector'),
zaxis=dict(title='3rd eigenvector'))
)
fig = go.Figure(data=[trace], layout=layout)
py.iplot(fig)
###Output
_____no_output_____
###Markdown
 6-2 Plotly Offline from Command LineYou can plot your graphs from a Python script from the command line. On executing the script, it will open a web browser with your Plotly graph drawn.[Go to top](top)
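 A hedged sketch of what such a standalone script could look like (filename and auto_open are plotly.offline.plot options):
###Code
# save this as standalone_plot.py and run: python standalone_plot.py
from plotly.offline import plot
import plotly.graph_objs as go
plot([go.Scatter(x=[1, 2, 3], y=[3, 1, 6])],
     filename='my_plot.html', auto_open=False)   # writes the HTML file; set auto_open=True to launch the browser
###Output
_____no_output_____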
###Code
from plotly.offline import plot
from plotly import tools
plot([go.Scatter(x=[1, 2, 3], y=[3, 1, 6])])
np.random.seed(5)
fig = tools.make_subplots(rows=2, cols=3,
print_grid=False,
specs=[[{'is_3d': True}, {'is_3d': True}, {'is_3d': True}],
[ {'is_3d': True, 'rowspan':1}, None, None]])
scene = dict(
camera = dict(
up=dict(x=0, y=0, z=1),
center=dict(x=0, y=0, z=0),
eye=dict(x=2.5, y=0.1, z=0.1)
),
xaxis=dict(
range=[-1, 4],
title='Petal width',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)',
showticklabels=False, ticks=''
),
yaxis=dict(
range=[4, 8],
title='Sepal length',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)',
showticklabels=False, ticks=''
),
zaxis=dict(
range=[1,8],
title='Petal length',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)',
showticklabels=False, ticks=''
)
)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
from sklearn.cluster import KMeans
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
est.fit(X)
labels = est.labels_
trace = go.Scatter3d(x=X[:, 3], y=X[:, 0], z=X[:, 2],
showlegend=False,
mode='markers',
marker=dict(
color=labels.astype(np.float),
line=dict(color='black', width=1)
))
fig.append_trace(trace, 1, fignum)
fignum = fignum + 1
y = np.choose(y, [1, 2, 0]).astype(np.float)
trace1 = go.Scatter3d(x=X[:, 3], y=X[:, 0], z=X[:, 2],
showlegend=False,
mode='markers',
marker=dict(
color=y,
line=dict(color='black', width=1)))
fig.append_trace(trace1, 2, 1)
fig['layout'].update(height=900, width=900,
margin=dict(l=10,r=10))
py.iplot(fig)
###Output
_____no_output_____
###Markdown
7- Bokeh**Bokeh** is a large library that exposes many capabilities, so this section is only a quick tour of some common Bokeh use cases and workflows. For more detailed information please consult the full User Guide.[11]Let’s begin with some examples. Plotting data in basic Python lists as a line plot including zoom, pan, save, and other tools is simple and straightforward:[Go to top](top)
###Code
from bokeh.io import output_notebook, show
from bokeh.plotting import figure
output_notebook()
x = np.linspace(0, 2*np.pi, 2000)
y = np.sin(x)
# prepare some data
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
# create a new plot with a title and axis labels
p = figure(title="simple line example", x_axis_label='x', y_axis_label='y')
# add a line renderer with legend and line thickness
p.line(x, y, legend="Temp.", line_width=2)
# show the results
show(p)
###Output
_____no_output_____
###Markdown
When you execute this script, you will see that a new output file "lines.html" is created, and that a browser automatically opens a new tab to display it. (For presentation purposes we have included the plot output directly inline in this document.)The basic steps to creating plots with the bokeh.plotting interface are:Prepare some dataIn this case plain python lists, but could also be NumPy arrays or Pandas series.Tell Bokeh where to generate outputIn this case using output_file(), with the filename "lines.html". Another option is output_notebook() for use in Jupyter notebooks.Call figure()This creates a plot with typical default options and easy customization of title, tools, and axes labels.Add renderersIn this case, we use line() for our data, specifying visual customizations like colors, legends and widths.Ask Bokeh to show() or save() the results.These functions save the plot to an HTML file and optionally display it in a browser.Steps three and four can be repeated to create more than one plot, as shown in some of the examples below.The bokeh.plotting interface is also quite handy if we need to customize the output a bit more by adding more data series, glyphs, logarithmic axis, and so on. It’s also possible to easily combine multiple glyphs together on one plot as shown below:[Go to top](top)
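 The steps above mention output_file() and save(); here is a minimal hedged sketch of that file-based workflow (in a notebook, the output_notebook()/show() pair used above is usually preferable):
###Code
from bokeh.plotting import figure, output_file, save
output_file("lines.html")                    # step 2: route output to an HTML file
p2 = figure(title="file output example")     # step 3: create a figure
p2.line([1, 2, 3], [4, 6, 2], line_width=2)  # step 4: add a line renderer
save(p2)                                     # step 5: write lines.html without opening a browser
###Output
_____no_output_____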
###Code
from bokeh.plotting import figure, output_file, show
# prepare some data
x = [0.1, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]
y0 = [i**2 for i in x]
y1 = [10**i for i in x]
y2 = [10**(i**2) for i in x]
# create a new plot
p = figure(
tools="pan,box_zoom,reset,save",
y_axis_type="log", y_range=[0.001, 10**11], title="log axis example",
x_axis_label='sections', y_axis_label='particles'
)
# add some renderers
p.line(x, x, legend="y=x")
p.circle(x, x, legend="y=x", fill_color="white", size=8)
p.line(x, y0, legend="y=x^2", line_width=3)
p.line(x, y1, legend="y=10^x", line_color="red")
p.circle(x, y1, legend="y=10^x", fill_color="red", line_color="red", size=6)
p.line(x, y2, legend="y=10^x^2", line_color="orange", line_dash="4 4")
# show the results
show(p)
# bokeh basics
# Create a blank figure with labels
p = figure(plot_width = 600, plot_height = 600,
title = 'Example Glyphs',
x_axis_label = 'X', y_axis_label = 'Y')
# Example data
squares_x = [1, 3, 4, 5, 8]
squares_y = [8, 7, 3, 1, 10]
circles_x = [9, 12, 4, 3, 15]
circles_y = [8, 4, 11, 6, 10]
# Add squares glyph
p.square(squares_x, squares_y, size = 12, color = 'navy', alpha = 0.6)
# Add circle glyph
p.circle(circles_x, circles_y, size = 12, color = 'red')
# Set to output the plot in the notebook
output_notebook()
# Show the plot
show(p)
###Output
_____no_output_____
###Markdown
8- NetworkX**NetworkX** is a Python package for the creation, manipulation, and study of the structure, dynamics, and functions of complex networks.
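 Beyond drawing, NetworkX is mostly used to compute structural properties. A small hedged sketch:
###Code
import networkx as nx
G = nx.erdos_renyi_graph(20, 0.2, seed=1)   # random graph: 20 nodes, edge probability 0.2
print(nx.density(G))            # fraction of possible edges that are present
print(nx.is_connected(G))       # is there a path between every pair of nodes?
print(nx.degree_histogram(G))   # count of nodes per degree value
###Output
_____no_output_____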
###Code
import sys
import matplotlib.pyplot as plt
import networkx as nx
G = nx.grid_2d_graph(5, 5) # 5x5 grid
# print the adjacency list
for line in nx.generate_adjlist(G):
print(line)
# write edgelist to grid.edgelist
nx.write_edgelist(G, path="grid.edgelist", delimiter=":")
# read edgelist from grid.edgelist
H = nx.read_edgelist(path="grid.edgelist", delimiter=":")
nx.draw(H)
plt.show()
from ipywidgets import interact
%matplotlib inline
import matplotlib.pyplot as plt
import networkx as nx
# wrap a few graph generation functions so they have the same signature
def random_lobster(n, m, k, p):
return nx.random_lobster(n, p, p / m)
def powerlaw_cluster(n, m, k, p):
return nx.powerlaw_cluster_graph(n, m, p)
def erdos_renyi(n, m, k, p):
return nx.erdos_renyi_graph(n, p)
def newman_watts_strogatz(n, m, k, p):
return nx.newman_watts_strogatz_graph(n, k, p)
def plot_random_graph(n, m, k, p, generator):
g = generator(n, m, k, p)
nx.draw(g)
plt.show()
interact(plot_random_graph, n=(2,30), m=(1,10), k=(1,10), p=(0.0, 1.0, 0.001),
generator={
'lobster': random_lobster,
'power law': powerlaw_cluster,
'Newman-Watts-Strogatz': newman_watts_strogatz,
u'Erdős-Rényi': erdos_renyi,
});
###Output
_____no_output_____
###Markdown
 Top 5 Data Visualization Libraries Tutorial last update: 31/12/2018> You may be interested in having a look at 10 Steps to Become a Data Scientist: 1. [Learn Python](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-1)2. [Python Packages](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-2)3. [Mathematics and Linear Algebra](https://www.kaggle.com/mjbahmani/linear-algebra-for-data-scientists)4. [Programming & Analysis Tools](https://www.kaggle.com/mjbahmani/20-ml-algorithms-15-plot-for-beginners)5. [Big Data](https://www.kaggle.com/mjbahmani/a-data-science-framework-for-quora)6. [Data visualization](https://www.kaggle.com/mjbahmani/top-5-data-visualization-libraries-tutorial)7. [Data Cleaning](https://www.kaggle.com/mjbahmani/machine-learning-workflow-for-house-prices)8. [How to solve a Problem?](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-2)9. [Machine Learning](https://www.kaggle.com/mjbahmani/a-comprehensive-ml-workflow-with-python)10. [Deep Learning](https://www.kaggle.com/mjbahmani/top-5-deep-learning-frameworks-tutorial)---------------------------------------------------------------------You can Fork and Run this kernel on Github:> [ GitHub](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist)------------------------------------------------------------------------------------------------------------- **I hope you find this kernel helpful and some UPVOTES would be very much appreciated**
 Notebook Content1. [Introduction](1)1. [Loading Packages](2) 1. [version](21) 1. [Setup](22) 1. [Data Collection](23)1. [Matplotlib](3) 1. [Scatterplots](31) 1. [ Line Plots](32) 1. [Bar Charts](33) 1. [Histograms](34) 1. [Box and Whisker Plots](35) 1. [Heatmaps](36) 1. [Animations](37) 1. [Interactivity](38) 1. [DataFrame.plot](39)1. [Seaborn](40) 1. [Seaborn Vs Matplotlib](37) 1. [Useful Python Data Visualization Libraries](38)1. [Plotly](60) 1. [New to Plotly?](61) 1. [Plotly Offline from Command Line](62)1. [Bokeh](63)1. [networkx](64)1. [Read more](39) 1. [Courses](40) 1. [Ebooks](41) 1. [Cheat sheet](41)1. [Conclusion](39) 1. [References](40) 1- IntroductionIf you have followed my other kernels so far, you will have noticed that, for beginners, I have introduced a course "10 Steps to Become a Data Scientist". In this kernel we will take another step together. There are plenty of kernels that can help you learn Python's libraries from scratch, but here on Kaggle I want to analyze Meta Kaggle, a popular dataset. After reading, you can use the same approach to analyze other real datasets and use it as a template to deal with ML problems.It is clear that everyone in this community is familiar with the Meta Kaggle dataset, but if you need to refresh your knowledge of it, please visit [meta-kaggle](https://www.kaggle.com/kaggle/meta-kaggle).I am open to your feedback for improving this **kernel** together.
###Code
from matplotlib.figure import Figure
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib
import warnings
import string
import numpy
import csv
import os
###Output
_____no_output_____
###Markdown
2-1 version
###Code
print('matplotlib: {}'.format(matplotlib.__version__))
print('seaborn: {}'.format(sns.__version__))
print('pandas: {}'.format(pd.__version__))
print('numpy: {}'.format(np.__version__))
#print('wordcloud: {}'.format(wordcloud.version))
###Output
_____no_output_____
###Markdown
2-2 SetupA few tiny adjustments for better **code readability**
###Code
sns.set(style='white', context='notebook', palette='deep')
pylab.rcParams['figure.figsize'] = 12,8
warnings.filterwarnings('ignore')
mpl.style.use('ggplot')
sns.set_style('white')
%matplotlib inline
###Output
_____no_output_____
###Markdown
 2-3 Data Collection**Data collection** is the process of gathering and measuring data, information or any variables of interest in a standardized and established manner that enables the collector to answer or test hypotheses and evaluate outcomes of the particular collection.[techopedia]I start collecting data by loading the Users, Kernels and ForumMessages datasets into **Pandas DataFrames**
###Code
# import kernels and users to play with it
#command--> 1
users = pd.read_csv("../input/Users.csv")
kernels = pd.read_csv("../input/Kernels.csv")
messages = pd.read_csv("../input/ForumMessages.csv")
###Output
_____no_output_____
###Markdown
 **Note:*** Each row is an observation (also known as: sample, example, instance, record)* Each column is a feature (also known as: predictor, attribute, independent variable, input, regressor, covariate) [Go to top](top)
###Code
#command--> 2
users.sample(1)
###Output
_____no_output_____
###Markdown
 Please **replace** your username and find your userid. We suppose that userid == authoruserid and use userid for both the kernels and users datasets
###Code
username = "mjbahmani"
userid = int(users[users['UserName'] == username].Id)
userid
###Output
_____no_output_____
###Markdown
 But if we had missing values, we could just use **dropna()** (be careful: sometimes you should not do this!)
###Code
# remove rows that have NAs
print('Before Dropping', messages.shape)
#command--> 3
messages = messages.dropna()
print('After Dropping', messages.shape)
###Output
_____no_output_____
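###Markdown
 When dropping rows is too aggressive, filling is the usual alternative. A hedged sketch (the blanket fill below is illustrative; no specific ForumMessages column names are assumed):
###Code
# replace every remaining NaN with an empty string instead of dropping rows
messages_filled = messages.fillna('')
print(messages_filled.shape)
###Output
_____no_output_____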
###Markdown
 2-3-1 FeaturesFeatures can be of the following types:1. numeric1. categorical1. ordinal1. datetime1. coordinatesCan you find the type of each feature in **Meta Kaggle**?!For getting some information about the dataset you can use the **info()** command [Go to top](top)
###Code
#command--> 4
print(users.info())
###Output
_____no_output_____
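###Markdown
 To group the columns by the types listed above, pandas can report the dtypes directly. A short sketch:
###Code
print(users.dtypes.value_counts())                              # how many columns of each dtype
print(users.select_dtypes(include='number').columns.tolist())   # the numeric features
###Output
_____no_output_____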
###Markdown
 2-3-2 Explore the Dataset1. Dimensions of the dataset.1. Peek at the data itself.1. Statistical summary of all attributes.1. Breakdown of the data by the class variable.Don’t worry, each look at the data is **one command**. These are useful commands that you can use again and again on future projects. [Go to top](top)
###Code
# shape
#command--> 5
print(users.shape)
# rows * columns
#command--> 6
users.size
###Output
_____no_output_____
###Markdown
 We can get a quick idea of how many instances (rows) and how many attributes (columns) the data contains with the shape property. You can see the number of unique items for **Medal** with the command below:
###Code
#command--> 7
kernels['Medal'].unique()
#command--> 8
kernels["Medal"].value_counts()
###Output
_____no_output_____
###Markdown
To check the first 5 rows of the data set, we can use head(5).
###Code
kernels.head(5)
###Output
_____no_output_____
###Markdown
 To check out the last 5 rows of the data set, we use the tail() function
###Code
#command--> 9
users.tail()
###Output
_____no_output_____
###Markdown
 To pop up 5 random rows from the data set, we can use the **sample(5)** function
###Code
kernels.sample(5)
###Output
_____no_output_____
###Markdown
To give a statistical summary about the dataset, we can use **describe()**
###Code
kernels.describe()
###Output
_____no_output_____
###Markdown
 2-3-5 Find yourself in the Users dataset
###Code
#command--> 12
users[users['Id']==userid]
###Output
_____no_output_____
###Markdown
2-3-6 Find your kernels in Kernels dataset
###Code
#command--> 13
yourkernels=kernels[kernels['AuthorUserId']==userid]
yourkernels.head(2)
###Output
_____no_output_____
###Markdown
3- Data Visualization LibrariesBefore you start learning , I am giving an overview of 10 interdisciplinary **Python data visualization libraries**, from the well-known to the obscure.* 1- matplotlibmatplotlib is the O.G. of Python data visualization libraries. Despite being over a decade old, it’s still the most widely used library for plotting in the Python community. It was designed to closely resemble MATLAB, a proprietary programming language developed in the 1980s.* 2- SeabornSeaborn harnesses the power of matplotlib to create beautiful charts in a few lines of code. The key difference is Seaborn’s default styles and color palettes, which are designed to be more aesthetically pleasing and modern. Since Seaborn is built on top of matplotlib, you’ll need to know matplotlib to tweak Seaborn’s defaults.* 3- ggplotggplot is based on ggplot2, an R plotting system, and concepts from The Grammar of Graphics. ggplot operates differently than matplotlib: it lets you layer components to create a complete plot. For instance, you can start with axes, then add points, then a line, a trendline, etc. Although The Grammar of Graphics has been praised as an “intuitive” method for plotting, seasoned matplotlib users might need time to adjust to this new mindset.* 4- BokehLike ggplot, Bokeh is based on The Grammar of Graphics, but unlike ggplot, it’s native to Python, not ported over from R. Its strength lies in the ability to create interactive, web-ready plots, which can be easily outputted as JSON objects, HTML documents, or interactive web applications. Bokeh also supports streaming and real-time data.* 5- pygalLike Bokeh and Plotly, pygal offers interactive plots that can be embedded in the web browser. Its prime differentiator is the ability to output charts as SVGs. As long as you’re working with smaller datasets, SVGs will do you just fine. But if you’re making charts with hundreds of thousands of data points, they’ll have trouble rendering and become sluggish.* 6- PlotlyYou might know Plotly as an online platform for data visualization, but did you also know you can access its capabilities from a Python notebook? Like Bokeh, Plotly’s forte is making interactive plots, but it offers some charts you won’t find in most libraries, like contour plots, dendograms, and 3D charts.* 7- geoplotlibgeoplotlib is a toolbox for creating maps and plotting geographical data. You can use it to create a variety of map-types, like choropleths, heatmaps, and dot density maps. You must have Pyglet (an object-oriented programming interface) installed to use geoplotlib. Nonetheless, since most Python data visualization libraries don’t offer maps, it’s nice to have a library dedicated solely to them.* 8- GleamGleam is inspired by R’s Shiny package. It allows you to turn analyses into interactive web apps using only Python scripts, so you don’t have to know any other languages like HTML, CSS, or JavaScript. Gleam works with any Python data visualization library. Once you’ve created a plot, you can build fields on top of it so users can filter and sort data.* 9- missingnoDealing with missing data is a pain. missingno allows you to quickly gauge the completeness of a dataset with a visual summary, instead of trudging through a table. 
You can filter and sort data based on completion or spot correlations with a heatmap or a dendrogram.* 10- LeatherLeather’s creator, Christopher Groskopf, puts it best: “Leather is the Python charting library for those who need charts now and don’t care if they’re perfect.” It’s designed to work with all data types and produces charts as SVGs, so you can scale them without losing image quality. Since this library is relatively new, some of the documentation is still in progress. The charts you can make are pretty basic—but that’s the intention.At the end, nice cheatsheet on how to best visualize your data. I think I will print it out as a good reminder of "best practices". Check out the link for the complete cheatsheet, also as a PDF. * 11- ChartifyChartify is a Python library that makes it easy for data scientists to create charts.Why use Chartify?1. Consistent input data format: Spend less time transforming data to get your charts to work. All plotting functions use a consistent tidy input data format.1. Smart default styles: Create pretty charts with very little customization required.1. Simple API: We've attempted to make to the API as intuitive and easy to learn as possible.1. Flexibility: Chartify is built on top of Bokeh, so if you do need more control you can always fall back on Bokeh's API.Link: https://github.com/mjbahmani/Machine-Learning-Workflow-with-Python![cheatsheet ][1][Reference][2] [1]: http://s8.picofile.com/file/8340669884/53f6a826_d7df_4b55_81e6_7c23b3fff0a3_original.png [2]: https://blog.modeanalytics.com/python-data-visualization-libraries/ 4- MatplotlibThis Matplotlib tutorial takes you through the basics Python data visualization: the anatomy of a plot, pyplot and pylab, and much more [Go to top](top) You can show matplotlib figures directly in the notebook by using the `%matplotlib notebook` and `%matplotlib inline` magic commands. `%matplotlib notebook` provides an interactive environment. We can use html cell magic to display the image.
###Code
plt.plot([1, 2, 3, 4], [10, 20, 25, 30], color='lightblue', linewidth=3)
plt.scatter([0.3, 3.8, 1.2, 2.5], [11, 25, 9, 26], color='darkgreen', marker='^')
plt.xlim(0.5, 4.5)
plt.show()
###Output
_____no_output_____
###Markdown
Simple and powerful visualizations can be generated using the Matplotlib Python Library. More than a decade old, it is the most widely-used library for plotting in the Python community. A wide range of graphs from histograms to heat plots to line plots can be plotted using Matplotlib.Many other libraries are built on top of Matplotlib and are designed to work in conjunction with analysis, it being the first Python data visualization library. Libraries like pandas and matplotlib are “wrappers” over Matplotlib allowing access to a number of Matplotlib’s methods with less code. 4-1 Scatterplots
###Code
x = np.array([1,2,3,4,5,6,7,8])
y = x
plt.figure()
plt.scatter(x, y) # similar to plt.plot(x, y, '.'), but the underlying child objects in the axes are not Line2D
x = np.array([1,2,3,4,5,6,7,8])
y = x
# create a list of colors for each point to have
# ['green', 'green', 'green', 'green', 'green', 'green', 'green', 'red']
colors = ['green']*(len(x)-1)
colors.append('red')
plt.figure()
# plot the point with size 100 and chosen colors
plt.scatter(x, y, s=100, c=colors)
plt.figure()
# plot a data series 'Tall students' in red using the first two elements of x and y
plt.scatter(x[:2], y[:2], s=100, c='red', label='Tall students')
# plot a second data series 'Short students' in blue using the last three elements of x and y
plt.scatter(x[2:], y[2:], s=100, c='blue', label='Short students')
x = np.random.randint(low=1, high=11, size=50)
y = x + np.random.randint(1, 5, size=x.size)
data = np.column_stack((x, y))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2,
figsize=(8, 4))
ax1.scatter(x=x, y=y, marker='o', c='r', edgecolor='b')
ax1.set_title('Scatter: $x$ versus $y$')
ax1.set_xlabel('$x$')
ax1.set_ylabel('$y$')
ax2.hist(data, bins=np.arange(data.min(), data.max()),
label=('x', 'y'))
ax2.legend(loc=(0.65, 0.8))
ax2.set_title('Frequencies of $x$ and $y$')
ax2.yaxis.tick_right()
# Modify the graph above by assigning each species an individual color.
#command--> 19
x=yourkernels["TotalVotes"]
y=yourkernels["TotalViews"]
plt.scatter(x, y)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
4-2 Line Plots
###Code
linear_data = np.array([1,2,3,4,5,6,7,8])
exponential_data = linear_data**2
plt.figure()
# plot the linear data and the exponential data
plt.plot(linear_data, '-o', exponential_data, '-o')
# plot another series with a dashed red line
plt.plot([22,44,55], '--r')
###Output
_____no_output_____
###Markdown
4-3 Bar Charts
###Code
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3)
new_xvals = []
# plot another set of bars, adjusting the new xvals to make up for the first set of bars plotted
for item in xvals:
new_xvals.append(item+0.3)
plt.bar(new_xvals, exponential_data, width = 0.3 ,color='red')
from random import randint
linear_err = [randint(0,15) for x in range(len(linear_data))]
# This will plot a new set of bars with errorbars using the list of random error values
plt.bar(xvals, linear_data, width = 0.3, yerr=linear_err)
# stacked bar charts are also possible
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3, color='b')
plt.bar(xvals, exponential_data, width = 0.3, bottom=linear_data, color='r')
# or use barh for horizontal bar charts
plt.figure()
xvals = range(len(linear_data))
plt.barh(xvals, linear_data, height = 0.3, color='b')
plt.barh(xvals, exponential_data, height = 0.3, left=linear_data, color='r')
# Initialize the plot
fig = plt.figure(figsize=(20,10))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
# or replace the three lines of code above by the following line:
#fig, (ax1, ax2) = plt.subplots(1,2, figsize=(20,10))
# Plot the data
ax1.bar([1,2,3],[3,4,5])
ax2.barh([0.5,1,2.5],[0,1,2])
# Show the plot
plt.show()
plt.figure()
# subplot with 1 row, 2 columns, and current axis is 1st subplot axes
plt.subplot(1, 2, 1)
linear_data = np.array([1,2,3,4,5,6,7,8])
plt.plot(linear_data, '-o')
exponential_data = linear_data**2
# subplot with 1 row, 2 columns, and current axis is 2nd subplot axes
plt.subplot(1, 2, 2)
plt.plot(exponential_data, '-o')
# plot exponential data on 1st subplot axes
plt.subplot(1, 2, 1)
plt.plot(exponential_data, '-x')
plt.figure()
ax1 = plt.subplot(1, 2, 1)
plt.plot(linear_data, '-o')
# pass sharey=ax1 to ensure the two subplots share the same y axis
ax2 = plt.subplot(1, 2, 2, sharey=ax1)
plt.plot(exponential_data, '-x')
###Output
_____no_output_____
###Markdown
4-4 Histograms
###Code
# create 2x2 grid of axis subplots
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
# draw n = 10, 100, 1000, and 10000 samples from the normal distribution and plot corresponding histograms
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample)
axs[n].set_title('n={}'.format(sample_size))
# repeat with number of bins set to 100
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample, bins=100)
axs[n].set_title('n={}'.format(sample_size))
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
plt.scatter(X,Y)
###Output
_____no_output_____
###Markdown
It looks like perhaps two of the input variables have a Gaussian distribution. This is useful to note as we can use algorithms that can exploit this assumption.
###Code
yourkernels["TotalViews"].hist();
yourkernels["TotalComments"].hist();
sns.factorplot('TotalViews','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
4-5 Box and Whisker PlotsIn descriptive statistics, a **box plot** or boxplot is a method for graphically depicting groups of numerical data through their quartiles. Box plots may also have lines extending vertically from the boxes (whiskers) indicating variability outside the upper and lower quartiles, hence the terms box-and-whisker plot and box-and-whisker diagram.[wikipedia]
###Code
normal_sample = np.random.normal(loc=0.0, scale=1.0, size=10000)
random_sample = np.random.random(size=10000)
gamma_sample = np.random.gamma(2, size=10000)
df = pd.DataFrame({'normal': normal_sample,
'random': random_sample,
'gamma': gamma_sample})
plt.figure()
# create a boxplot of the normal data, assign the output to a variable to supress output
_ = plt.boxplot(df['normal'], whis='range')
# clear the current figure
plt.clf()
# plot boxplots for all three of df's columns
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis='range')
plt.figure()
_ = plt.hist(df['gamma'], bins=100)
import mpl_toolkits.axes_grid1.inset_locator as mpl_il
plt.figure()
plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis='range')
# overlay axis on top of another
ax2 = mpl_il.inset_axes(plt.gca(), width='60%', height='40%', loc=2)
ax2.hist(df['gamma'], bins=100)
ax2.margins(x=0.5)
# switch the y axis ticks for ax2 to the right side
ax2.yaxis.tick_right()
# if `whis` argument isn't passed, boxplot defaults to showing 1.5*interquartile (IQR) whiskers with outliers
plt.figure()
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ] )
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
4-6 Heatmaps
###Code
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
_ = plt.hist2d(X, Y, bins=25)
plt.figure()
_ = plt.hist2d(X, Y, bins=100)
###Output
_____no_output_____
###Markdown
4-7 Animations
###Code
import matplotlib.animation as animation
n = 100
x = np.random.randn(n)
# create the function that will do the plotting, where curr is the current frame
def update(curr):
# check if animation is at the last frame, and if so, stop the animation a
if curr == n:
a.event_source.stop()
plt.cla()
bins = np.arange(-4, 4, 0.5)
plt.hist(x[:curr], bins=bins)
plt.axis([-4,4,0,30])
plt.gca().set_title('Sampling the Normal Distribution')
plt.gca().set_ylabel('Frequency')
plt.gca().set_xlabel('Value')
plt.annotate('n = {}'.format(curr), [3,27])
fig = plt.figure()
a = animation.FuncAnimation(fig, update, interval=100)
###Output
_____no_output_____
###Markdown
4-8 Interactivity
###Code
plt.figure()
data = np.random.rand(10)
plt.plot(data)
def onclick(event):
plt.cla()
plt.plot(data)
plt.gca().set_title('Event at pixels {},{} \nand data {},{}'.format(event.x, event.y, event.xdata, event.ydata))
# tell mpl_connect we want to pass a 'button_press_event' into onclick when the event is detected
plt.gcf().canvas.mpl_connect('button_press_event', onclick)
from random import shuffle
origins = ['China', 'Brazil', 'India', 'USA', 'Canada', 'UK', 'Germany', 'Iraq', 'Chile', 'Mexico']
shuffle(origins)
df = pd.DataFrame({'height': np.random.rand(10),
'weight': np.random.rand(10),
'origin': origins})
df
plt.figure()
# picker=5 means the mouse doesn't have to click directly on an event, but can be up to 5 pixels away
plt.scatter(df['height'], df['weight'], picker=5)
plt.gca().set_ylabel('Weight')
plt.gca().set_xlabel('Height')
def onpick(event):
origin = df.iloc[event.ind[0]]['origin']
plt.gca().set_title('Selected item came from {}'.format(origin))
# tell mpl_connect we want to pass a 'pick_event' into onpick when the event is detected
plt.gcf().canvas.mpl_connect('pick_event', onpick)
# use the 'seaborn-colorblind' style
plt.style.use('seaborn-colorblind')
###Output
_____no_output_____
###Markdown
4-9 DataFrame.plot
###Code
np.random.seed(123)
df = pd.DataFrame({'A': np.random.randn(365).cumsum(0),
'B': np.random.randn(365).cumsum(0) + 20,
'C': np.random.randn(365).cumsum(0) - 20},
index=pd.date_range('1/1/2017', periods=365))
df.head()
df.plot('A','B', kind = 'scatter');
###Output
_____no_output_____
###Markdown
You can also choose the plot kind by using the `DataFrame.plot.kind` methods instead of providing the `kind` keyword argument.`kind` :- `'line'` : line plot (default)- `'bar'` : vertical bar plot- `'barh'` : horizontal bar plot- `'hist'` : histogram- `'box'` : boxplot- `'kde'` : Kernel Density Estimation plot- `'density'` : same as 'kde'- `'area'` : area plot- `'pie'` : pie plot- `'scatter'` : scatter plot- `'hexbin'` : hexbin plot [Go to top](top)
###Code
# create a scatter plot of columns 'A' and 'C', with changing color (c) and size (s) based on column 'B'
df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
ax = df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
ax.set_aspect('equal')
df.plot.box();
df.plot.hist(alpha=0.7);
###Output
_____no_output_____
###Markdown
[Kernel density estimation plots](https://en.wikipedia.org/wiki/Kernel_density_estimation) are useful for deriving a smooth continuous function from a given sample.
###Code
df.plot.kde();
###Output
_____no_output_____
###Markdown
5- SeabornAs you have just read, **Seaborn** is complimentary to Matplotlib and it specifically targets statistical data visualization. But it goes even further than that: Seaborn extends Matplotlib and that’s why it can address the two biggest frustrations of working with Matplotlib. Or, as Michael Waskom says in the “introduction to Seaborn”: “If matplotlib “tries to make easy things easy and hard things possible”, seaborn tries to make a well-defined set of hard things easy too.”One of these hard things or frustrations had to do with the default Matplotlib parameters. Seaborn works with different parameters, which undoubtedly speaks to those users that don’t use the default looks of the Matplotlib plotsSeaborn is a library for making statistical graphics in Python. It is built on top of matplotlib and closely integrated with pandas data structures.Here is some of the functionality that seaborn offers:A dataset-oriented API for examining relationships between multiple variablesSpecialized support for using categorical variables to show observations or aggregate statisticsOptions for visualizing univariate or bivariate distributions and for comparing them between subsets of dataAutomatic estimation and plotting of linear regression models for different kinds dependent variablesConvenient views onto the overall structure of complex datasetsHigh-level abstractions for structuring multi-plot grids that let you easily build complex visualizationsConcise control over matplotlib figure styling with several built-in themesTools for choosing color palettes that faithfully reveal patterns in your dataSeaborn aims to make visualization a central part of exploring and understanding data. Its dataset-oriented plotting functions operate on dataframes and arrays containing whole datasets and internally perform the necessary semantic mapping and statistical aggregation to produce informative plots.Here’s an example of what this means:[Go to top](top) 5-1 Seaborn Vs MatplotlibIt is summarized that if Matplotlib “tries to make easy things easy and hard things possible”, Seaborn tries to make a well defined set of hard things easy too.”Seaborn helps resolve the two major problems faced by Matplotlib; the problems are* Default Matplotlib parameters* Working with data framesAs Seaborn compliments and extends Matplotlib, the learning curve is quite gradual. If you know Matplotlib, you are already half way through Seaborn.Important Features of SeabornSeaborn is built on top of Python’s core visualization library Matplotlib. It is meant to serve as a complement, and not a replacement. However, Seaborn comes with some very important features. Let us see a few of them here. The features help in −* Built in themes for styling matplotlib graphics* Visualizing univariate and bivariate data* Fitting in and visualizing linear regression models* Plotting statistical time series data* Seaborn works well with NumPy and Pandas data structures* It comes with built in themes for styling Matplotlib graphicsIn most cases, you will still use Matplotlib for simple plotting. The knowledge of Matplotlib is recommended to tweak Seaborn’s default plots.[Go to top](top)
###Code
def sinplot(flip = 1):
x = np.linspace(0, 14, 100)
for i in range(1, 5):
plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip)
sinplot()
plt.show()
def sinplot(flip = 1):
x = np.linspace(0, 14, 100)
for i in range(1, 5):
plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip)
sns.set()
sinplot()
plt.show()
np.random.seed(1234)
v1 = pd.Series(np.random.normal(0,10,1000), name='v1')
v2 = pd.Series(2*v1 + np.random.normal(60,15,1000), name='v2')
plt.figure()
plt.hist(v1, alpha=0.7, bins=np.arange(-50,150,5), label='v1');
plt.hist(v2, alpha=0.7, bins=np.arange(-50,150,5), label='v2');
plt.legend();
plt.figure()
# we can pass keyword arguments for each individual component of the plot
sns.distplot(v2, hist_kws={'color': 'Teal'}, kde_kws={'color': 'Navy'});
sns.jointplot(v1, v2, alpha=0.4);
grid = sns.jointplot(v1, v2, alpha=0.4);
grid.ax_joint.set_aspect('equal')
sns.jointplot(v1, v2, kind='hex');
# set the seaborn style for all the following plots
sns.set_style('white')
sns.jointplot(v1, v2, kind='kde', space=0);
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
# violinplots on petal-length for each species
#command--> 24
sns.violinplot(data=yourkernels,x="TotalViews", y="TotalVotes")
# violinplots on petal-length for each species
sns.violinplot(data=yourkernels,x="TotalComments", y="TotalVotes")
sns.violinplot(data=yourkernels,x="Medal", y="TotalVotes")
sns.violinplot(data=yourkernels,x="Medal", y="TotalComments")
###Output
_____no_output_____
###Markdown
How many NA elements in every column. 5-2 kdeplot
###Code
# seaborn's kdeplot, plots univariate or bivariate density estimates.
#Size can be changed by tweeking the value used
#command--> 25
sns.FacetGrid(yourkernels, hue="Medal", size=5).map(sns.kdeplot, "TotalComments").add_legend()
plt.show()
sns.FacetGrid(yourkernels, hue="Medal", size=5).map(sns.kdeplot, "TotalVotes").add_legend()
plt.show()
f,ax=plt.subplots(1,3,figsize=(20,8))
sns.distplot(yourkernels[yourkernels['Medal']==1].TotalVotes,ax=ax[0])
ax[0].set_title('TotalVotes in Medal 1')
sns.distplot(yourkernels[yourkernels['Medal']==2].TotalVotes,ax=ax[1])
ax[1].set_title('TotalVotes in Medal 2')
sns.distplot(yourkernels[yourkernels['Medal']==3].TotalVotes,ax=ax[2])
ax[2].set_title('TotalVotes in Medal 3')
plt.show()
###Output
_____no_output_____
###Markdown
5-3 jointplot
###Code
# Use seaborn's jointplot to make a hexagonal bin plot
#Set desired size and ratio and choose a color.
#command--> 25
sns.jointplot(x="TotalVotes", y="TotalViews", data=yourkernels, size=10,ratio=10, kind='hex',color='green')
plt.show()
###Output
_____no_output_____
###Markdown
5-4 andrews_curves
###Code
# we will use seaborn jointplot shows bivariate scatterplots and univariate histograms with Kernel density
# estimation in the same figure
sns.jointplot(x="TotalVotes", y="TotalViews", data=yourkernels, size=6, kind='kde', color='#800000', space=0)
###Output
_____no_output_____
###Markdown
5-5 Heatmap
###Code
#command--> 26
plt.figure(figsize=(10,7))
sns.heatmap(yourkernels.corr(),annot=True,cmap='cubehelix_r') #draws heatmap with input as the correlation matrix calculted by(iris.corr())
plt.show()
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
5-6 distplot
###Code
sns.distplot(yourkernels['TotalVotes']);
###Output
_____no_output_____
###Markdown
6- PlotlyHow to use **Plotly** offline inside IPython notebooks. 6-1 New to Plotly?Plotly, also known by its URL, Plot.ly, is a technical computing company headquartered in Montreal, Quebec, that develops online data analytics and visualization tools. Plotly provides online graphing, analytics, and statistics tools for individuals and collaboration, as well as scientific graphing libraries for Python, R, MATLAB, Perl, Julia, Arduino, and REST.[Go to top](top)
###Code
# example for plotly
import plotly.offline as py
import plotly.graph_objs as go
py.init_notebook_mode(connected=True)
from plotly import tools
from sklearn import datasets
import plotly.figure_factory as ff
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
trace = go.Scatter(x=X[:, 0],
y=X[:, 1],
mode='markers',
marker=dict(color=np.random.randn(150),
size=10,
colorscale='Viridis',
showscale=False))
layout = go.Layout(title='Training Points',
xaxis=dict(title='Sepal length',
showgrid=False),
yaxis=dict(title='Sepal width',
showgrid=False),
)
fig = go.Figure(data=[trace], layout=layout)
py.iplot(fig)
###Output
_____no_output_____
###Markdown
6-2 Plotly Offline from Command LineYou can plot your graphs from a python script from command line. On executing the script, it will open a web browser with your Plotly Graph drawn.[Go to top](top)
###Code
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.graph_objs as go
plot([go.Scatter(x=[1, 2, 3], y=[3, 1, 6])])
###Output
_____no_output_____
###Markdown
7- Bokeh**Bokeh** is a large library that exposes many capabilities, so this section is only a quick tour of some common Bokeh use cases and workflows. For more detailed information please consult the full User Guide.Let’s begin with some examples. Plotting data in basic Python lists as a line plot including zoom, pan, save, and other tools is simple and straightforward:[Go to top](top)
###Code
from ipywidgets import interact
import numpy as np
from bokeh.io import push_notebook, show, output_notebook
from bokeh.plotting import figure
output_notebook()
x = np.linspace(0, 2*np.pi, 2000)
y = np.sin(x)
from bokeh.plotting import figure, output_file, show
# prepare some data
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
# create a new plot with a title and axis labels
p = figure(title="simple line example", x_axis_label='x', y_axis_label='y')
# add a line renderer with legend and line thickness
p.line(x, y, legend="Temp.", line_width=2)
# show the results
show(p)
###Output
_____no_output_____
###Markdown
When you execute this script, you will see that a new output file "lines.html" is created, and that a browser automatically opens a new tab to display it. (For presentation purposes we have included the plot output directly inline in this document.)The basic steps to creating plots with the bokeh.plotting interface are:Prepare some dataIn this case plain python lists, but could also be NumPy arrays or Pandas series.Tell Bokeh where to generate outputIn this case using output_file(), with the filename "lines.html". Another option is output_notebook() for use in Jupyter notebooks.Call figure()This creates a plot with typical default options and easy customization of title, tools, and axes labels.Add renderersIn this case, we use line() for our data, specifying visual customizations like colors, legends and widths.Ask Bokeh to show() or save() the results.These functions save the plot to an HTML file and optionally display it in a browser.Steps three and four can be repeated to create more than one plot, as shown in some of the examples below.The bokeh.plotting interface is also quite handy if we need to customize the output a bit more by adding more data series, glyphs, logarithmic axis, and so on. It’s also possible to easily combine multiple glyphs together on one plot as shown below:[Go to top](top)
###Code
from bokeh.plotting import figure, output_file, show
# prepare some data
x = [0.1, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]
y0 = [i**2 for i in x]
y1 = [10**i for i in x]
y2 = [10**(i**2) for i in x]
# create a new plot
p = figure(
tools="pan,box_zoom,reset,save",
y_axis_type="log", y_range=[0.001, 10**11], title="log axis example",
x_axis_label='sections', y_axis_label='particles'
)
# add some renderers
p.line(x, x, legend="y=x")
p.circle(x, x, legend="y=x", fill_color="white", size=8)
p.line(x, y0, legend="y=x^2", line_width=3)
p.line(x, y1, legend="y=10^x", line_color="red")
p.circle(x, y1, legend="y=10^x", fill_color="red", line_color="red", size=6)
p.line(x, y2, legend="y=10^x^2", line_color="orange", line_dash="4 4")
# show the results
show(p)
###Output
_____no_output_____
###Markdown
 8- NetworkX
NetworkX is a Python package for the creation, manipulation, and study of the structure, dynamics, and functions of complex networks.
###Code
import matplotlib.pyplot as plt
import networkx as nx
G = nx.grid_2d_graph(5, 5) # 5x5 grid
# print the adjacency list
for line in nx.generate_adjlist(G):
print(line)
# write edgelist to grid.edgelist
nx.write_edgelist(G, path="grid.edgelist", delimiter=":")
# read edgelist from grid.edgelist
H = nx.read_edgelist(path="grid.edgelist", delimiter=":")
nx.draw(H)
plt.show()
from ipywidgets import interact
%matplotlib inline
import matplotlib.pyplot as plt
import networkx as nx
# wrap a few graph generation functions so they have the same signature
def random_lobster(n, m, k, p):
return nx.random_lobster(n, p, p / m)
def powerlaw_cluster(n, m, k, p):
return nx.powerlaw_cluster_graph(n, m, p)
def erdos_renyi(n, m, k, p):
return nx.erdos_renyi_graph(n, p)
def newman_watts_strogatz(n, m, k, p):
return nx.newman_watts_strogatz_graph(n, k, p)
def plot_random_graph(n, m, k, p, generator):
g = generator(n, m, k, p)
nx.draw(g)
plt.show()
interact(plot_random_graph, n=(2,30), m=(1,10), k=(1,10), p=(0.0, 1.0, 0.001),
generator={
'lobster': random_lobster,
'power law': powerlaw_cluster,
'Newman-Watts-Strogatz': newman_watts_strogatz,
u'Erdős-Rényi': erdos_renyi,
});
###Output
_____no_output_____
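###Markdown
Beyond drawing, NetworkX computes structural measures directly. A small sketch on the 5x5 grid graph built above (assuming `G` is still in scope):
###Code
# basic structural measures on the grid graph
print(G.number_of_nodes(), 'nodes,', G.number_of_edges(), 'edges')

# shortest path between opposite corners of the grid
print(nx.shortest_path(G, source=(0, 0), target=(4, 4)))

# average node degree
degrees = [d for _, d in G.degree()]
print('average degree:', sum(degrees) / len(degrees))
###Output
_____no_output_____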
###Markdown
 Top 5 Data Visualization Libraries Tutorial (last update: 07/01/2019)
> You may be interested in having a look at 10 Steps to Become a Data Scientist:
1. [Learn Python](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-1)
2. [Python Packages](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-2)
3. [Mathematics and Linear Algebra](https://www.kaggle.com/mjbahmani/linear-algebra-for-data-scientists)
4. [Programming & Analysis Tools](https://www.kaggle.com/mjbahmani/20-ml-algorithms-15-plot-for-beginners)
5. [Big Data](https://www.kaggle.com/mjbahmani/a-data-science-framework-for-quora)
6. [Data visualization](https://www.kaggle.com/mjbahmani/top-5-data-visualization-libraries-tutorial)
7. [Data Cleaning](https://www.kaggle.com/mjbahmani/machine-learning-workflow-for-house-prices)
8. [How to solve a Problem?](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-2)
9. [Machine Learning](https://www.kaggle.com/mjbahmani/a-comprehensive-ml-workflow-with-python)
10. [Deep Learning](https://www.kaggle.com/mjbahmani/top-5-deep-learning-frameworks-tutorial)

---------------------------------------------------------------------
You can Fork and Run this kernel on GitHub:
> [GitHub](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist)

**I hope you find this kernel helpful and some UPVOTES would be very much appreciated**

A simple example you will learn in this notebook:
###Code
from ipywidgets import interact
%matplotlib inline
import matplotlib.pyplot as plt
import networkx as nx
# wrap a few graph generation functions so they have the same signature
def random_lobster(n, m, k, p):
return nx.random_lobster(n, p, p / m)
def powerlaw_cluster(n, m, k, p):
return nx.powerlaw_cluster_graph(n, m, p)
def erdos_renyi(n, m, k, p):
return nx.erdos_renyi_graph(n, p)
def newman_watts_strogatz(n, m, k, p):
return nx.newman_watts_strogatz_graph(n, k, p)
def plot_random_graph(n, m, k, p, generator):
g = generator(n, m, k, p)
nx.draw(g)
plt.show()
interact(plot_random_graph, n=(2,30), m=(1,10), k=(1,10), p=(0.0, 1.0, 0.001),
generator={
'lobster': random_lobster,
'power law': powerlaw_cluster,
'Newman-Watts-Strogatz': newman_watts_strogatz,
u'Erdős-Rényi': erdos_renyi,
});
###Output
_____no_output_____
###Markdown
 Notebook Content
1. [Introduction](1)
1. [Loading Packages](2)
 1. [version](21)
 1. [Setup](22)
 1. [Data Collection](23)
1. [Matplotlib](3)
 1. [Scatterplots](31)
 1. [Line Plots](32)
 1. [Bar Charts](33)
 1. [Histograms](34)
 1. [Box and Whisker Plots](35)
 1. [Heatmaps](36)
 1. [Animations](37)
 1. [Interactivity](38)
 1. [DataFrame.plot](39)
1. [Seaborn](4)
 1. [Seaborn Vs Matplotlib](41)
 1. [Useful Python Data Visualization Libraries](42)
1. [Plotly](5)
 1. [New to Plotly?](51)
 1. [Plotly Offline from Command Line](52)
1. [Bokeh](6)
1. [networkx](7)
1. [Read more](8)
 1. [Courses](81)
 1. [Ebooks](82)
 1. [Cheat sheet](83)
1. [Conclusion](9)
 1. [References](10)

 1- Introduction
If you've followed my other kernels so far, you have noticed that, for those who are beginners, I've introduced a course "10 Steps to Become a Data Scientist". In this kernel we will take another step together. There are plenty of kernels that can help you learn Python's libraries from scratch, but here in Kaggle I want to analyze Meta Kaggle, a popular dataset. After reading, you can use it to analyze other real datasets and use it as a template to deal with ML problems. It is clear that everyone in this community is familiar with the Meta Kaggle dataset, but if you need to review your information about it, please visit [meta-kaggle](https://www.kaggle.com/kaggle/meta-kaggle). I am open to getting your feedback for improving this **kernel** together.

 2- Loading Packages
In this kernel we are using the following packages:
###Code
from matplotlib.figure import Figure
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib
import warnings
import string
import csv
import os
###Output
_____no_output_____
###Markdown
2-1 version
###Code
print('matplotlib: {}'.format(matplotlib.__version__))
print('seaborn: {}'.format(sns.__version__))
print('pandas: {}'.format(pd.__version__))
print('numpy: {}'.format(np.__version__))
#print('wordcloud: {}'.format(wordcloud.version))
###Output
_____no_output_____
###Markdown
2-2 SetupA few tiny adjustments for better **code readability**
###Code
sns.set(style='white', context='notebook', palette='deep')
pylab.rcParams['figure.figsize'] = 12,8
warnings.filterwarnings('ignore')
mpl.style.use('ggplot')
sns.set_style('white')
%matplotlib inline
###Output
_____no_output_____
###Markdown
2-3 Data Collection**Data collection** is the process of gathering and measuring data, information or any variables of interest in a standardized and established manner that enables the collector to answer or test hypothesis and evaluate outcomes of the particular collection.[techopedia]I start Collection Data by the Users and Kernels datasets into **Pandas DataFrames**
###Code
# import kernels and users to play with it
#command--> 1
users = pd.read_csv("../input/Users.csv")
kernels = pd.read_csv("../input/Kernels.csv")
messages = pd.read_csv("../input/ForumMessages.csv")
###Output
_____no_output_____
###Markdown
> * Each row is an observation (also known as: sample, example, instance, record)
> * Each column is a feature (also known as: Predictor, attribute, Independent Variable, input, regressor, Covariate)

[Go to top](top)
###Code
#command--> 2
users.sample(1)
###Output
_____no_output_____
###Markdown
Please **replace** your username and find your userid. We suppose that userid==authoruserid and use userid for both the kernels and users datasets.
###Code
username="mjbahmani"
userid=int(users[users['UserName']=="mjbahmani"].Id)
userid
###Output
_____no_output_____
###Markdown
But if we had missing values, we could just use **dropna()** (be careful: sometimes you should not do this!)
###Code
# remove rows that have NA's
print('Before Droping',messages.shape)
#command--> 3
messages = messages.dropna()
print('After Droping',messages.shape)
###Output
_____no_output_____
###Markdown
 2-3-1 Features
Features can be of the following types:
1. numeric
1. categorical
1. ordinal
1. datetime
1. coordinates

Can you find the types of the features in **Meta Kaggle**? For getting some information about the dataset you can use the **info()** command. [Go to top](top)
###Code
#command--> 4
print(users.info())
###Output
_____no_output_____
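###Markdown
info() lists each column's dtype; a quick sketch for splitting the columns by type with select_dtypes:
###Code
# numeric vs. object (text/categorical) columns of the Users dataframe
numeric_cols = users.select_dtypes(include=[np.number]).columns
object_cols = users.select_dtypes(include=['object']).columns
print('numeric columns:', list(numeric_cols))
print('object columns:', list(object_cols))
###Output
_____no_output_____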
###Markdown
 2-3-2 Explore the Dataset
1. Dimensions of the dataset.
1. Peek at the data itself.
1. Statistical summary of all attributes.
1. Breakdown of the data by the class variable.

Don’t worry, each look at the data is **one command**. These are useful commands that you can use again and again on future projects. [Go to top](top)
###Code
# shape
#command--> 5
print(users.shape)
#columns*rows
#command--> 6
users.size
###Output
_____no_output_____
###Markdown
We can get a quick idea of how many instances (rows) and how many attributes (columns) the data contains with the shape property. You can see the number of unique items for **Medal** with the command below:
###Code
#command--> 7
kernels['Medal'].unique()
#command--> 8
kernels["Medal"].value_counts()
###Output
_____no_output_____
###Markdown
To check the first 5 rows of the data set, we can use head(5).
###Code
kernels.head(5)
###Output
_____no_output_____
###Markdown
To check out the last 5 rows of the data set, we use the tail() function
###Code
#command--> 9
users.tail()
###Output
_____no_output_____
###Markdown
To pop up 5 random rows from the data set, we can use the **sample(5)** function
###Code
kernels.sample(5)
###Output
_____no_output_____
###Markdown
To give a statistical summary about the dataset, we can use **describe()**
###Code
kernels.describe()
###Output
_____no_output_____
###Markdown
 2-3-5 Find yourself in the Users dataset
###Code
#command--> 12
users[users['Id']==userid]
###Output
_____no_output_____
###Markdown
2-3-6 Find your kernels in Kernels dataset
###Code
#command--> 13
yourkernels=kernels[kernels['AuthorUserId']==userid]
yourkernels.head(2)
###Output
_____no_output_____
###Markdown
 3- Data Visualization Libraries
Before you start learning, I am giving an overview of 10 interdisciplinary **Python data visualization libraries**, from the well-known to the obscure.
* 1- matplotlib: matplotlib is the O.G. of Python data visualization libraries. Despite being over a decade old, it’s still the most widely used library for plotting in the Python community. It was designed to closely resemble MATLAB, a proprietary programming language developed in the 1980s.
* 2- Seaborn: Seaborn harnesses the power of matplotlib to create beautiful charts in a few lines of code. The key difference is Seaborn’s default styles and color palettes, which are designed to be more aesthetically pleasing and modern. Since Seaborn is built on top of matplotlib, you’ll need to know matplotlib to tweak Seaborn’s defaults.
* 3- ggplot: ggplot is based on ggplot2, an R plotting system, and concepts from The Grammar of Graphics. ggplot operates differently than matplotlib: it lets you layer components to create a complete plot. For instance, you can start with axes, then add points, then a line, a trendline, etc. Although The Grammar of Graphics has been praised as an “intuitive” method for plotting, seasoned matplotlib users might need time to adjust to this new mindset.
* 4- Bokeh: Like ggplot, Bokeh is based on The Grammar of Graphics, but unlike ggplot, it’s native to Python, not ported over from R. Its strength lies in the ability to create interactive, web-ready plots, which can be easily outputted as JSON objects, HTML documents, or interactive web applications. Bokeh also supports streaming and real-time data.
* 5- pygal: Like Bokeh and Plotly, pygal offers interactive plots that can be embedded in the web browser. Its prime differentiator is the ability to output charts as SVGs. As long as you’re working with smaller datasets, SVGs will do you just fine. But if you’re making charts with hundreds of thousands of data points, they’ll have trouble rendering and become sluggish.
* 6- Plotly: You might know Plotly as an online platform for data visualization, but did you also know you can access its capabilities from a Python notebook? Like Bokeh, Plotly’s forte is making interactive plots, but it offers some charts you won’t find in most libraries, like contour plots, dendrograms, and 3D charts.
* 7- geoplotlib: geoplotlib is a toolbox for creating maps and plotting geographical data. You can use it to create a variety of map-types, like choropleths, heatmaps, and dot density maps. You must have Pyglet (an object-oriented programming interface) installed to use geoplotlib. Nonetheless, since most Python data visualization libraries don’t offer maps, it’s nice to have a library dedicated solely to them.
* 8- Gleam: Gleam is inspired by R’s Shiny package. It allows you to turn analyses into interactive web apps using only Python scripts, so you don’t have to know any other languages like HTML, CSS, or JavaScript. Gleam works with any Python data visualization library. Once you’ve created a plot, you can build fields on top of it so users can filter and sort data.
* 9- missingno: Dealing with missing data is a pain. missingno allows you to quickly gauge the completeness of a dataset with a visual summary, instead of trudging through a table.
You can filter and sort data based on completion or spot correlations with a heatmap or a dendrogram.
* 10- Leather: Leather’s creator, Christopher Groskopf, puts it best: “Leather is the Python charting library for those who need charts now and don’t care if they’re perfect.” It’s designed to work with all data types and produces charts as SVGs, so you can scale them without losing image quality. Since this library is relatively new, some of the documentation is still in progress. The charts you can make are pretty basic, but that’s the intention.
At the end, a nice cheatsheet on how to best visualize your data. I think I will print it out as a good reminder of "best practices". Check out the link for the complete cheatsheet, also as a PDF.
* 11- Chartify: Chartify is a Python library that makes it easy for data scientists to create charts. Why use Chartify?
1. Consistent input data format: Spend less time transforming data to get your charts to work. All plotting functions use a consistent tidy input data format.
1. Smart default styles: Create pretty charts with very little customization required.
1. Simple API: We've attempted to make the API as intuitive and easy to learn as possible.
1. Flexibility: Chartify is built on top of Bokeh, so if you do need more control you can always fall back on Bokeh's API.

Link: https://github.com/mjbahmani/Machine-Learning-Workflow-with-Python
![cheatsheet ][1]
[Reference][2]
 [1]: http://s8.picofile.com/file/8340669884/53f6a826_d7df_4b55_81e6_7c23b3fff0a3_original.png
 [2]: https://blog.modeanalytics.com/python-data-visualization-libraries/

 4- Matplotlib
This Matplotlib tutorial takes you through the basics of Python data visualization: the anatomy of a plot, pyplot and pylab, and much more. [Go to top](top)
You can show matplotlib figures directly in the notebook by using the `%matplotlib notebook` and `%matplotlib inline` magic commands. `%matplotlib notebook` provides an interactive environment. We can use html cell magic to display the image.
###Code
plt.plot([1, 2, 3, 4], [10, 20, 25, 30], color='lightblue', linewidth=3)
plt.scatter([0.3, 3.8, 1.2, 2.5], [11, 25, 9, 26], color='darkgreen', marker='^')
plt.xlim(0.5, 4.5)
plt.show()
###Output
_____no_output_____
###Markdown
Simple and powerful visualizations can be generated using the Matplotlib Python Library. More than a decade old, it is the most widely-used library for plotting in the Python community. A wide range of graphs from histograms to heat plots to line plots can be plotted using Matplotlib. Many other libraries are built on top of Matplotlib and are designed to work in conjunction with analysis, it being the first Python data visualization library. Libraries like pandas and Seaborn are “wrappers” over Matplotlib allowing access to a number of Matplotlib’s methods with less code. 4-1 Scatterplots
###Code
x = np.array([1,2,3,4,5,6,7,8])
y = x
plt.figure()
plt.scatter(x, y) # similar to plt.plot(x, y, '.'), but the underlying child objects in the axes are not Line2D
x = np.array([1,2,3,4,5,6,7,8])
y = x
# create a list of colors for each point to have
# ['green', 'green', 'green', 'green', 'green', 'green', 'green', 'red']
colors = ['green']*(len(x)-1)
colors.append('red')
plt.figure()
# plot the point with size 100 and chosen colors
plt.scatter(x, y, s=100, c=colors)
plt.figure()
# plot a data series 'Tall students' in red using the first two elements of x and y
plt.scatter(x[:2], y[:2], s=100, c='red', label='Tall students')
# plot a second data series 'Short students' in blue using the last three elements of x and y
plt.scatter(x[2:], y[2:], s=100, c='blue', label='Short students')
x = np.random.randint(low=1, high=11, size=50)
y = x + np.random.randint(1, 5, size=x.size)
data = np.column_stack((x, y))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2,
figsize=(8, 4))
ax1.scatter(x=x, y=y, marker='o', c='r', edgecolor='b')
ax1.set_title('Scatter: $x$ versus $y$')
ax1.set_xlabel('$x$')
ax1.set_ylabel('$y$')
ax2.hist(data, bins=np.arange(data.min(), data.max()),
label=('x', 'y'))
ax2.legend(loc=(0.65, 0.8))
ax2.set_title('Frequencies of $x$ and $y$')
ax2.yaxis.tick_right()
# scatter TotalVotes against TotalViews for your kernels
#command--> 19
x=yourkernels["TotalVotes"]
y=yourkernels["TotalViews"]
plt.scatter(x, y, label='your kernels')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
4-2 Line Plots
###Code
linear_data = np.array([1,2,3,4,5,6,7,8])
exponential_data = linear_data**2
plt.figure()
# plot the linear data and the exponential data
plt.plot(linear_data, '-o', exponential_data, '-o')
# plot another series with a dashed red line
plt.plot([22,44,55], '--r')
###Output
_____no_output_____
###Markdown
4-3 Bar Charts
###Code
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3)
new_xvals = []
# plot another set of bars, adjusting the new xvals to make up for the first set of bars plotted
for item in xvals:
new_xvals.append(item+0.3)
plt.bar(new_xvals, exponential_data, width = 0.3 ,color='red')
from random import randint
linear_err = [randint(0,15) for x in range(len(linear_data))]
# This will plot a new set of bars with errorbars using the list of random error values
plt.bar(xvals, linear_data, width = 0.3, yerr=linear_err)
# stacked bar charts are also possible
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3, color='b')
plt.bar(xvals, exponential_data, width = 0.3, bottom=linear_data, color='r')
# or use barh for horizontal bar charts
plt.figure()
xvals = range(len(linear_data))
plt.barh(xvals, linear_data, height = 0.3, color='b')
plt.barh(xvals, exponential_data, height = 0.3, left=linear_data, color='r')
# Initialize the plot
fig = plt.figure(figsize=(20,10))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
# or replace the three lines of code above by the following line:
#fig, (ax1, ax2) = plt.subplots(1,2, figsize=(20,10))
# Plot the data
ax1.bar([1,2,3],[3,4,5])
ax2.barh([0.5,1,2.5],[0,1,2])
# Show the plot
plt.show()
plt.figure()
# subplot with 1 row, 2 columns, and current axis is 1st subplot axes
plt.subplot(1, 2, 1)
linear_data = np.array([1,2,3,4,5,6,7,8])
plt.plot(linear_data, '-o')
exponential_data = linear_data**2
# subplot with 1 row, 2 columns, and current axis is 2nd subplot axes
plt.subplot(1, 2, 2)
plt.plot(exponential_data, '-o')
# plot exponential data on 1st subplot axes
plt.subplot(1, 2, 1)
plt.plot(exponential_data, '-x')
plt.figure()
ax1 = plt.subplot(1, 2, 1)
plt.plot(linear_data, '-o')
# pass sharey=ax1 to ensure the two subplots share the same y axis
ax2 = plt.subplot(1, 2, 2, sharey=ax1)
plt.plot(exponential_data, '-x')
###Output
_____no_output_____
###Markdown
4-4 Histograms
###Code
# create 2x2 grid of axis subplots
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
# draw n = 10, 100, 1000, and 10000 samples from the normal distribution and plot corresponding histograms
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample)
axs[n].set_title('n={}'.format(sample_size))
# repeat with number of bins set to 100
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample, bins=100)
axs[n].set_title('n={}'.format(sample_size))
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
plt.scatter(X,Y)
###Output
_____no_output_____
###Markdown
It looks like at least one of the variables above follows a Gaussian distribution. This is useful to note, as we can use algorithms that exploit this assumption.
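A quick numeric check of that impression, as a sketch (assuming scipy is available; normaltest returns a statistic and a p-value, and a small p-value rejects normality):
###Code
from scipy import stats

print('Y (normal sample): ', stats.normaltest(Y))
print('X (uniform sample):', stats.normaltest(X))
###Output
_____no_output_____
###Markdown
Back to the kernels data: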
###Code
yourkernels["TotalViews"].hist();
yourkernels["TotalComments"].hist();
sns.factorplot('TotalViews','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
4-5 Box and Whisker PlotsIn descriptive statistics, a **box plot** or boxplot is a method for graphically depicting groups of numerical data through their quartiles. Box plots may also have lines extending vertically from the boxes (whiskers) indicating variability outside the upper and lower quartiles, hence the terms box-and-whisker plot and box-and-whisker diagram.[wikipedia]
###Code
normal_sample = np.random.normal(loc=0.0, scale=1.0, size=10000)
random_sample = np.random.random(size=10000)
gamma_sample = np.random.gamma(2, size=10000)
df = pd.DataFrame({'normal': normal_sample,
'random': random_sample,
'gamma': gamma_sample})
plt.figure()
# create a boxplot of the normal data, assign the output to a variable to supress output
_ = plt.boxplot(df['normal'], whis='range')
# clear the current figure
plt.clf()
# plot boxplots for all three of df's columns
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis='range')
plt.figure()
_ = plt.hist(df['gamma'], bins=100)
import mpl_toolkits.axes_grid1.inset_locator as mpl_il
plt.figure()
plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis='range')
# overlay axis on top of another
ax2 = mpl_il.inset_axes(plt.gca(), width='60%', height='40%', loc=2)
ax2.hist(df['gamma'], bins=100)
ax2.margins(x=0.5)
# switch the y axis ticks for ax2 to the right side
ax2.yaxis.tick_right()
# if `whis` argument isn't passed, boxplot defaults to showing 1.5*interquartile (IQR) whiskers with outliers
plt.figure()
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ] )
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
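###Markdown
To tie the plots back to the definition above, a sketch computing the quartiles and the default 1.5*IQR whisker bounds for the normal sample:
###Code
q1, q3 = np.percentile(df['normal'], [25, 75])
iqr = q3 - q1
print('Q1 = %.3f, Q3 = %.3f, IQR = %.3f' % (q1, q3, iqr))
# without the whis argument, whiskers reach the last data point within 1.5*IQR of the box
print('whisker bounds: [%.3f, %.3f]' % (q1 - 1.5 * iqr, q3 + 1.5 * iqr))
###Output
_____no_output_____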
###Markdown
4-6 Heatmaps
###Code
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
_ = plt.hist2d(X, Y, bins=25)
plt.figure()
_ = plt.hist2d(X, Y, bins=100)
###Output
_____no_output_____
###Markdown
4-7 Animations
###Code
import matplotlib.animation as animation
n = 100
x = np.random.randn(n)
# create the function that will do the plotting, where curr is the current frame
def update(curr):
    # check if animation is at the last frame, and if so, stop the animation
if curr == n:
a.event_source.stop()
plt.cla()
bins = np.arange(-4, 4, 0.5)
plt.hist(x[:curr], bins=bins)
plt.axis([-4,4,0,30])
plt.gca().set_title('Sampling the Normal Distribution')
plt.gca().set_ylabel('Frequency')
plt.gca().set_xlabel('Value')
plt.annotate('n = {}'.format(curr), [3,27])
fig = plt.figure()
a = animation.FuncAnimation(fig, update, interval=100)
###Output
_____no_output_____
###Markdown
4-8 Interactivity
###Code
plt.figure()
data = np.random.rand(10)
plt.plot(data)
def onclick(event):
plt.cla()
plt.plot(data)
plt.gca().set_title('Event at pixels {},{} \nand data {},{}'.format(event.x, event.y, event.xdata, event.ydata))
# tell mpl_connect we want to pass a 'button_press_event' into onclick when the event is detected
plt.gcf().canvas.mpl_connect('button_press_event', onclick)
from random import shuffle
origins = ['China', 'Brazil', 'India', 'USA', 'Canada', 'UK', 'Germany', 'Iraq', 'Chile', 'Mexico']
shuffle(origins)
df = pd.DataFrame({'height': np.random.rand(10),
'weight': np.random.rand(10),
'origin': origins})
df
plt.figure()
# picker=5 means the mouse doesn't have to click directly on an event, but can be up to 5 pixels away
plt.scatter(df['height'], df['weight'], picker=5)
plt.gca().set_ylabel('Weight')
plt.gca().set_xlabel('Height')
def onpick(event):
origin = df.iloc[event.ind[0]]['origin']
plt.gca().set_title('Selected item came from {}'.format(origin))
# tell mpl_connect we want to pass a 'pick_event' into onpick when the event is detected
plt.gcf().canvas.mpl_connect('pick_event', onpick)
# use the 'seaborn-colorblind' style
plt.style.use('seaborn-colorblind')
###Output
_____no_output_____
###Markdown
4-9 DataFrame.plot
###Code
np.random.seed(123)
df = pd.DataFrame({'A': np.random.randn(365).cumsum(0),
'B': np.random.randn(365).cumsum(0) + 20,
'C': np.random.randn(365).cumsum(0) - 20},
index=pd.date_range('1/1/2017', periods=365))
df.head()
df.plot('A','B', kind = 'scatter');
###Output
_____no_output_____
###Markdown
You can also choose the plot kind by using the `DataFrame.plot.<kind>` methods instead of providing the `kind` keyword argument. `kind`:
- `'line'` : line plot (default)
- `'bar'` : vertical bar plot
- `'barh'` : horizontal bar plot
- `'hist'` : histogram
- `'box'` : boxplot
- `'kde'` : Kernel Density Estimation plot
- `'density'` : same as 'kde'
- `'area'` : area plot
- `'pie'` : pie plot
- `'scatter'` : scatter plot
- `'hexbin'` : hexbin plot

[Go to top](top)
###Code
# create a scatter plot of columns 'A' and 'C', with changing color (c) and size (s) based on column 'B'
df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
ax = df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
ax.set_aspect('equal')
df.plot.box();
df.plot.hist(alpha=0.7);
###Output
_____no_output_____
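###Markdown
The `kind` list above also mentions 'hexbin'; a minimal sketch on the same DataFrame:
###Code
# hexbin plot of columns 'A' and 'C'; gridsize controls the hexagon resolution
df.plot.hexbin('A', 'C', gridsize=25);
###Output
_____no_output_____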
###Markdown
[Kernel density estimation plots](https://en.wikipedia.org/wiki/Kernel_density_estimation) are useful for deriving a smooth continuous function from a given sample.
###Code
df.plot.kde();
###Output
_____no_output_____
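###Markdown
The smoothness of the estimate is controlled by the bandwidth; `bw_method` is passed through to scipy's gaussian_kde, and a smaller value follows the sample more closely:
###Code
# compare a narrower bandwidth with the default above
df.plot.kde(bw_method=0.3);
###Output
_____no_output_____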
###Markdown
 5- Seaborn
As you have just read, **Seaborn** is complementary to Matplotlib and it specifically targets statistical data visualization. But it goes even further than that: Seaborn extends Matplotlib, and that’s why it can address the two biggest frustrations of working with Matplotlib. Or, as Michael Waskom says in the “introduction to Seaborn”: “If matplotlib tries to make easy things easy and hard things possible, seaborn tries to make a well-defined set of hard things easy too.” One of these hard things or frustrations had to do with the default Matplotlib parameters. Seaborn works with different parameters, which undoubtedly speaks to those users that don’t use the default looks of the Matplotlib plots. Seaborn is a library for making statistical graphics in Python. It is built on top of matplotlib and closely integrated with pandas data structures. Here is some of the functionality that seaborn offers:
* A dataset-oriented API for examining relationships between multiple variables
* Specialized support for using categorical variables to show observations or aggregate statistics
* Options for visualizing univariate or bivariate distributions and for comparing them between subsets of data
* Automatic estimation and plotting of linear regression models for different kinds of dependent variables
* Convenient views onto the overall structure of complex datasets
* High-level abstractions for structuring multi-plot grids that let you easily build complex visualizations
* Concise control over matplotlib figure styling with several built-in themes
* Tools for choosing color palettes that faithfully reveal patterns in your data

Seaborn aims to make visualization a central part of exploring and understanding data. Its dataset-oriented plotting functions operate on dataframes and arrays containing whole datasets and internally perform the necessary semantic mapping and statistical aggregation to produce informative plots. Here’s an example of what this means: [Go to top](top)

 5-1 Seaborn Vs Matplotlib
It is summarized that if Matplotlib “tries to make easy things easy and hard things possible”, Seaborn tries to make a well-defined set of hard things easy too. Seaborn helps resolve the two major problems faced by Matplotlib; the problems are:
* Default Matplotlib parameters
* Working with data frames

As Seaborn complements and extends Matplotlib, the learning curve is quite gradual. If you know Matplotlib, you are already half way through Seaborn. Important features of Seaborn: Seaborn is built on top of Python’s core visualization library Matplotlib. It is meant to serve as a complement, and not a replacement. However, Seaborn comes with some very important features. Let us see a few of them here. The features help in:
* Built-in themes for styling matplotlib graphics
* Visualizing univariate and bivariate data
* Fitting in and visualizing linear regression models
* Plotting statistical time series data
* Seaborn works well with NumPy and Pandas data structures
* It comes with built-in themes for styling Matplotlib graphics

In most cases, you will still use Matplotlib for simple plotting. The knowledge of Matplotlib is recommended to tweak Seaborn’s default plots. [Go to top](top)
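As a small sketch of that dataset-oriented API on the kernels data (assuming a seaborn version that ships scatterplot, i.e. 0.9 or later):
###Code
# one call maps DataFrame columns to x, y and hue
sns.scatterplot(data=yourkernels, x='TotalViews', y='TotalVotes', hue='Medal')
plt.show()
###Output
_____no_output_____
###Markdown
The next cells contrast matplotlib's default look with seaborn's styling: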
###Code
def sinplot(flip = 1):
x = np.linspace(0, 14, 100)
for i in range(1, 5):
plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip)
sinplot()
plt.show()
def sinplot(flip = 1):
x = np.linspace(0, 14, 100)
for i in range(1, 5):
plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip)
sns.set()
sinplot()
plt.show()
np.random.seed(1234)
v1 = pd.Series(np.random.normal(0,10,1000), name='v1')
v2 = pd.Series(2*v1 + np.random.normal(60,15,1000), name='v2')
plt.figure()
plt.hist(v1, alpha=0.7, bins=np.arange(-50,150,5), label='v1');
plt.hist(v2, alpha=0.7, bins=np.arange(-50,150,5), label='v2');
plt.legend();
plt.figure()
# we can pass keyword arguments for each individual component of the plot
sns.distplot(v2, hist_kws={'color': 'Teal'}, kde_kws={'color': 'Navy'});
sns.jointplot(v1, v2, alpha=0.4);
grid = sns.jointplot(v1, v2, alpha=0.4);
grid.ax_joint.set_aspect('equal')
sns.jointplot(v1, v2, kind='hex');
# set the seaborn style for all the following plots
sns.set_style('white')
sns.jointplot(v1, v2, kind='kde', space=0);
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
# violinplots on petal-length for each species
#command--> 24
sns.violinplot(data=yourkernels,x="TotalViews", y="TotalVotes")
# violinplots on petal-length for each species
sns.violinplot(data=yourkernels,x="TotalComments", y="TotalVotes")
sns.violinplot(data=yourkernels,x="Medal", y="TotalVotes")
sns.violinplot(data=yourkernels,x="Medal", y="TotalComments")
###Output
_____no_output_____
###Markdown
 5-2 kdeplot
###Code
# seaborn's kdeplot, plots univariate or bivariate density estimates.
#Size can be changed by tweeking the value used
#command--> 25
sns.FacetGrid(yourkernels, hue="Medal", size=5).map(sns.kdeplot, "TotalComments").add_legend()
plt.show()
sns.FacetGrid(yourkernels, hue="Medal", size=5).map(sns.kdeplot, "TotalVotes").add_legend()
plt.show()
f,ax=plt.subplots(1,3,figsize=(20,8))
sns.distplot(yourkernels[yourkernels['Medal']==1].TotalVotes,ax=ax[0])
ax[0].set_title('TotalVotes in Medal 1')
sns.distplot(yourkernels[yourkernels['Medal']==2].TotalVotes,ax=ax[1])
ax[1].set_title('TotalVotes in Medal 2')
sns.distplot(yourkernels[yourkernels['Medal']==3].TotalVotes,ax=ax[2])
ax[2].set_title('TotalVotes in Medal 3')
plt.show()
###Output
_____no_output_____
###Markdown
5-3 jointplot
###Code
# Use seaborn's jointplot to make a hexagonal bin plot
#Set desired size and ratio and choose a color.
#command--> 25
sns.jointplot(x="TotalVotes", y="TotalViews", data=yourkernels, size=10,ratio=10, kind='hex',color='green')
plt.show()
###Output
_____no_output_____
###Markdown
5-4 andrews_curves
###Code
# we will use seaborn's jointplot, which shows a bivariate scatterplot and univariate histograms
# with kernel density estimation in the same figure
sns.jointplot(x="TotalVotes", y="TotalViews", data=yourkernels, size=6, kind='kde', color='#800000', space=0)
###Output
_____no_output_____
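###Markdown
For actual Andrews curves, which this section's title refers to, pandas ships a helper; a sketch on a few numeric columns, colored by Medal (rows with missing values are dropped first):
###Code
from pandas.plotting import andrews_curves

cols = ['TotalVotes', 'TotalViews', 'TotalComments', 'Medal']
andrews_curves(yourkernels[cols].dropna(), 'Medal')
plt.show()
###Output
_____no_output_____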
###Markdown
5-5 Heatmap
###Code
#command--> 26
plt.figure(figsize=(10,7))
sns.heatmap(yourkernels.corr(),annot=True,cmap='cubehelix_r') # draws a heatmap of the correlation matrix calculated by yourkernels.corr()
plt.show()
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
5-6 distplot
###Code
sns.distplot(yourkernels['TotalVotes']);
###Output
_____no_output_____
###Markdown
 Top 5 Data Visualization Libraries Tutorial (last update: 11/29/2018)
> You may be interested in having a look at [**10-Steps-to-Become-a-Data-Scientist**](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist)

---------------------------------------------------------------------
You can Fork and Run this kernel on GitHub:
> [GitHub](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist)

**I hope you find this kernel helpful and some UPVOTES would be very much appreciated**

 Notebook Content
1. [Introduction](1)
1. [Loading Packages](2)
 1. [version](3)
 1. [Setup](4)
 1. [Data Collection](5)
1. [Matplotlib](26)
 1. [Scatterplots](27)
 1. [Line Plots](28)
 1. [Bar Charts](29)
 1. [Histograms](30)
 1. [Box and Whisker Plots](31)
 1. [Heatmaps](32)
 1. [Animations](33)
 1. [Interactivity](34)
 1. [DataFrame.plot](35)
1. [Seaborn](36)
 1. [Seaborn Vs Matplotlib](37)
 1. [Useful Python Data Visualization Libraries](38)
1. [Plotly](60)
 1. [New to Plotly?](61)
 1. [Plotly Offline from Command Line](62)
1. [Bokeh](63)
1. [Read more](39)
 1. [Courses](40)
 1. [Ebooks](41)
 1. [Cheat sheet](41)
1. [Conclusion](39)
 1. [References](40)

 1- Introduction
If you've followed my other kernels so far, you have noticed that, for those who are beginners, I've introduced a course "10 Steps to Become a Data Scientist". In this kernel we will take another step together. There are plenty of kernels that can help you learn Python's libraries from scratch, but here in Kaggle I want to analyze Meta Kaggle, a popular dataset. After reading, you can use it to analyze other real datasets and use it as a template to deal with ML problems. It is clear that everyone in this community is familiar with the Meta Kaggle dataset, but if you need to review your information about it, please visit [meta-kaggle](https://www.kaggle.com/kaggle/meta-kaggle). I am open to getting your feedback for improving this **kernel** together.

 2- Loading Packages
In this kernel we are using the following packages. Now we import all of them:
###Code
from wordcloud import WordCloud as wc
from matplotlib.figure import Figure
from nltk.corpus import stopwords
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib
import warnings
import string
import nltk
import csv
import os
###Output
_____no_output_____
###Markdown
2-1 version
###Code
print('matplotlib: {}'.format(matplotlib.__version__))
print('seaborn: {}'.format(sns.__version__))
print('pandas: {}'.format(pd.__version__))
print('numpy: {}'.format(np.__version__))
#print('wordcloud: {}'.format(wordcloud.version))
###Output
_____no_output_____
###Markdown
2-2 SetupA few tiny adjustments for better **code readability**
###Code
sns.set(style='white', context='notebook', palette='deep')
pylab.rcParams['figure.figsize'] = 12,8
warnings.filterwarnings('ignore')
mpl.style.use('ggplot')
sns.set_style('white')
%matplotlib inline
###Output
_____no_output_____
###Markdown
2-3 Data Collection**Data collection** is the process of gathering and measuring data, information or any variables of interest in a standardized and established manner that enables the collector to answer or test hypothesis and evaluate outcomes of the particular collection.[techopedia]I start Collection Data by the Users and Kernels datasets into **Pandas DataFrames**
###Code
# import kernels and users to play with it
#command--> 1
users = pd.read_csv("../input/Users.csv")
kernels = pd.read_csv("../input/Kernels.csv")
messages = pd.read_csv("../input/ForumMessages.csv")
###Output
_____no_output_____
###Markdown
 6 Data Collection Rules for Your Future Perfect Machine Learning Dataset:
1. Ensure the data has no gaps
1. Keep your raw data raw
1. Foresee and document all the possible missing values and outliers in your data
1. Changelogs and data structures versioning
1. Ensure the data points can’t get lost
1. Hire a data officer. [reference](https://towardsdatascience.com/how-to-collect-your-deep-learning-dataset-2e0eefc0ba24)

> * Each row is an observation (also known as: sample, example, instance, record)
> * Each column is a feature (also known as: Predictor, attribute, Independent Variable, input, regressor, Covariate)

[Go to top](top)
###Code
#command--> 2
users.sample(1)
###Output
_____no_output_____
###Markdown
Please **replace** your username and find your userid. We suppose that userid==authoruserid and use userid for both the kernels and users datasets.
###Code
username="mjbahmani"
userid=int(users[users['UserName']=="mjbahmani"].Id)
userid
###Output
_____no_output_____
###Markdown
But if we had missing values, we could just use **dropna()** (be careful: sometimes you should not do this!)
###Code
# remove rows that have NA's
print('Before Droping',messages.shape)
#command--> 3
messages = messages.dropna()
print('After Droping',messages.shape)
###Output
_____no_output_____
###Markdown
 2-3-1 Features
Features can be of the following types:
1. numeric
1. categorical
1. ordinal
1. datetime
1. coordinates

Can you find the types of the features in **Meta Kaggle**? For getting some information about the dataset you can use the **info()** command. [Go to top](top)
###Code
#command--> 4
print(users.info())
###Output
_____no_output_____
###Markdown
 2-3-2 Explore the Dataset
1. Dimensions of the dataset.
1. Peek at the data itself.
1. Statistical summary of all attributes.
1. Breakdown of the data by the class variable.

Don’t worry, each look at the data is **one command**. These are useful commands that you can use again and again on future projects. [Go to top](top)
###Code
# shape
#command--> 5
print(users.shape)
#columns*rows
#command--> 6
users.size
###Output
_____no_output_____
###Markdown
We can get a quick idea of how many instances (rows) and how many attributes (columns) the data contains with the shape property. You can see the number of unique items for **Medal** with the command below:
###Code
#command--> 7
kernels['Medal'].unique()
#command--> 8
kernels["Medal"].value_counts()
###Output
_____no_output_____
###Markdown
To check the first 5 rows of the data set, we can use head(5).
###Code
kernels.head(5)
###Output
_____no_output_____
###Markdown
To check out the last 5 rows of the data set, we use the tail() function
###Code
#command--> 9
users.tail()
###Output
_____no_output_____
###Markdown
To pop up 5 random rows from the data set, we can use the **sample(5)** function
###Code
kernels.sample(5)
###Output
_____no_output_____
###Markdown
To give a statistical summary about the dataset, we can use **describe()**
###Code
kernels.describe()
###Output
_____no_output_____
###Markdown
 2-3-4 Data Cleaning
When dealing with real-world data, dirty data is the norm rather than the exception. We continuously need to predict correct values, impute missing ones, and find links between various data artefacts such as schemas and records. We need to stop treating data cleaning as a piecemeal exercise (resolving different types of errors in isolation), and instead leverage all signals and resources (such as constraints, available statistics, and dictionaries) to accurately predict corrective actions. The primary goal of data cleaning is to detect and remove errors and **anomalies** to increase the value of data in analytics and decision making. While it has been the focus of many researchers for several years, individual problems have been addressed separately. These include missing value imputation, outlier detection, transformations, integrity constraint violation detection and repair, consistent query answering, deduplication, and many other related problems such as profiling and constraints mining.[4] [Go to top](top)

How many NA elements are in every column? Good news: it is zero! To check out how many null values are in the dataset, we can use **isnull().sum()**.
###Code
#How many NA elements in every column
#command--> 10
users.isnull().sum()
kernels.isnull().sum()
#command--> 11
kernels.groupby('Medal').count()
###Output
_____no_output_____
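###Markdown
The cleaning problems listed above include missing value imputation; besides dropping rows with dropna(), a sketch of filling instead (the fill value 0 is just illustrative):
###Code
# impute missing Medal values with 0 on a copy, without mutating the original column
medal_filled = kernels['Medal'].fillna(0)
print('missing before:', kernels['Medal'].isnull().sum())
print('missing after: ', medal_filled.isnull().sum())
###Output
_____no_output_____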
###Markdown
To print the dataset **columns**, we can use the columns attribute.
###Code
kernels.columns
###Output
_____no_output_____
###Markdown
> In a pandas DataFrame you can perform queries such as "where" (see the query() sketch after the next cell). 2-3-5 Find yourself in the Users dataset
###Code
#command--> 12
users[users['Id']==userid]
###Output
_____no_output_____
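###Markdown
The same "where"-style lookup can also be written with DataFrame.query; the @ prefix references a local Python variable:
###Code
# equivalent to users[users['Id']==userid]
users.query('Id == @userid')
###Output
_____no_output_____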
###Markdown
2-3-6 Find your kernels in Kernels dataset
###Code
#command--> 13
yourkernels=kernels[kernels['AuthorUserId']==userid]
yourkernels
###Output
_____no_output_____
###Markdown
 4- Matplotlib
This Matplotlib tutorial takes you through the basics of Python data visualization: the anatomy of a plot, pyplot and pylab, and much more. [Go to top](top)
You can show matplotlib figures directly in the notebook by using the `%matplotlib notebook` and `%matplotlib inline` magic commands. `%matplotlib notebook` provides an interactive environment.
###Code
# because the default is the line style '-',
# nothing will be shown if we only pass in one point (3,2)
plt.plot(3, 2)
# we can pass in '.' to plt.plot to indicate that we want
# the point (3,2) to be indicated with a marker '.'
plt.plot(3, 2, '.')
###Output
_____no_output_____
###Markdown
Let's see how to make a plot without using the scripting layer. We can use html cell magic to display the image.
###Code
# create a new figure
plt.figure()
# plot the point (3,2) using the circle marker
plt.plot(3, 2, 'o')
# get the current axes
ax = plt.gca()
# Set axis properties [xmin, xmax, ymin, ymax]
ax.axis([0,6,0,10])
# create a new figure
plt.figure()
# plot the point (1.5, 1.5) using the circle marker
plt.plot(1.5, 1.5, 'o')
# plot the point (2, 2) using the circle marker
plt.plot(2, 2, 'o')
# plot the point (2.5, 2.5) using the circle marker
plt.plot(2.5, 2.5, 'o')
# get current axes
ax = plt.gca()
# get all the child objects the axes contains
ax.get_children()
plt.plot([1, 2, 3, 4], [10, 20, 25, 30], color='lightblue', linewidth=3)
plt.scatter([0.3, 3.8, 1.2, 2.5], [11, 25, 9, 26], color='darkgreen', marker='^')
plt.xlim(0.5, 4.5)
plt.show()
###Output
_____no_output_____
###Markdown
Simple and powerful visualizations can be generated using the Matplotlib Python Library. More than a decade old, it is the most widely-used library for plotting in the Python community. A wide range of graphs from histograms to heat plots to line plots can be plotted using Matplotlib. Many other libraries are built on top of Matplotlib and are designed to work in conjunction with analysis, it being the first Python data visualization library. Libraries like pandas and Seaborn are “wrappers” over Matplotlib allowing access to a number of Matplotlib’s methods with less code. 4-1 Scatterplots
###Code
x = np.array([1,2,3,4,5,6,7,8])
y = x
plt.figure()
plt.scatter(x, y) # similar to plt.plot(x, y, '.'), but the underlying child objects in the axes are not Line2D
x = np.array([1,2,3,4,5,6,7,8])
y = x
# create a list of colors for each point to have
# ['green', 'green', 'green', 'green', 'green', 'green', 'green', 'red']
colors = ['green']*(len(x)-1)
colors.append('red')
plt.figure()
# plot the point with size 100 and chosen colors
plt.scatter(x, y, s=100, c=colors)
# convert the two lists into a list of pairwise tuples
zip_generator = zip([1,2,3,4,5], [6,7,8,9,10])
print(list(zip_generator))
# the above prints:
# [(1, 6), (2, 7), (3, 8), (4, 9), (5, 10)]
zip_generator = zip([1,2,3,4,5], [6,7,8,9,10])
# The single star * unpacks a collection into positional arguments
print(*zip_generator)
# the above prints:
# (1, 6) (2, 7) (3, 8) (4, 9) (5, 10)
# use zip to convert 5 tuples with 2 elements each to 2 tuples with 5 elements each
print(list(zip((1, 6), (2, 7), (3, 8), (4, 9), (5, 10))))
# the above prints:
# [(1, 2, 3, 4, 5), (6, 7, 8, 9, 10)]
zip_generator = zip([1,2,3,4,5], [6,7,8,9,10])
# let's turn the data back into 2 lists
x, y = zip(*zip_generator) # This is like calling zip((1, 6), (2, 7), (3, 8), (4, 9), (5, 10))
print(x)
print(y)
# the above prints:
# (1, 2, 3, 4, 5)
# (6, 7, 8, 9, 10)
plt.figure()
# plot a data series 'Tall students' in red using the first two elements of x and y
plt.scatter(x[:2], y[:2], s=100, c='red', label='Tall students')
# plot a second data series 'Short students' in blue using the last three elements of x and y
plt.scatter(x[2:], y[2:], s=100, c='blue', label='Short students')
# add a label to the x axis
plt.xlabel('The number of times the child kicked a ball')
# add a label to the y axis
plt.ylabel('The grade of the student')
# add a title
plt.title('Relationship between ball kicking and grades')
# add a legend (uses the labels from plt.scatter)
plt.legend()
# add the legend to loc=4 (the lower right hand corner), also gets rid of the frame and adds a title
plt.legend(loc=4, frameon=False, title='Legend')
# get children from current axes (the legend is the second to last item in this list)
plt.gca().get_children()
# get the legend from the current axes
legend = plt.gca().get_children()[-2]
x = np.random.randint(low=1, high=11, size=50)
y = x + np.random.randint(1, 5, size=x.size)
data = np.column_stack((x, y))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2,
figsize=(8, 4))
ax1.scatter(x=x, y=y, marker='o', c='r', edgecolor='b')
ax1.set_title('Scatter: $x$ versus $y$')
ax1.set_xlabel('$x$')
ax1.set_ylabel('$y$')
ax2.hist(data, bins=np.arange(data.min(), data.max()),
label=('x', 'y'))
ax2.legend(loc=(0.65, 0.8))
ax2.set_title('Frequencies of $x$ and $y$')
ax2.yaxis.tick_right()
#command--> 18
yourkernels.columns
# Modify the graph above by assigning each medal class an individual color.
#command--> 19
x=yourkernels["TotalVotes"]
y=yourkernels["TotalViews"]
plt.scatter(x, y, label='kernels')
plt.legend()
plt.show()
f,ax=plt.subplots(1,2,figsize=(18,8))
yourkernels['Medal'].value_counts().plot.bar(color=['#CD7F32','#FFDF00','#D3D3D3'],ax=ax[0])
ax[0].set_title('Number of Medals')
ax[0].set_ylabel('Count')
plt.show()
###Output
_____no_output_____
###Markdown
4-2 Line Plots
###Code
linear_data = np.array([1,2,3,4,5,6,7,8])
exponential_data = linear_data**2
plt.figure()
# plot the linear data and the exponential data
plt.plot(linear_data, '-o', exponential_data, '-o')
# plot another series with a dashed red line
plt.plot([22,44,55], '--r')
###Output
_____no_output_____
###Markdown
4-3 Bar Charts
###Code
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3)
new_xvals = []
# plot another set of bars, adjusting the new xvals to make up for the first set of bars plotted
for item in xvals:
new_xvals.append(item+0.3)
plt.bar(new_xvals, exponential_data, width = 0.3 ,color='red')
from random import randint
linear_err = [randint(0,15) for x in range(len(linear_data))]
# This will plot a new set of bars with errorbars using the list of random error values
plt.bar(xvals, linear_data, width = 0.3, yerr=linear_err)
# stacked bar charts are also possible
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3, color='b')
plt.bar(xvals, exponential_data, width = 0.3, bottom=linear_data, color='r')
# or use barh for horizontal bar charts
plt.figure()
xvals = range(len(linear_data))
plt.barh(xvals, linear_data, height = 0.3, color='b')
plt.barh(xvals, exponential_data, height = 0.3, left=linear_data, color='r')
# Initialize the plot
fig = plt.figure(figsize=(20,10))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
# or replace the three lines of code above by the following line:
#fig, (ax1, ax2) = plt.subplots(1,2, figsize=(20,10))
# Plot the data
ax1.bar([1,2,3],[3,4,5])
ax2.barh([0.5,1,2.5],[0,1,2])
# Show the plot
plt.show()
plt.figure()
# subplot with 1 row, 2 columns, and current axis is 1st subplot axes
plt.subplot(1, 2, 1)
linear_data = np.array([1,2,3,4,5,6,7,8])
plt.plot(linear_data, '-o')
exponential_data = linear_data**2
# subplot with 1 row, 2 columns, and current axis is 2nd subplot axes
plt.subplot(1, 2, 2)
plt.plot(exponential_data, '-o')
# plot exponential data on 1st subplot axes
plt.subplot(1, 2, 1)
plt.plot(exponential_data, '-x')
plt.figure()
ax1 = plt.subplot(1, 2, 1)
plt.plot(linear_data, '-o')
# pass sharey=ax1 to ensure the two subplots share the same y axis
ax2 = plt.subplot(1, 2, 2, sharey=ax1)
plt.plot(exponential_data, '-x')
plt.figure()
# the right hand side is equivalent shorthand syntax
plt.subplot(1,2,1) == plt.subplot(121)
# create a 3x3 grid of subplots
fig, ((ax1,ax2,ax3), (ax4,ax5,ax6), (ax7,ax8,ax9)) = plt.subplots(3, 3, sharex=True, sharey=True)
# plot the linear_data on the 5th subplot axes
ax5.plot(linear_data, '-')
# set inside tick labels to visible
for ax in plt.gcf().get_axes():
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_visible(True)
plt.show()
# necessary on some systems to update the plot
plt.gcf().canvas.draw()
plt.show()
###Output
_____no_output_____
###Markdown
4-4 Histograms
###Code
# create 2x2 grid of axis subplots
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
# draw n = 10, 100, 1000, and 10000 samples from the normal distribution and plot corresponding histograms
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample)
axs[n].set_title('n={}'.format(sample_size))
# repeat with number of bins set to 100
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample, bins=100)
axs[n].set_title('n={}'.format(sample_size))
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
plt.scatter(X,Y)
# use gridspec to partition the figure into subplots
import matplotlib.gridspec as gridspec
plt.figure()
gspec = gridspec.GridSpec(3, 3)
top_histogram = plt.subplot(gspec[0, 1:])
side_histogram = plt.subplot(gspec[1:, 0])
lower_right = plt.subplot(gspec[1:, 1:])
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
lower_right.scatter(X, Y)
top_histogram.hist(X, bins=100)
s = side_histogram.hist(Y, bins=100, orientation='horizontal')
# clear the histograms and plot density-normalized histograms
# (density= replaces the normed= argument removed from matplotlib)
top_histogram.clear()
top_histogram.hist(X, bins=100, density=True)
side_histogram.clear()
side_histogram.hist(Y, bins=100, orientation='horizontal', density=True)
# flip the side histogram's x axis
side_histogram.invert_xaxis()
# change axes limits
for ax in [top_histogram, lower_right]:
ax.set_xlim(0, 1)
for ax in [side_histogram, lower_right]:
ax.set_ylim(-5, 5)
# histograms
#command--> 24
yourkernels.hist(figsize=(15,20))
plt.figure()
###Output
_____no_output_____
###Markdown
It looks like perhaps two of the input variables have a Gaussian distribution. This is useful to note as we can use algorithms that can exploit this assumption.
###Code
yourkernels["TotalViews"].hist();
yourkernels["TotalComments"].hist();
sns.factorplot('TotalViews','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
4-5 Box and Whisker PlotsIn descriptive statistics, a **box plot** or boxplot is a method for graphically depicting groups of numerical data through their quartiles. Box plots may also have lines extending vertically from the boxes (whiskers) indicating variability outside the upper and lower quartiles, hence the terms box-and-whisker plot and box-and-whisker diagram.[wikipedia]
###Code
normal_sample = np.random.normal(loc=0.0, scale=1.0, size=10000)
random_sample = np.random.random(size=10000)
gamma_sample = np.random.gamma(2, size=10000)
df = pd.DataFrame({'normal': normal_sample,
'random': random_sample,
'gamma': gamma_sample})
df.describe()
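# to connect the definition above to the data, the quartiles the box plot
# will draw can be checked directly (np.percentile is used only to illustrate)
q1, median, q3 = np.percentile(df['normal'], [25, 50, 75])
print('Q1={:.3f}, median={:.3f}, Q3={:.3f}, IQR={:.3f}'.format(q1, median, q3, q3 - q1))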
plt.figure()
# create a boxplot of the normal data, assign the output to a variable to suppress output
# (whis=(0, 100) spans the full data range, like the old whis='range' option)
_ = plt.boxplot(df['normal'], whis=(0, 100))
# clear the current figure
plt.clf()
# plot boxplots for all three of df's columns
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis=(0, 100))
plt.figure()
_ = plt.hist(df['gamma'], bins=100)
import mpl_toolkits.axes_grid1.inset_locator as mpl_il
plt.figure()
plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis=(0, 100))
# overlay axis on top of another
ax2 = mpl_il.inset_axes(plt.gca(), width='60%', height='40%', loc=2)
ax2.hist(df['gamma'], bins=100)
ax2.margins(x=0.5)
# switch the y axis ticks for ax2 to the right side
ax2.yaxis.tick_right()
# if `whis` argument isn't passed, boxplot defaults to showing 1.5*interquartile (IQR) whiskers with outliers
plt.figure()
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ] )
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
4-6 Heatmaps
###Code
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
_ = plt.hist2d(X, Y, bins=25)
plt.figure()
_ = plt.hist2d(X, Y, bins=100)
###Output
_____no_output_____
###Markdown
4-7 Animations
###Code
import matplotlib.animation as animation
n = 100
x = np.random.randn(n)
# create the function that will do the plotting, where curr is the current frame
def update(curr):
# check if animation is at the last frame, and if so, stop the animation
if curr == n:
a.event_source.stop()
plt.cla()
bins = np.arange(-4, 4, 0.5)
plt.hist(x[:curr], bins=bins)
plt.axis([-4,4,0,30])
plt.gca().set_title('Sampling the Normal Distribution')
plt.gca().set_ylabel('Frequency')
plt.gca().set_xlabel('Value')
plt.annotate('n = {}'.format(curr), [3,27])
fig = plt.figure()
a = animation.FuncAnimation(fig, update, interval=100)
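# note: in a plain script the animation could also be exported, e.g. with
# a.save('hist.mp4'), provided a movie writer such as ffmpeg is installed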
###Output
_____no_output_____
###Markdown
4-8 Interactivity
###Code
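# note: the event handlers below only respond under an interactive backend
# (e.g. %matplotlib notebook); with %matplotlib inline the figure is static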
plt.figure()
data = np.random.rand(10)
plt.plot(data)
def onclick(event):
plt.cla()
plt.plot(data)
plt.gca().set_title('Event at pixels {},{} \nand data {},{}'.format(event.x, event.y, event.xdata, event.ydata))
# tell mpl_connect we want to pass a 'button_press_event' into onclick when the event is detected
plt.gcf().canvas.mpl_connect('button_press_event', onclick)
from random import shuffle
origins = ['China', 'Brazil', 'India', 'USA', 'Canada', 'UK', 'Germany', 'Iraq', 'Chile', 'Mexico']
shuffle(origins)
df = pd.DataFrame({'height': np.random.rand(10),
'weight': np.random.rand(10),
'origin': origins})
df
plt.figure()
# picker=5 means the mouse doesn't have to click directly on an event, but can be up to 5 pixels away
plt.scatter(df['height'], df['weight'], picker=5)
plt.gca().set_ylabel('Weight')
plt.gca().set_xlabel('Height')
def onpick(event):
origin = df.iloc[event.ind[0]]['origin']
plt.gca().set_title('Selected item came from {}'.format(origin))
# tell mpl_connect we want to pass a 'pick_event' into onpick when the event is detected
plt.gcf().canvas.mpl_connect('pick_event', onpick)
# use the 'seaborn-colorblind' style
plt.style.use('seaborn-colorblind')
###Output
_____no_output_____
###Markdown
4-9 DataFrame.plot
###Code
np.random.seed(123)
df = pd.DataFrame({'A': np.random.randn(365).cumsum(0),
'B': np.random.randn(365).cumsum(0) + 20,
'C': np.random.randn(365).cumsum(0) - 20},
index=pd.date_range('1/1/2017', periods=365))
df.head()
df.plot('A','B', kind = 'scatter');
###Output
_____no_output_____
###Markdown
You can also choose the plot kind by using the `DataFrame.plot.<kind>` accessor methods instead of providing the `kind` keyword argument. `kind`:- `'line'` : line plot (default)- `'bar'` : vertical bar plot- `'barh'` : horizontal bar plot- `'hist'` : histogram- `'box'` : boxplot- `'kde'` : Kernel Density Estimation plot- `'density'` : same as 'kde'- `'area'` : area plot- `'pie'` : pie plot- `'scatter'` : scatter plot- `'hexbin'` : hexbin plot [Go to top](top)
###Code
# create a scatter plot of columns 'A' and 'C', with changing color (c) and size (s) based on column 'B'
df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
ax = df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
ax.set_aspect('equal')
df.plot.box();
df.plot.hist(alpha=0.7);
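# the same accessor style works for the other kinds listed above, e.g. an
# unstacked area plot (stacked=False because these series take negative values);
# this call is equivalent to df.plot(kind='area', stacked=False)
df.plot.area(stacked=False, alpha=0.4);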
###Output
_____no_output_____
###Markdown
[Kernel density estimation plots](https://en.wikipedia.org/wiki/Kernel_density_estimation) are useful for deriving a smooth continuous function from a given sample.
###Code
df.plot.kde();
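# the kde smoothing bandwidth can be tuned via bw_method (smaller values
# follow the sample more closely); 0.3 here is just an illustrative value
df.plot.kde(bw_method=0.3);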
###Output
_____no_output_____
###Markdown
5- SeabornAs you have just read, **Seaborn** is complementary to Matplotlib and it specifically targets statistical data visualization. But it goes even further than that: Seaborn extends Matplotlib, and that’s why it can address the two biggest frustrations of working with Matplotlib. Or, as Michael Waskom says in the “introduction to Seaborn”: “If matplotlib tries to make easy things easy and hard things possible, seaborn tries to make a well-defined set of hard things easy too.” One of these hard things or frustrations had to do with the default Matplotlib parameters. Seaborn works with different parameters, which undoubtedly speaks to those users that don’t use the default looks of the Matplotlib plots. Seaborn is a library for making statistical graphics in Python. It is built on top of matplotlib and closely integrated with pandas data structures. Here is some of the functionality that seaborn offers: a dataset-oriented API for examining relationships between multiple variables; specialized support for using categorical variables to show observations or aggregate statistics; options for visualizing univariate or bivariate distributions and for comparing them between subsets of data; automatic estimation and plotting of linear regression models for different kinds of dependent variables; convenient views onto the overall structure of complex datasets; high-level abstractions for structuring multi-plot grids that let you easily build complex visualizations; concise control over matplotlib figure styling with several built-in themes; and tools for choosing color palettes that faithfully reveal patterns in your data. Seaborn aims to make visualization a central part of exploring and understanding data. Its dataset-oriented plotting functions operate on dataframes and arrays containing whole datasets and internally perform the necessary semantic mapping and statistical aggregation to produce informative plots. Here’s an example of what this means (see the sketch below):[Go to top](top) 5-1 Seaborn Vs MatplotlibIt is summarized that if Matplotlib “tries to make easy things easy and hard things possible”, Seaborn tries to make a well-defined set of hard things easy too. Seaborn helps resolve the two major problems faced by Matplotlib; the problems are:* Default Matplotlib parameters* Working with data framesAs Seaborn complements and extends Matplotlib, the learning curve is quite gradual. If you know Matplotlib, you are already half way through Seaborn.Important Features of SeabornSeaborn is built on top of Python’s core visualization library Matplotlib. It is meant to serve as a complement, and not a replacement. However, Seaborn comes with some very important features. Let us see a few of them here. The features help in:* Built-in themes for styling matplotlib graphics* Visualizing univariate and bivariate data* Fitting in and visualizing linear regression models* Plotting statistical time series data* Seaborn works well with NumPy and Pandas data structures* It comes with built-in themes for styling Matplotlib graphicsIn most cases, you will still use Matplotlib for simple plotting. The knowledge of Matplotlib is recommended to tweak Seaborn’s default plots.[Go to top](top)
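As a quick sketch of that dataset-oriented style (assuming seaborn >= 0.9 for `relplot` and network access for `load_dataset`):
###Code
# a single call maps dataframe columns onto x, y and hue semantics
tips = sns.load_dataset('tips')
sns.relplot(x='total_bill', y='tip', hue='smoker', data=tips)
plt.show()
###Output
_____no_output_____
###Markdown
First, compare matplotlib's default style with seaborn's: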
###Code
def sinplot(flip = 1):
x = np.linspace(0, 14, 100)
for i in range(1, 5):
plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip)
sinplot()
plt.show()
def sinplot(flip = 1):
x = np.linspace(0, 14, 100)
for i in range(1, 5):
plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip)
sns.set()
sinplot()
plt.show()
np.random.seed(1234)
v1 = pd.Series(np.random.normal(0,10,1000), name='v1')
v2 = pd.Series(2*v1 + np.random.normal(60,15,1000), name='v2')
plt.figure()
plt.hist(v1, alpha=0.7, bins=np.arange(-50,150,5), label='v1');
plt.hist(v2, alpha=0.7, bins=np.arange(-50,150,5), label='v2');
plt.legend();
plt.figure()
# we can pass keyword arguments for each individual component of the plot
sns.distplot(v2, hist_kws={'color': 'Teal'}, kde_kws={'color': 'Navy'});
sns.jointplot(v1, v2, alpha=0.4);
grid = sns.jointplot(v1, v2, alpha=0.4);
grid.ax_joint.set_aspect('equal')
sns.jointplot(v1, v2, kind='hex');
# set the seaborn style for all the following plots
sns.set_style('white')
sns.jointplot(v1, v2, kind='kde', space=0);
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
# violinplots on petal-length for each species
#command--> 24
sns.violinplot(data=yourkernels,x="TotalViews", y="TotalVotes")
# violinplots on petal-length for each species
sns.violinplot(data=yourkernels,x="TotalComments", y="TotalVotes")
sns.violinplot(data=yourkernels,x="Medal", y="TotalVotes")
sns.violinplot(data=yourkernels,x="Medal", y="TotalComments")
###Output
_____no_output_____
###Markdown
5-2 kdeplot
###Code
# seaborn's kdeplot, plots univariate or bivariate density estimates.
#Size can be changed by tweaking the value used
#command--> 25
sns.FacetGrid(yourkernels, hue="Medal", size=5).map(sns.kdeplot, "TotalComments").add_legend()
plt.show()
sns.FacetGrid(yourkernels, hue="Medal", size=5).map(sns.kdeplot, "TotalVotes").add_legend()
plt.show()
f,ax=plt.subplots(1,3,figsize=(20,8))
sns.distplot(yourkernels[yourkernels['Medal']==1].TotalVotes,ax=ax[0])
ax[0].set_title('TotalVotes in Medal 1')
sns.distplot(yourkernels[yourkernels['Medal']==2].TotalVotes,ax=ax[1])
ax[1].set_title('TotalVotes in Medal 2')
sns.distplot(yourkernels[yourkernels['Medal']==3].TotalVotes,ax=ax[2])
ax[2].set_title('TotalVotes in Medal 3')
plt.show()
###Output
_____no_output_____
###Markdown
5-3 jointplot
###Code
# Use seaborn's jointplot to make a hexagonal bin plot
#Set desired size and ratio and choose a color.
#command--> 25
sns.jointplot(x="TotalVotes", y="TotalViews", data=yourkernels, size=10,ratio=10, kind='hex',color='green')
plt.show()
###Output
_____no_output_____
###Markdown
5-4 andrews_curves
###Code
# seaborn's jointplot shows bivariate scatterplots and univariate histograms with
# kernel density estimation in the same figure
sns.jointplot(x="TotalVotes", y="TotalViews", data=yourkernels, size=6, kind='kde', color='#800000', space=0)
###Output
_____no_output_____
###Markdown
5-5 Heatmap
###Code
#command--> 26
plt.figure(figsize=(10,7))
sns.heatmap(yourkernels.corr(),annot=True,cmap='cubehelix_r') #draws heatmap with input as the correlation matrix calculated by yourkernels.corr()
plt.show()
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
5-6 distplot
###Code
sns.distplot(yourkernels['TotalVotes']);
###Output
_____no_output_____
###Markdown
6- PlotlyHow to use **Plotly** offline inside IPython notebooks. 6-1 New to Plotly?Plotly, also known by its URL, Plot.ly, is a technical computing company headquartered in Montreal, Quebec, that develops online data analytics and visualization tools. Plotly provides online graphing, analytics, and statistics tools for individuals and collaboration, as well as scientific graphing libraries for Python, R, MATLAB, Perl, Julia, Arduino, and REST.[Go to top](top)
###Code
# example for plotly
import plotly.offline as py
import plotly.graph_objs as go
py.init_notebook_mode(connected=True)
from plotly import tools
from sklearn import datasets
import plotly.figure_factory as ff
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
trace = go.Scatter(x=X[:, 0],
y=X[:, 1],
mode='markers',
marker=dict(color=np.random.randn(150),
size=10,
colorscale='Viridis',
showscale=False))
layout = go.Layout(title='Training Points',
xaxis=dict(title='Sepal length',
showgrid=False),
yaxis=dict(title='Sepal width',
showgrid=False),
)
fig = go.Figure(data=[trace], layout=layout)
py.iplot(fig)
###Output
_____no_output_____
###Markdown
6-2 Plotly Offline from Command LineYou can plot your graphs from a python script from command line. On executing the script, it will open a web browser with your Plotly Graph drawn.[Go to top](top)
###Code
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.graph_objs as go
plot([go.Scatter(x=[1, 2, 3], y=[3, 1, 6])])
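# when run from a script, plot() writes an HTML file ('temp-plot.html' by
# default) and opens it in the default browser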
###Output
_____no_output_____
###Markdown
7- Bokeh**Bokeh** is a large library that exposes many capabilities, so this section is only a quick tour of some common Bokeh use cases and workflows. For more detailed information please consult the full User Guide.Let’s begin with some examples. Plotting data in basic Python lists as a line plot including zoom, pan, save, and other tools is simple and straightforward:[Go to top](top)
###Code
from ipywidgets import interact
import numpy as np
from bokeh.io import push_notebook, show, output_notebook
from bokeh.plotting import figure
output_notebook()
x = np.linspace(0, 2*np.pi, 2000)
y = np.sin(x)
from bokeh.plotting import figure, output_file, show
# prepare some data
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
# create a new plot with a title and axis labels
p = figure(title="simple line example", x_axis_label='x', y_axis_label='y')
# add a line renderer with legend and line thickness
p.line(x, y, legend="Temp.", line_width=2)
# show the results
show(p)
###Output
_____no_output_____
###Markdown
When you execute this script, you will see that a new output file "lines.html" is created, and that a browser automatically opens a new tab to display it. (For presentation purposes we have included the plot output directly inline in this document.)The basic steps to creating plots with the bokeh.plotting interface are:Prepare some dataIn this case plain python lists, but could also be NumPy arrays or Pandas series.Tell Bokeh where to generate outputIn this case using output_file(), with the filename "lines.html". Another option is output_notebook() for use in Jupyter notebooks.Call figure()This creates a plot with typical default options and easy customization of title, tools, and axes labels.Add renderersIn this case, we use line() for our data, specifying visual customizations like colors, legends and widths.Ask Bokeh to show() or save() the results.These functions save the plot to an HTML file and optionally display it in a browser.Steps three and four can be repeated to create more than one plot, as shown in some of the examples below.The bokeh.plotting interface is also quite handy if we need to customize the output a bit more by adding more data series, glyphs, logarithmic axis, and so on. It’s also possible to easily combine multiple glyphs together on one plot as shown below:[Go to top](top)
###Code
from bokeh.plotting import figure, output_file, show
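# as noted above, calling output_file("lines.html") before show(p) would
# write the plot to an HTML file instead of rendering it in the notebook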
# prepare some data
x = [0.1, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]
y0 = [i**2 for i in x]
y1 = [10**i for i in x]
y2 = [10**(i**2) for i in x]
# create a new plot
p = figure(
tools="pan,box_zoom,reset,save",
y_axis_type="log", y_range=[0.001, 10**11], title="log axis example",
x_axis_label='sections', y_axis_label='particles'
)
# add some renderers
p.line(x, x, legend="y=x")
p.circle(x, x, legend="y=x", fill_color="white", size=8)
p.line(x, y0, legend="y=x^2", line_width=3)
p.line(x, y1, legend="y=10^x", line_color="red")
p.circle(x, y1, legend="y=10^x", fill_color="red", line_color="red", size=6)
p.line(x, y2, legend="y=10^x^2", line_color="orange", line_dash="4 4")
# show the results
show(p)
###Output
_____no_output_____
###Markdown
Top 5 Data Visualization Libraries Tutorial last update: 25/01/2019> You may be interested to have a look at 10 Steps to Become a Data Scientist: 1. [Learn Python](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-1)2. [Python Packages](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-2)3. [Mathematics and Linear Algebra](https://www.kaggle.com/mjbahmani/linear-algebra-for-data-scientists)4. [Programming & Analysis Tools](https://www.kaggle.com/mjbahmani/20-ml-algorithms-15-plot-for-beginners)5. [Big Data](https://www.kaggle.com/mjbahmani/a-data-science-framework-for-quora)6. You are in the Sixth step7. [Data Cleaning](https://www.kaggle.com/mjbahmani/machine-learning-workflow-for-house-prices)8. [How to solve a Problem?](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-2)9. [Machine Learning](https://www.kaggle.com/mjbahmani/a-comprehensive-ml-workflow-with-python)10. [Deep Learning](https://www.kaggle.com/mjbahmani/top-5-deep-learning-frameworks-tutorial)---------------------------------------------------------------------You can Fork and Run this kernel on Github:> [ GitHub](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist)------------------------------------------------------------------------------------------------------------- **I hope you find this kernel helpful and some UPVOTES would be very much appreciated** ----------- Notebook Content1. [Introduction](1)1. [Loading Packages](2) 1. [version](21) 1. [Setup](22) 1. [Data Collection](23)1. [Data Visualization Libraries](4)1. [Matplotlib](4) 1. [Scatterplots](41) 1. [ Line Plots](42) 1. [Bar Charts](43) 1. [Histograms](44) 1. [Box and Whisker Plots](45) 1. [Heatmaps](46) 1. [Animations](47) 1. [Interactivity](48) 1. [DataFrame.plot](49)1. [Seaborn](5) 1. [Seaborn Vs Matplotlib](51) 1. [Useful Python Data Visualization Libraries](52)1. [Plotly](6) 1. [New to Plotly?](61) 1. [Plotly Offline from Command Line](62)1. [Bokeh](7)1. [networkx](8)1. [Read more](9) 1. [Courses](91) 1. [Ebooks](92) 1. [Cheat sheet](93)1. [Conclusion](10) 1. [References](11) 1- IntroductionIf you've followed my other kernels so far, you have noticed that, for beginners, I've introduced the course "10 Steps to Become a Data Scientist". In this kernel we will take another step together. There are plenty of kernels that can help you learn Python's libraries from scratch, but here on Kaggle I want to analyze Meta Kaggle, a popular dataset. After reading, you can use it to analyze other real datasets and use it as a template to deal with ML problems. It is clear that everyone in this community is familiar with the Meta Kaggle dataset, but if you need to review your information about it, please visit [meta-kaggle](https://www.kaggle.com/kaggle/meta-kaggle). I am open to getting your feedback for improving this **kernel** together.
###Code
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from bokeh.io import push_notebook, show, output_notebook
import mpl_toolkits.axes_grid1.inset_locator as mpl_il
from bokeh.plotting import figure, output_file, show
from bokeh.io import show, output_notebook
import matplotlib.animation as animation
from matplotlib.figure import Figure
from sklearn.cluster import KMeans
import plotly.figure_factory as ff
import matplotlib.pylab as pylab
from ipywidgets import interact
import plotly.graph_objs as go
import matplotlib.pyplot as plt
from bokeh.plotting import figure
from sklearn import datasets
import plotly.offline as py
from random import randint
from plotly import tools
import matplotlib as mpl
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib
import warnings
import string
import csv
import os
###Output
_____no_output_____
###Markdown
2-1 version
###Code
print('matplotlib: {}'.format(matplotlib.__version__))
print('seaborn: {}'.format(sns.__version__))
print('pandas: {}'.format(pd.__version__))
print('numpy: {}'.format(np.__version__))
#print('wordcloud: {}'.format(wordcloud.version))
###Output
_____no_output_____
###Markdown
2-2 SetupA few tiny adjustments for better **code readability**
###Code
sns.set(style='white', context='notebook', palette='deep')
pylab.rcParams['figure.figsize'] = 12,8
warnings.filterwarnings('ignore')
mpl.style.use('ggplot')
sns.set_style('white')
%matplotlib inline
###Output
_____no_output_____
###Markdown
2-3 Data Collection**Data collection** is the process of gathering and measuring data, information or any variables of interest in a standardized and established manner that enables the collector to answer or test hypotheses and evaluate outcomes of the particular collection.[techopedia]I start collecting data by loading the Users and Kernels datasets into **Pandas DataFrames**
###Code
# import kernels and users to play with it (MJ Bahmani)
#command--> 1
users = pd.read_csv("../input/Users.csv")
kernels = pd.read_csv("../input/Kernels.csv")
messages = pd.read_csv("../input/ForumMessages.csv")
###Output
_____no_output_____
###Markdown
> * Each row is an observation (also known as: sample, example, instance, record)* Each column is a feature (also known as: predictor, attribute, independent variable, input, regressor, covariate) [Go to top](top)
###Code
#command--> 2
users.sample(1)
###Output
_____no_output_____
###Markdown
Please **replace** the username below with your own and find your userid. We suppose that userid == AuthorUserId and use userid for both the kernels and users datasets
###Code
username="mjbahmani"
userid=int(users[users['UserName']=="mjbahmani"].Id)
userid
###Output
_____no_output_____
###Markdown
We can just use **dropna()** (be careful: sometimes you should not do this!)
###Code
# remove rows that have NA's
print('Before Dropping',messages.shape)
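# before dropping, it can help to see how many NAs each column holds
print(messages.isnull().sum())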
#command--> 3
messages = messages.dropna()
print('After Dropping',messages.shape)
###Output
_____no_output_____
###Markdown
2-3-1 FeaturesFeatures can be of the following types:1. numeric1. categorical1. ordinal1. datetime1. coordinatesCan you find the type of each feature in **Meta Kaggle**?For getting some information about the dataset you can use the **info()** command [Go to top](top)
###Code
#command--> 4
print(users.info())
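# dtypes gives a compact per-column type breakdown (numeric vs object),
# matching the feature categories listed above
print(users.dtypes.value_counts())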
###Output
_____no_output_____
###Markdown
2-3-2 Explore the Dataset1. Dimensions of the dataset.1. Peek at the data itself.1. Statistical summary of all attributes.1. Breakdown of the data by the class variable.Don’t worry, each look at the data is **one command**. These are useful commands that you can use again and again on future projects. [Go to top](top)
###Code
# shape
#command--> 5
print(users.shape)
#columns*rows
#command--> 6
users.size
###Output
_____no_output_____
###Markdown
We can get a quick idea of how many instances (rows) and how many attributes (columns) the data contains with the shape property. You can see the number of unique items for Medal with the command below:
###Code
#command--> 7
kernels['Medal'].unique()
#command--> 8
kernels["Medal"].value_counts()
###Output
_____no_output_____
###Markdown
To check the first 5 rows of the data set, we can use head(5).
###Code
kernels.head(5)
###Output
_____no_output_____
###Markdown
To check out the last 5 rows of the data set, we use the tail() function
###Code
#command--> 9
users.tail()
###Output
_____no_output_____
###Markdown
To pop up 5 random rows from the data set, we can use the **sample(5)** function
###Code
kernels.sample(5)
###Output
_____no_output_____
###Markdown
To give a statistical summary about the dataset, we can use **describe()**
###Code
kernels.describe()
###Output
_____no_output_____
###Markdown
2-3-5 Find yourself in the Users dataset
###Code
#command--> 12
users[users['Id']==userid]
###Output
_____no_output_____
###Markdown
2-3-6 Find your kernels in Kernels dataset
###Code
#command--> 13
yourkernels=kernels[kernels['AuthorUserId']==userid]
yourkernels.head(2)
###Output
_____no_output_____
###Markdown
3- Data Visualization LibrariesBefore you start learning, I am giving an overview of 10 interdisciplinary **Python data visualization libraries**, from the well-known to the obscure.* 1- matplotlibmatplotlib is the O.G. of Python data visualization libraries. Despite being over a decade old, it’s still the most widely used library for plotting in the Python community. It was designed to closely resemble MATLAB, a proprietary programming language developed in the 1980s.* 2- SeabornSeaborn harnesses the power of matplotlib to create beautiful charts in a few lines of code. The key difference is Seaborn’s default styles and color palettes, which are designed to be more aesthetically pleasing and modern. Since Seaborn is built on top of matplotlib, you’ll need to know matplotlib to tweak Seaborn’s defaults.* 3- ggplotggplot is based on ggplot2, an R plotting system, and concepts from The Grammar of Graphics. ggplot operates differently than matplotlib: it lets you layer components to create a complete plot. For instance, you can start with axes, then add points, then a line, a trendline, etc. Although The Grammar of Graphics has been praised as an “intuitive” method for plotting, seasoned matplotlib users might need time to adjust to this new mindset.* 4- BokehLike ggplot, Bokeh is based on The Grammar of Graphics, but unlike ggplot, it’s native to Python, not ported over from R. Its strength lies in the ability to create interactive, web-ready plots, which can be easily outputted as JSON objects, HTML documents, or interactive web applications. Bokeh also supports streaming and real-time data.* 5- pygalLike Bokeh and Plotly, pygal offers interactive plots that can be embedded in the web browser. Its prime differentiator is the ability to output charts as SVGs. As long as you’re working with smaller datasets, SVGs will do you just fine. But if you’re making charts with hundreds of thousands of data points, they’ll have trouble rendering and become sluggish.* 6- PlotlyYou might know Plotly as an online platform for data visualization, but did you also know you can access its capabilities from a Python notebook? Like Bokeh, Plotly’s forte is making interactive plots, but it offers some charts you won’t find in most libraries, like contour plots, dendrograms, and 3D charts.* 7- geoplotlibgeoplotlib is a toolbox for creating maps and plotting geographical data. You can use it to create a variety of map-types, like choropleths, heatmaps, and dot density maps. You must have Pyglet (an object-oriented programming interface) installed to use geoplotlib. Nonetheless, since most Python data visualization libraries don’t offer maps, it’s nice to have a library dedicated solely to them.* 8- GleamGleam is inspired by R’s Shiny package. It allows you to turn analyses into interactive web apps using only Python scripts, so you don’t have to know any other languages like HTML, CSS, or JavaScript. Gleam works with any Python data visualization library. Once you’ve created a plot, you can build fields on top of it so users can filter and sort data.* 9- missingnoDealing with missing data is a pain. missingno allows you to quickly gauge the completeness of a dataset with a visual summary, instead of trudging through a table. 
You can filter and sort data based on completion or spot correlations with a heatmap or a dendrogram.* 10- LeatherLeather’s creator, Christopher Groskopf, puts it best: “Leather is the Python charting library for those who need charts now and don’t care if they’re perfect.” It’s designed to work with all data types and produces charts as SVGs, so you can scale them without losing image quality. Since this library is relatively new, some of the documentation is still in progress. The charts you can make are pretty basic, but that’s the intention. At the end, there is a nice cheat sheet on how to best visualize your data. I think I will print it out as a good reminder of "best practices". Check out the link for the complete cheat sheet, also as a PDF. * 11- ChartifyChartify is a Python library that makes it easy for data scientists to create charts.Why use Chartify?1. Consistent input data format: Spend less time transforming data to get your charts to work. All plotting functions use a consistent tidy input data format.1. Smart default styles: Create pretty charts with very little customization required.1. Simple API: We've attempted to make the API as intuitive and easy to learn as possible.1. Flexibility: Chartify is built on top of Bokeh, so if you do need more control you can always fall back on Bokeh's API.Link: https://github.com/mjbahmani/Machine-Learning-Workflow-with-Python![cheatsheet ][1][Reference][2] [1]: http://s8.picofile.com/file/8340669884/53f6a826_d7df_4b55_81e6_7c23b3fff0a3_original.png [2]: https://blog.modeanalytics.com/python-data-visualization-libraries/ 4- MatplotlibThis Matplotlib tutorial takes you through the basics of Python data visualization: 1. the anatomy of a plot 1. pyplot 1. pylab 1. and much more [Go to top](top) You can show matplotlib figures directly in the notebook by using the `%matplotlib notebook` and `%matplotlib inline` magic commands. `%matplotlib notebook` provides an interactive environment. We can use HTML cell magic to display the image.
###Code
#import matplotlib.pyplot as plt
plt.plot([1, 2, 3, 4], [10, 20, 25, 30], color='lightblue', linewidth=3)
plt.scatter([0.4, 3.8, 1.2, 2.5], [15, 25, 9, 26], color='darkgreen', marker='o')
plt.xlim(0.5, 4.5)
plt.show()
###Output
_____no_output_____
###Markdown
Simple and powerful visualizations can be generated using the **Matplotlib Python** Library. More than a decade old, it is the most widely-used library for plotting in the Python community. A wide range of graphs from histograms to heat plots to line plots can be plotted using Matplotlib.Many other libraries are built on top of Matplotlib and are designed to work in conjunction with analysis, it being the first Python data visualization library. Libraries like pandas and Seaborn are “wrappers” over Matplotlib, allowing access to a number of Matplotlib’s methods with less code.[7] 4-1 Scatterplots
###Code
x = np.array([1,2,3,4,5,6,7,8])
y = x
plt.figure()
plt.scatter(x, y) # similar to plt.plot(x, y, '.'), but the underlying child objects in the axes are not Line2D
x = np.array([1,2,3,4,5,6,7,8])
y = x
# create a list of colors for each point to have
# ['green', 'green', 'green', 'green', 'green', 'green', 'green', 'red']
colors = ['green']*(len(x)-1)
colors.append('red')
plt.figure()
# plot the point with size 100 and chosen colors
plt.scatter(x, y, s=100, c=colors)
plt.figure()
# plot a data series 'Tall students' in red using the first two elements of x and y
plt.scatter(x[:2], y[:2], s=100, c='red', label='Tall students')
# plot a second data series 'Short students' in blue using the last three elements of x and y
plt.scatter(x[2:], y[2:], s=100, c='blue', label='Short students')
x = np.random.randint(low=1, high=11, size=50)
y = x + np.random.randint(1, 5, size=x.size)
data = np.column_stack((x, y))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2,
figsize=(8, 4))
ax1.scatter(x=x, y=y, marker='o', c='r', edgecolor='b')
ax1.set_title('Scatter: $x$ versus $y$')
ax1.set_xlabel('$x$')
ax1.set_ylabel('$y$')
ax2.hist(data, bins=np.arange(data.min(), data.max()),
label=('x', 'y'))
ax2.legend(loc=(0.65, 0.8))
ax2.set_title('Frequencies of $x$ and $y$')
ax2.yaxis.tick_right()
# Modify the graph above by assigning each medal class an individual color.
#command--> 19
x=yourkernels["TotalVotes"]
y=yourkernels["TotalViews"]
plt.scatter(x, y, label='kernels')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
4-2 Line Plots
###Code
linear_data = np.array([1,2,3,4,5,6,7,8])
exponential_data = linear_data**2
plt.figure()
# plot the linear data and the exponential data
plt.plot(linear_data, '-o', exponential_data, '-o')
# plot another series with a dashed red line
plt.plot([22,44,55], '--r')
###Output
_____no_output_____
###Markdown
4-3 Bar Charts
###Code
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3)
new_xvals = []
# plot another set of bars, adjusting the new xvals to make up for the first set of bars plotted
for item in xvals:
new_xvals.append(item+0.3)
plt.bar(new_xvals, exponential_data, width = 0.3 ,color='red')
linear_err = [randint(0,15) for x in range(len(linear_data))]
# This will plot a new set of bars with errorbars using the list of random error values
plt.bar(xvals, linear_data, width = 0.3, yerr=linear_err)
# stacked bar charts are also possible
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3, color='b')
plt.bar(xvals, exponential_data, width = 0.3, bottom=linear_data, color='r')
# or use barh for horizontal bar charts
plt.figure()
xvals = range(len(linear_data))
plt.barh(xvals, linear_data, height = 0.3, color='b')
plt.barh(xvals, exponential_data, height = 0.3, left=linear_data, color='r')
# Initialize the plot
fig = plt.figure(figsize=(20,10))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
# or replace the three lines of code above by the following line:
#fig, (ax1, ax2) = plt.subplots(1,2, figsize=(20,10))
# Plot the data
ax1.bar([1,2,3],[3,4,5])
ax2.barh([0.5,1,2.5],[0,1,2])
# Show the plot
plt.show()
plt.figure()
# subplot with 1 row, 2 columns, and current axis is 1st subplot axes
plt.subplot(1, 2, 1)
linear_data = np.array([1,2,3,4,5,6,7,8])
plt.plot(linear_data, '-o')
exponential_data = linear_data**2
# subplot with 1 row, 2 columns, and current axis is 2nd subplot axes
plt.subplot(1, 2, 2)
plt.plot(exponential_data, '-o')
# plot exponential data on 1st subplot axes
plt.subplot(1, 2, 1)
plt.plot(exponential_data, '-x')
plt.figure()
ax1 = plt.subplot(1, 2, 1)
plt.plot(linear_data, '-o')
# pass sharey=ax1 to ensure the two subplots share the same y axis
ax2 = plt.subplot(1, 2, 2, sharey=ax1)
plt.plot(exponential_data, '-x')
###Output
_____no_output_____
###Markdown
4-4 Histograms
###Code
# create 2x2 grid of axis subplots
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
# draw n = 10, 100, 1000, and 10000 samples from the normal distribution and plot corresponding histograms
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample)
axs[n].set_title('n={}'.format(sample_size))
# repeat with number of bins set to 100
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample, bins=100)
axs[n].set_title('n={}'.format(sample_size))
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
plt.scatter(X,Y)
###Output
_____no_output_____
###Markdown
It looks like perhaps two of the input variables have a Gaussian distribution. This is useful to note as we can use algorithms that can exploit this assumption.
###Code
yourkernels["TotalViews"].hist();
yourkernels["TotalComments"].hist();
sns.factorplot('TotalViews','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
4-5 Box and Whisker PlotsIn descriptive statistics, a **box plot** or boxplot is a method for graphically depicting groups of numerical data through their quartiles. Box plots may also have lines extending vertically from the boxes (whiskers) indicating variability outside the upper and lower quartiles, hence the terms box-and-whisker plot and box-and-whisker diagram.[wikipedia]
###Code
normal_sample = np.random.normal(loc=0.0, scale=1.0, size=10000)
random_sample = np.random.random(size=10000)
gamma_sample = np.random.gamma(2, size=10000)
df = pd.DataFrame({'normal': normal_sample,
'random': random_sample,
'gamma': gamma_sample})
plt.figure()
# create a boxplot of the normal data, assign the output to a variable to suppress output
# (whis=(0, 100) spans the full data range, like the old whis='range' option)
_ = plt.boxplot(df['normal'], whis=(0, 100))
# clear the current figure
plt.clf()
# plot boxplots for all three of df's columns
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis=(0, 100))
plt.figure()
_ = plt.hist(df['gamma'], bins=100)
plt.figure()
plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis=(0, 100))
# overlay axis on top of another
ax2 = mpl_il.inset_axes(plt.gca(), width='60%', height='40%', loc=2)
ax2.hist(df['gamma'], bins=100)
ax2.margins(x=0.5)
# switch the y axis ticks for ax2 to the right side
ax2.yaxis.tick_right()
# if `whis` argument isn't passed, boxplot defaults to showing 1.5*interquartile (IQR) whiskers with outliers
plt.figure()
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ] )
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
4-6 Heatmaps
###Code
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
_ = plt.hist2d(X, Y, bins=25)
plt.figure()
_ = plt.hist2d(X, Y, bins=100)
###Output
_____no_output_____
###Markdown
4-7 Animations
###Code
n = 100
x = np.random.randn(n)
# create the function that will do the plotting, where curr is the current frame
def update(curr):
# check if animation is at the last frame, and if so, stop the animation
if curr == n:
a.event_source.stop()
plt.cla()
bins = np.arange(-4, 4, 0.5)
plt.hist(x[:curr], bins=bins)
plt.axis([-4,4,0,30])
plt.gca().set_title('Sampling the Normal Distribution')
plt.gca().set_ylabel('Frequency')
plt.gca().set_xlabel('Value')
plt.annotate('n = {}'.format(curr), [3,27])
fig = plt.figure()
a = animation.FuncAnimation(fig, update, interval=100)
###Output
_____no_output_____
###Markdown
4-8 Interactivity
###Code
plt.figure()
data = np.random.rand(10)
plt.plot(data)
def onclick(event):
plt.cla()
plt.plot(data)
plt.gca().set_title('Event at pixels {},{} \nand data {},{}'.format(event.x, event.y, event.xdata, event.ydata))
# tell mpl_connect we want to pass a 'button_press_event' into onclick when the event is detected
plt.gcf().canvas.mpl_connect('button_press_event', onclick)
from random import shuffle
origins = ['China', 'Brazil', 'India', 'USA', 'Canada', 'UK', 'Germany', 'Iraq', 'Chile', 'Mexico']
shuffle(origins)
df = pd.DataFrame({'height': np.random.rand(10),
'weight': np.random.rand(10),
'origin': origins})
df
plt.figure()
# picker=5 means the mouse doesn't have to click directly on an event, but can be up to 5 pixels away
plt.scatter(df['height'], df['weight'], picker=5)
plt.gca().set_ylabel('Weight')
plt.gca().set_xlabel('Height')
def onpick(event):
origin = df.iloc[event.ind[0]]['origin']
plt.gca().set_title('Selected item came from {}'.format(origin))
# tell mpl_connect we want to pass a 'pick_event' into onpick when the event is detected
plt.gcf().canvas.mpl_connect('pick_event', onpick)
# use the 'seaborn-colorblind' style
plt.style.use('seaborn-colorblind')
###Output
_____no_output_____
###Markdown
4-9 DataFrame.plot
###Code
np.random.seed(123)
df = pd.DataFrame({'A': np.random.randn(365).cumsum(0),
'B': np.random.randn(365).cumsum(0) + 20,
'C': np.random.randn(365).cumsum(0) - 20},
index=pd.date_range('1/1/2017', periods=365))
df.head()
df.plot('A','B', kind = 'scatter');
###Output
_____no_output_____
###Markdown
You can also choose the plot kind by using the `DataFrame.plot.<kind>` accessor methods instead of providing the `kind` keyword argument. `kind`:- `'line'` : line plot (default)- `'bar'` : vertical bar plot- `'barh'` : horizontal bar plot- `'hist'` : histogram- `'box'` : boxplot- `'kde'` : Kernel Density Estimation plot- `'density'` : same as 'kde'- `'area'` : area plot- `'pie'` : pie plot- `'scatter'` : scatter plot- `'hexbin'` : hexbin plot [Go to top](top)
###Code
# create a scatter plot of columns 'A' and 'C', with changing color (c) and size (s) based on column 'B'
df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
ax = df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
ax.set_aspect('equal')
df.plot.box();
df.plot.hist(alpha=0.7);
###Output
_____no_output_____
###Markdown
[Kernel density estimation plots](https://en.wikipedia.org/wiki/Kernel_density_estimation) are useful for deriving a smooth continuous function from a given sample.
###Code
df.plot.kde();
###Output
_____no_output_____
###Markdown
5- SeabornSeaborn is an open source, BSD-licensed Python library providing a high-level API for visualizing data with the Python programming language.[9][Go to top](top) 5-1 Seaborn Vs MatplotlibIt is summarized that if Matplotlib “tries to make easy things easy and hard things possible”, Seaborn tries to make a well-defined set of hard things easy too. Seaborn helps resolve the two major problems faced by Matplotlib; the problems are:* Default Matplotlib parameters* Working with data framesAs Seaborn complements and extends Matplotlib, the learning curve is quite gradual. If you know Matplotlib, you are already half way through Seaborn.Important Features of SeabornSeaborn is built on top of Python’s core visualization library Matplotlib. It is meant to serve as a complement, and not a replacement. However, Seaborn comes with some very important features. Let us see a few of them here. The features help in:* Built-in themes for styling matplotlib graphics* Visualizing univariate and bivariate data* Fitting in and visualizing linear regression models* Plotting statistical time series data* Seaborn works well with NumPy and Pandas data structures* It comes with built-in themes for styling Matplotlib graphicsIn most cases, you will still use Matplotlib for simple plotting. The knowledge of Matplotlib is recommended to tweak Seaborn’s default plots.[9][Go to top](top)
###Code
def sinplot(flip = 1):
x = np.linspace(0, 14, 100)
for i in range(1, 5):
plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip)
sinplot()
plt.show()
def sinplot(flip = 1):
x = np.linspace(0, 14, 100)
for i in range(1, 5):
plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip)
sns.set()
sinplot()
plt.show()
np.random.seed(1234)
v1 = pd.Series(np.random.normal(0,10,1000), name='v1')
v2 = pd.Series(2*v1 + np.random.normal(60,15,1000), name='v2')
plt.figure()
plt.hist(v1, alpha=0.7, bins=np.arange(-50,150,5), label='v1');
plt.hist(v2, alpha=0.7, bins=np.arange(-50,150,5), label='v2');
plt.legend();
plt.figure()
# we can pass keyword arguments for each individual component of the plot
sns.distplot(v2, hist_kws={'color': 'Teal'}, kde_kws={'color': 'Navy'});
sns.jointplot(v1, v2, alpha=0.4);
grid = sns.jointplot(v1, v2, alpha=0.4);
grid.ax_joint.set_aspect('equal')
sns.jointplot(v1, v2, kind='hex');
# set the seaborn style for all the following plots
sns.set_style('white')
sns.jointplot(v1, v2, kind='kde', space=0);
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
# violinplots on petal-length for each species
#command--> 24
sns.violinplot(data=yourkernels,x="TotalViews", y="TotalVotes")
# violinplots on petal-length for each species
sns.violinplot(data=yourkernels,x="TotalComments", y="TotalVotes")
sns.violinplot(data=yourkernels,x="Medal", y="TotalVotes")
sns.violinplot(data=yourkernels,x="Medal", y="TotalComments")
###Output
_____no_output_____
###Markdown
5-2 kdeplot
###Code
# seaborn's kdeplot, plots univariate or bivariate density estimates.
#Size can be changed by tweaking the value used
#command--> 25
sns.FacetGrid(yourkernels, hue="Medal", size=5).map(sns.kdeplot, "TotalComments").add_legend()
plt.show()
sns.FacetGrid(yourkernels, hue="Medal", size=5).map(sns.kdeplot, "TotalVotes").add_legend()
plt.show()
f,ax=plt.subplots(1,3,figsize=(20,8))
sns.distplot(yourkernels[yourkernels['Medal']==1].TotalVotes,ax=ax[0])
ax[0].set_title('TotalVotes in Medal 1')
sns.distplot(yourkernels[yourkernels['Medal']==2].TotalVotes,ax=ax[1])
ax[1].set_title('TotalVotes in Medal 2')
sns.distplot(yourkernels[yourkernels['Medal']==3].TotalVotes,ax=ax[2])
ax[2].set_title('TotalVotes in Medal 3')
plt.show()
###Output
_____no_output_____
###Markdown
5-3 jointplot
###Code
# Use seaborn's jointplot to make a hexagonal bin plot
#Set desired size and ratio and choose a color.
#command--> 25
sns.jointplot(x="TotalVotes", y="TotalViews", data=yourkernels, size=10,ratio=10, kind='hex',color='green')
plt.show()
###Output
_____no_output_____
###Markdown
5-4 andrews_curves
###Code
# seaborn's jointplot shows bivariate scatterplots and univariate histograms with
# kernel density estimation in the same figure
sns.jointplot(x="TotalVotes", y="TotalViews", data=yourkernels, size=6, kind='kde', color='#800000', space=0)
###Output
_____no_output_____
###Markdown
5-5 Heatmap
###Code
#command--> 26
plt.figure(figsize=(10,7))
sns.heatmap(yourkernels.corr(),annot=True,cmap='cubehelix_r') #draws heatmap with input as the correlation matrix calculated by yourkernels.corr()
plt.show()
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
5-6 distplot
###Code
sns.distplot(yourkernels['TotalVotes']);
###Output
_____no_output_____
###Markdown
6- PlotlyHow to use **Plotly** offline inside IPython notebooks. 6-1 New to Plotly?Plotly, also known by its URL, Plot.ly, is a technical computing company headquartered in Montreal, Quebec, that develops online data analytics and visualization tools. Plotly provides online graphing, analytics, and statistics tools for individuals and collaboration, as well as scientific graphing libraries for Python, R, MATLAB, Perl, Julia, Arduino, and REST.[Go to top](top)
###Code
# example for plotly
py.init_notebook_mode(connected=True)
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
trace = go.Scatter(x=X[:, 0],
y=X[:, 1],
mode='markers',
marker=dict(color=np.random.randn(150),
size=10,
colorscale='Viridis',
showscale=False))
layout = go.Layout(title='Training Points',
xaxis=dict(title='Sepal length',
showgrid=False),
yaxis=dict(title='Sepal width',
showgrid=False),
)
fig = go.Figure(data=[trace], layout=layout)
py.iplot(fig)
from sklearn.decomposition import PCA
X_reduced = PCA(n_components=3).fit_transform(iris.data)
trace = go.Scatter3d(x=X_reduced[:, 0],
y=X_reduced[:, 1],
z=X_reduced[:, 2],
mode='markers',
marker=dict(
size=6,
color=np.random.randn(150),
colorscale='Viridis',
opacity=0.8)
)
layout=go.Layout(title='First three PCA directions',
scene=dict(
xaxis=dict(title='1st eigenvector'),
yaxis=dict(title='2nd eigenvector'),
zaxis=dict(title='3rd eigenvector'))
)
fig = go.Figure(data=[trace], layout=layout)
py.iplot(fig)
###Output
_____no_output_____
###Markdown
6-2 Plotly Offline from Command LineYou can plot your graphs from a python script from command line. On executing the script, it will open a web browser with your Plotly Graph drawn.[Go to top](top)
###Code
plot([go.Scatter(x=[1, 2, 3], y=[3, 1, 6])])
np.random.seed(5)
fig = tools.make_subplots(rows=2, cols=3,
print_grid=False,
specs=[[{'is_3d': True}, {'is_3d': True}, {'is_3d': True}],
[ {'is_3d': True, 'rowspan':1}, None, None]])
scene = dict(
camera = dict(
up=dict(x=0, y=0, z=1),
center=dict(x=0, y=0, z=0),
eye=dict(x=2.5, y=0.1, z=0.1)
),
xaxis=dict(
range=[-1, 4],
title='Petal width',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)',
showticklabels=False, ticks=''
),
yaxis=dict(
range=[4, 8],
title='Sepal length',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)',
showticklabels=False, ticks=''
),
zaxis=dict(
range=[1,8],
title='Petal length',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)',
showticklabels=False, ticks=''
)
)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
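# note: 'k_means_iris_bad_init' deliberately uses a single random
# initialisation (n_init=1) to show how sensitive k-means is to its start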
fignum = 1
for name, est in estimators.items():
est.fit(X)
labels = est.labels_
trace = go.Scatter3d(x=X[:, 3], y=X[:, 0], z=X[:, 2],
showlegend=False,
mode='markers',
marker=dict(
color=labels.astype(float),
line=dict(color='black', width=1)
))
fig.append_trace(trace, 1, fignum)
fignum = fignum + 1
y = np.choose(y, [1, 2, 0]).astype(float)
trace1 = go.Scatter3d(x=X[:, 3], y=X[:, 0], z=X[:, 2],
showlegend=False,
mode='markers',
marker=dict(
color=y,
line=dict(color='black', width=1)))
fig.append_trace(trace1, 2, 1)
fig['layout'].update(height=900, width=900,
margin=dict(l=10,r=10))
py.iplot(fig)
###Output
_____no_output_____
###Markdown
7- Bokeh**Bokeh** is a large library that exposes many capabilities, so this section is only a quick tour of some common Bokeh use cases and workflows. For more detailed information please consult the full User Guide.[11]Let’s begin with some examples. Plotting data in basic Python lists as a line plot including zoom, pan, save, and other tools is simple and straightforward:[Go to top](top)
###Code
output_notebook()
x = np.linspace(0, 2*np.pi, 2000)
y = np.sin(x)
# prepare some data
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
# create a new plot with a title and axis labels
p = figure(title="simple line example", x_axis_label='x', y_axis_label='y')
# add a line renderer with legend and line thickness
p.line(x, y, legend="Temp.", line_width=2)
# show the results
show(p)
###Output
_____no_output_____
###Markdown
When you execute this script, you will see that a new output file "lines.html" is created, and that a browser automatically opens a new tab to display it. (For presentation purposes we have included the plot output directly inline in this document.)The basic steps to creating plots with the bokeh.plotting interface are:Prepare some dataIn this case plain python lists, but could also be NumPy arrays or Pandas series.Tell Bokeh where to generate outputIn this case using output_file(), with the filename "lines.html". Another option is output_notebook() for use in Jupyter notebooks.Call figure()This creates a plot with typical default options and easy customization of title, tools, and axes labels.Add renderersIn this case, we use line() for our data, specifying visual customizations like colors, legends and widths.Ask Bokeh to show() or save() the results.These functions save the plot to an HTML file and optionally display it in a browser.Steps three and four can be repeated to create more than one plot, as shown in some of the examples below.The bokeh.plotting interface is also quite handy if we need to customize the output a bit more by adding more data series, glyphs, logarithmic axis, and so on. It’s also possible to easily combine multiple glyphs together on one plot as shown below:[Go to top](top)
###Code
from bokeh.plotting import figure, output_file, show
# prepare some data
x = [0.1, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]
y0 = [i**2 for i in x]
y1 = [10**i for i in x]
y2 = [10**(i**2) for i in x]
# create a new plot
p = figure(
tools="pan,box_zoom,reset,save",
y_axis_type="log", y_range=[0.001, 10**11], title="log axis example",
x_axis_label='sections', y_axis_label='particles'
)
# add some renderers
p.line(x, x, legend="y=x")
p.circle(x, x, legend="y=x", fill_color="white", size=8)
p.line(x, y0, legend="y=x^2", line_width=3)
p.line(x, y1, legend="y=10^x", line_color="red")
p.circle(x, y1, legend="y=10^x", fill_color="red", line_color="red", size=6)
p.line(x, y2, legend="y=10^x^2", line_color="orange", line_dash="4 4")
# show the results
show(p)
# bokeh basics
# Create a blank figure with labels
p = figure(plot_width = 600, plot_height = 600,
title = 'Example Glyphs',
x_axis_label = 'X', y_axis_label = 'Y')
# Example data
squares_x = [1, 3, 4, 5, 8]
squares_y = [8, 7, 3, 1, 10]
circles_x = [9, 12, 4, 3, 15]
circles_y = [8, 4, 11, 6, 10]
# Add squares glyph
p.square(squares_x, squares_y, size = 12, color = 'navy', alpha = 0.6)
# Add circle glyph
p.circle(circles_x, circles_y, size = 12, color = 'red')
# Set to output the plot in the notebook
output_notebook()
# Show the plot
show(p)
###Output
_____no_output_____
###Markdown
8- NetworkX**NetworkX** is a Python package for the creation, manipulation, and study of the structure, dynamics, and functions of complex networks.
###Code
import sys
import matplotlib.pyplot as plt
import networkx as nx
G = nx.grid_2d_graph(5, 5) # 5x5 grid
# print the adjacency list
for line in nx.generate_adjlist(G):
print(line)
# write edgelist to grid.edgelist
nx.write_edgelist(G, path="grid.edgelist", delimiter=":")
# read edgelist from grid.edgelist
H = nx.read_edgelist(path="grid.edgelist", delimiter=":")
nx.draw(H)
plt.show()
from ipywidgets import interact
%matplotlib inline
import matplotlib.pyplot as plt
import networkx as nx
# wrap a few graph generation functions so they have the same signature
def random_lobster(n, m, k, p):
return nx.random_lobster(n, p, p / m)
def powerlaw_cluster(n, m, k, p):
return nx.powerlaw_cluster_graph(n, m, p)
def erdos_renyi(n, m, k, p):
return nx.erdos_renyi_graph(n, p)
def newman_watts_strogatz(n, m, k, p):
return nx.newman_watts_strogatz_graph(n, k, p)
def plot_random_graph(n, m, k, p, generator):
g = generator(n, m, k, p)
nx.draw(g)
plt.show()
interact(plot_random_graph, n=(2,30), m=(1,10), k=(1,10), p=(0.0, 1.0, 0.001),
generator={
'lobster': random_lobster,
'power law': powerlaw_cluster,
'Newman-Watts-Strogatz': newman_watts_strogatz,
u'Erdős-Rényi': erdos_renyi,
});
###Output
_____no_output_____
###Markdown
 Top 5 Data Visualization Libraries Tutorial last update: 25/01/2019> You may be interested to have a look at 10 Steps to Become a Data Scientist: 1. [Learn Python](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-1)2. [Python Packages](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-2)3. [Mathematics and Linear Algebra](https://www.kaggle.com/mjbahmani/linear-algebra-for-data-scientists)4. [Programming & Analysis Tools](https://www.kaggle.com/mjbahmani/20-ml-algorithms-15-plot-for-beginners)5. [Big Data](https://www.kaggle.com/mjbahmani/a-data-science-framework-for-quora)6. You are in the sixth step7. [Data Cleaning](https://www.kaggle.com/mjbahmani/machine-learning-workflow-for-house-prices)8. [How to solve a Problem?](https://www.kaggle.com/mjbahmani/the-data-scientist-s-toolbox-tutorial-2)9. [Machine Learning](https://www.kaggle.com/mjbahmani/a-comprehensive-ml-workflow-with-python)10. [Deep Learning](https://www.kaggle.com/mjbahmani/top-5-deep-learning-frameworks-tutorial)---------------------------------------------------------------------You can Fork and Run this kernel on GitHub:> [ GitHub](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist)------------------------------------------------------------------------------------------------------------- **I hope you find this kernel helpful and some UPVOTES would be very much appreciated** ----------- Notebook Content1. [Introduction](1)1. [Loading Packages](2) 1. [version](21) 1. [Setup](22) 1. [Data Collection](23)1. [Data Visualization Libraries](4)1. [Matplotlib](4) 1. [Scatterplots](41) 1. [ Line Plots](42) 1. [Bar Charts](43) 1. [Histograms](44) 1. [Box and Whisker Plots](45) 1. [Heatmaps](46) 1. [Animations](47) 1. [Interactivity](48) 1. [DataFrame.plot](49)1. [Seaborn](5) 1. [Seaborn Vs Matplotlib](51) 1. [Useful Python Data Visualization Libraries](52)1. [Plotly](6) 1. [New to Plotly?](61) 1. [Plotly Offline from Command Line](62)1. [Bokeh](7)1. [networkx](8)1. [Read more](9) 1. [Courses](91) 1. [Ebooks](92) 1. [Cheat sheet](93)1. [Conclusion](10) 1. [References](11) 1- IntroductionIf you've followed my other kernels so far, you have noticed that for those who are beginners, I've introduced a course "10 Steps to Become a Data Scientist". In this kernel we will take another step together. There are plenty of kernels that can help you learn Python's libraries from scratch, but here on Kaggle I want to analyze Meta Kaggle, a popular dataset.After reading it, you can use this kernel to analyze other real datasets and as a template to deal with ML problems.It is clear that everyone in this community is familiar with the Meta Kaggle dataset, but if you need to review your information about it, please visit [meta-kaggle](https://www.kaggle.com/kaggle/meta-kaggle).I am open to getting your feedback for improving this **kernel** together. 2- Loading PackagesIn this kernel we are using the following packages:
###Code
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from bokeh.io import push_notebook, show, output_notebook
import mpl_toolkits.axes_grid1.inset_locator as mpl_il
from bokeh.plotting import figure, output_file, show
import matplotlib.animation as animation
from matplotlib.figure import Figure
from sklearn.cluster import KMeans
import plotly.figure_factory as ff
import matplotlib.pylab as pylab
from ipywidgets import interact
import plotly.graph_objs as go
import matplotlib.pyplot as plt
from sklearn import datasets
import plotly.offline as py
from random import randint
from plotly import tools
import matplotlib as mpl
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib
import warnings
import string
import csv
import os
###Output
_____no_output_____
###Markdown
2-1 version
###Code
print('matplotlib: {}'.format(matplotlib.__version__))
print('seaborn: {}'.format(sns.__version__))
print('pandas: {}'.format(pd.__version__))
print('numpy: {}'.format(np.__version__))
#print('wordcloud: {}'.format(wordcloud.version))
###Output
_____no_output_____
###Markdown
2-2 SetupA few tiny adjustments for better **code readability**
###Code
sns.set(style='white', context='notebook', palette='deep')
pylab.rcParams['figure.figsize'] = 12,8
warnings.filterwarnings('ignore')
mpl.style.use('ggplot')
sns.set_style('white')
%matplotlib inline
###Output
_____no_output_____
###Markdown
 2-3 Data Collection**Data collection** is the process of gathering and measuring data, information or any variables of interest in a standardized and established manner that enables the collector to answer or test hypotheses and evaluate outcomes of the particular collection.[techopedia]I start data collection by loading the Users and Kernels datasets into **Pandas DataFrames**
###Code
# import kernels and users to play with it (MJ Bahmani)
#command--> 1
users = pd.read_csv("../input/Users.csv")
kernels = pd.read_csv("../input/Kernels.csv")
messages = pd.read_csv("../input/ForumMessages.csv")
###Output
_____no_output_____
###Markdown
 **>*** Each row is an observation (also known as: sample, example, instance, record)* Each column is a feature (also known as: predictor, attribute, independent variable, input, regressor, covariate) [Go to top](top)
###Code
#command--> 2
users.sample(1)
###Output
_____no_output_____
###Markdown
 Please **replace** your username and find your userid. We suppose that userid==authoruserid and use userid for both the kernels and users datasets
###Code
username="mjbahmani"
userid=int(users[users['UserName']=="mjbahmani"].Id)
userid
###Output
_____no_output_____
###Markdown
 We can just use **dropna()** (be careful: sometimes you should not do this!)
###Code
# remove rows that have NA's
print('Before dropping', messages.shape)
#command--> 3
messages = messages.dropna()
print('After dropping', messages.shape)
###Output
_____no_output_____
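###Markdown
 A gentler, hedged alternative to dropping every row that has any NA: pandas' `dropna` also accepts a `thresh` argument, which keeps rows having at least that many non-null values. A minimal sketch, re-reading the messages file loaded earlier:
###Code
# compare dropping on any NaN vs. requiring at least 3 non-null values per row
messages_raw = pd.read_csv("../input/ForumMessages.csv")
print('drop on any NaN:', messages_raw.dropna().shape)
print('thresh=3       :', messages_raw.dropna(thresh=3).shape)
###Output
_____no_output_____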
###Markdown
 2-3-1 FeaturesFeatures can be of the following types:1. numeric1. categorical1. ordinal1. datetime1. coordinatesCan you find the type of each feature in **Meta Kaggle**? To get some information about the dataset you can use the **info()** command. [Go to top](top)
###Code
#command--> 4
print(users.info())
###Output
_____no_output_____
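###Markdown
 As a rough, hedged way to map the feature types above onto this dataframe, you can group the columns by pandas dtype (dtype only approximates the semantic type, so treat this as a first pass):
###Code
# approximate feature types by pandas dtype
print(users.dtypes.value_counts())
print('numeric columns:    ', users.select_dtypes(include='number').columns.tolist())
print('categorical columns:', users.select_dtypes(include='object').columns.tolist())
###Output
_____no_output_____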
###Markdown
 2-3-2 Explore the Dataset1. Dimensions of the dataset.1. Peek at the data itself.1. Statistical summary of all attributes.1. Breakdown of the data by the class variable.Don’t worry, each look at the data is **one command**. These are useful commands that you can use again and again on future projects. [Go to top](top)
###Code
# shape
#command--> 5
print(users.shape)
#columns*rows
#command--> 6
users.size
###Output
_____no_output_____
###Markdown
 We can get a quick idea of how many instances (rows) and how many attributes (columns) the data contains with the shape property. You can see the number of unique values of Medal with the command below:
###Code
#command--> 7
kernels['Medal'].unique()
#command--> 8
kernels["Medal"].value_counts()
###Output
_____no_output_____
###Markdown
To check the first 5 rows of the data set, we can use head(5).
###Code
kernels.head(5)
###Output
_____no_output_____
###Markdown
 To check the last 5 rows of the data set, we use the tail() function
###Code
#command--> 9
users.tail()
###Output
_____no_output_____
###Markdown
 To pull 5 random rows from the data set, we can use the **sample(5)** function
###Code
kernels.sample(5)
###Output
_____no_output_____
###Markdown
To give a statistical summary about the dataset, we can use **describe()**
###Code
kernels.describe()
###Output
_____no_output_____
###Markdown
 2-3-5 Find yourself in the Users dataset
###Code
#command--> 12
users[users['Id']==userid]
###Output
_____no_output_____
###Markdown
2-3-6 Find your kernels in Kernels dataset
###Code
#command--> 13
yourkernels=kernels[kernels['AuthorUserId']==userid]
yourkernels.head(2)
###Output
_____no_output_____
###Markdown
 3- Data Visualization LibrariesBefore you start learning, I am giving an overview of 10 interdisciplinary **Python data visualization libraries**, from the well-known to the obscure.* 1- matplotlibmatplotlib is the O.G. of Python data visualization libraries. Despite being over a decade old, it’s still the most widely used library for plotting in the Python community. It was designed to closely resemble MATLAB, a proprietary programming language developed in the 1980s.* 2- SeabornSeaborn harnesses the power of matplotlib to create beautiful charts in a few lines of code. The key difference is Seaborn’s default styles and color palettes, which are designed to be more aesthetically pleasing and modern. Since Seaborn is built on top of matplotlib, you’ll need to know matplotlib to tweak Seaborn’s defaults.* 3- ggplotggplot is based on ggplot2, an R plotting system, and concepts from The Grammar of Graphics. ggplot operates differently than matplotlib: it lets you layer components to create a complete plot. For instance, you can start with axes, then add points, then a line, a trendline, etc. Although The Grammar of Graphics has been praised as an “intuitive” method for plotting, seasoned matplotlib users might need time to adjust to this new mindset.* 4- BokehLike ggplot, Bokeh is based on The Grammar of Graphics, but unlike ggplot, it’s native to Python, not ported over from R. Its strength lies in the ability to create interactive, web-ready plots, which can be easily outputted as JSON objects, HTML documents, or interactive web applications. Bokeh also supports streaming and real-time data.* 5- pygalLike Bokeh and Plotly, pygal offers interactive plots that can be embedded in the web browser. Its prime differentiator is the ability to output charts as SVGs. As long as you’re working with smaller datasets, SVGs will do you just fine. But if you’re making charts with hundreds of thousands of data points, they’ll have trouble rendering and become sluggish.* 6- PlotlyYou might know Plotly as an online platform for data visualization, but did you also know you can access its capabilities from a Python notebook? Like Bokeh, Plotly’s forte is making interactive plots, but it offers some charts you won’t find in most libraries, like contour plots, dendrograms, and 3D charts.* 7- geoplotlibgeoplotlib is a toolbox for creating maps and plotting geographical data. You can use it to create a variety of map-types, like choropleths, heatmaps, and dot density maps. You must have Pyglet (an object-oriented programming interface) installed to use geoplotlib. Nonetheless, since most Python data visualization libraries don’t offer maps, it’s nice to have a library dedicated solely to them.* 8- GleamGleam is inspired by R’s Shiny package. It allows you to turn analyses into interactive web apps using only Python scripts, so you don’t have to know any other languages like HTML, CSS, or JavaScript. Gleam works with any Python data visualization library. Once you’ve created a plot, you can build fields on top of it so users can filter and sort data.* 9- missingnoDealing with missing data is a pain. missingno allows you to quickly gauge the completeness of a dataset with a visual summary, instead of trudging through a table. 
You can filter and sort data based on completion or spot correlations with a heatmap or a dendrogram.* 10- LeatherLeather’s creator, Christopher Groskopf, puts it best: “Leather is the Python charting library for those who need charts now and don’t care if they’re perfect.” It’s designed to work with all data types and produces charts as SVGs, so you can scale them without losing image quality. Since this library is relatively new, some of the documentation is still in progress. The charts you can make are pretty basic—but that’s the intention.At the end, there is a nice cheatsheet on how to best visualize your data. I think I will print it out as a good reminder of "best practices". Check out the link for the complete cheatsheet, also as a PDF. * 11- ChartifyChartify is a Python library that makes it easy for data scientists to create charts.Why use Chartify?1. Consistent input data format: Spend less time transforming data to get your charts to work. All plotting functions use a consistent tidy input data format.1. Smart default styles: Create pretty charts with very little customization required.1. Simple API: We've attempted to make the API as intuitive and easy to learn as possible.1. Flexibility: Chartify is built on top of Bokeh, so if you do need more control you can always fall back on Bokeh's API.Link: https://github.com/mjbahmani/Machine-Learning-Workflow-with-Python![cheatsheet ][1][Reference][2] [1]: http://s8.picofile.com/file/8340669884/53f6a826_d7df_4b55_81e6_7c23b3fff0a3_original.png [2]: https://blog.modeanalytics.com/python-data-visualization-libraries/ 4- MatplotlibThis Matplotlib tutorial takes you through the basics of Python data visualization: 1. the anatomy of a plot 1. pyplot 1. pylab1. and much more [Go to top](top) You can show matplotlib figures directly in the notebook by using the `%matplotlib notebook` and `%matplotlib inline` magic commands. `%matplotlib notebook` provides an interactive environment. We can use html cell magic to display the image.
###Code
#import matplotlib.pyplot as plt
plt.plot([1, 2, 3, 4], [10, 20, 25, 30], color='lightblue', linewidth=3)
plt.scatter([0.4, 3.8, 1.2, 2.5], [15, 25, 9, 26], color='darkgreen', marker='o')
plt.xlim(0.5, 4.5)
plt.show()
###Output
_____no_output_____
###Markdown
 Simple and powerful visualizations can be generated using the **Matplotlib Python** Library. More than a decade old, it is the most widely-used library for plotting in the Python community. A wide range of graphs from histograms to heat plots to line plots can be plotted using Matplotlib.Many other libraries are built on top of Matplotlib and are designed to work in conjunction with analysis, it being the first Python data visualization library. Libraries like pandas and Seaborn are “wrappers” over Matplotlib, allowing access to a number of Matplotlib’s methods with less code.[7] 4-1 Scatterplots
###Code
x = np.array([1,2,3,4,5,6,7,8])
y = x
plt.figure()
plt.scatter(x, y) # similar to plt.plot(x, y, '.'), but the underlying child objects in the axes are not Line2D
x = np.array([1,2,3,4,5,6,7,8])
y = x
# create a list of colors for each point to have
# ['green', 'green', 'green', 'green', 'green', 'green', 'green', 'red']
colors = ['green']*(len(x)-1)
colors.append('red')
plt.figure()
# plot the point with size 100 and chosen colors
plt.scatter(x, y, s=100, c=colors)
plt.figure()
# plot a data series 'Tall students' in red using the first two elements of x and y
plt.scatter(x[:2], y[:2], s=100, c='red', label='Tall students')
# plot a second data series 'Short students' in blue using the last three elements of x and y
plt.scatter(x[2:], y[2:], s=100, c='blue', label='Short students')
x = np.random.randint(low=1, high=11, size=50)
y = x + np.random.randint(1, 5, size=x.size)
data = np.column_stack((x, y))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2,
figsize=(8, 4))
ax1.scatter(x=x, y=y, marker='o', c='r', edgecolor='b')
ax1.set_title('Scatter: $x$ versus $y$')
ax1.set_xlabel('$x$')
ax1.set_ylabel('$y$')
ax2.hist(data, bins=np.arange(data.min(), data.max()),
label=('x', 'y'))
ax2.legend(loc=(0.65, 0.8))
ax2.set_title('Frequencies of $x$ and $y$')
ax2.yaxis.tick_right()
# scatter plot of total votes against total views for your kernels
#command--> 19
x = yourkernels["TotalVotes"]
y = yourkernels["TotalViews"]
plt.scatter(x, y, label='your kernels')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
4-2 Line Plots
###Code
linear_data = np.array([1,2,3,4,5,6,7,8])
exponential_data = linear_data**2
plt.figure()
# plot the linear data and the exponential data
plt.plot(linear_data, '-o', exponential_data, '-o')
# plot another series with a dashed red line
plt.plot([22,44,55], '--r')
###Output
_____no_output_____
###Markdown
4-3 Bar Charts
###Code
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3)
new_xvals = []
# plot another set of bars, adjusting the new xvals to make up for the first set of bars plotted
for item in xvals:
new_xvals.append(item+0.3)
plt.bar(new_xvals, exponential_data, width = 0.3 ,color='red')
linear_err = [randint(0,15) for x in range(len(linear_data))]
# This will plot a new set of bars with errorbars using the list of random error values
plt.bar(xvals, linear_data, width = 0.3, yerr=linear_err)
# stacked bar charts are also possible
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3, color='b')
plt.bar(xvals, exponential_data, width = 0.3, bottom=linear_data, color='r')
# or use barh for horizontal bar charts
plt.figure()
xvals = range(len(linear_data))
plt.barh(xvals, linear_data, height = 0.3, color='b')
plt.barh(xvals, exponential_data, height = 0.3, left=linear_data, color='r')
# Initialize the plot
fig = plt.figure(figsize=(20,10))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
# or replace the three lines of code above by the following line:
#fig, (ax1, ax2) = plt.subplots(1,2, figsize=(20,10))
# Plot the data
ax1.bar([1,2,3],[3,4,5])
ax2.barh([0.5,1,2.5],[0,1,2])
# Show the plot
plt.show()
plt.figure()
# subplot with 1 row, 2 columns, and current axis is 1st subplot axes
plt.subplot(1, 2, 1)
linear_data = np.array([1,2,3,4,5,6,7,8])
plt.plot(linear_data, '-o')
exponential_data = linear_data**2
# subplot with 1 row, 2 columns, and current axis is 2nd subplot axes
plt.subplot(1, 2, 2)
plt.plot(exponential_data, '-o')
# plot exponential data on 1st subplot axes
plt.subplot(1, 2, 1)
plt.plot(exponential_data, '-x')
plt.figure()
ax1 = plt.subplot(1, 2, 1)
plt.plot(linear_data, '-o')
# pass sharey=ax1 to ensure the two subplots share the same y axis
ax2 = plt.subplot(1, 2, 2, sharey=ax1)
plt.plot(exponential_data, '-x')
###Output
_____no_output_____
###Markdown
4-4 Histograms
###Code
# create 2x2 grid of axis subplots
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
# draw n = 10, 100, 1000, and 10000 samples from the normal distribution and plot corresponding histograms
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample)
axs[n].set_title('n={}'.format(sample_size))
# repeat with number of bins set to 100
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample, bins=100)
axs[n].set_title('n={}'.format(sample_size))
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
plt.scatter(X,Y)
###Output
_____no_output_____
###Markdown
It looks like perhaps two of the input variables have a Gaussian distribution. This is useful to note as we can use algorithms that can exploit this assumption.
###Code
yourkernels["TotalViews"].hist();
yourkernels["TotalComments"].hist();
sns.factorplot('TotalViews','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
4-5 Box and Whisker PlotsIn descriptive statistics, a **box plot** or boxplot is a method for graphically depicting groups of numerical data through their quartiles. Box plots may also have lines extending vertically from the boxes (whiskers) indicating variability outside the upper and lower quartiles, hence the terms box-and-whisker plot and box-and-whisker diagram.[wikipedia]
###Code
normal_sample = np.random.normal(loc=0.0, scale=1.0, size=10000)
random_sample = np.random.random(size=10000)
gamma_sample = np.random.gamma(2, size=10000)
df = pd.DataFrame({'normal': normal_sample,
'random': random_sample,
'gamma': gamma_sample})
plt.figure()
# create a boxplot of the normal data, assign the output to a variable to suppress output
_ = plt.boxplot(df['normal'], whis='range')
# clear the current figure
plt.clf()
# plot boxplots for all three of df's columns
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis='range')
plt.figure()
_ = plt.hist(df['gamma'], bins=100)
plt.figure()
plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis='range')
# overlay axis on top of another
ax2 = mpl_il.inset_axes(plt.gca(), width='60%', height='40%', loc=2)
ax2.hist(df['gamma'], bins=100)
ax2.margins(x=0.5)
# switch the y axis ticks for ax2 to the right side
ax2.yaxis.tick_right()
# if `whis` argument isn't passed, boxplot defaults to showing 1.5*interquartile (IQR) whiskers with outliers
plt.figure()
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ] )
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
4-6 Heatmaps
###Code
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
_ = plt.hist2d(X, Y, bins=25)
plt.figure()
_ = plt.hist2d(X, Y, bins=100)
###Output
_____no_output_____
###Markdown
4-7 Animations
###Code
n = 100
x = np.random.randn(n)
# create the function that will do the plotting, where curr is the current frame
def update(curr):
    # check if the animation is at the last frame, and if so, stop it
if curr == n:
a.event_source.stop()
plt.cla()
bins = np.arange(-4, 4, 0.5)
plt.hist(x[:curr], bins=bins)
plt.axis([-4,4,0,30])
plt.gca().set_title('Sampling the Normal Distribution')
plt.gca().set_ylabel('Frequency')
plt.gca().set_xlabel('Value')
plt.annotate('n = {}'.format(curr), [3,27])
fig = plt.figure()
a = animation.FuncAnimation(fig, update, interval=100)
###Output
_____no_output_____
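###Markdown
 If you want to keep the animation outside the notebook, a minimal sketch (assuming the Pillow package is installed so matplotlib's 'pillow' GIF writer is available; by default matplotlib caps the number of saved frames):
###Code
# persist the animation above as a GIF file
a.save('sampling_normal.gif', writer='pillow', dpi=80)
###Output
_____no_output_____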
###Markdown
4-8 Interactivity
###Code
plt.figure()
data = np.random.rand(10)
plt.plot(data)
def onclick(event):
plt.cla()
plt.plot(data)
plt.gca().set_title('Event at pixels {},{} \nand data {},{}'.format(event.x, event.y, event.xdata, event.ydata))
# tell mpl_connect we want to pass a 'button_press_event' into onclick when the event is detected
plt.gcf().canvas.mpl_connect('button_press_event', onclick)
from random import shuffle
origins = ['China', 'Brazil', 'India', 'USA', 'Canada', 'UK', 'Germany', 'Iraq', 'Chile', 'Mexico']
shuffle(origins)
df = pd.DataFrame({'height': np.random.rand(10),
'weight': np.random.rand(10),
'origin': origins})
df
plt.figure()
# picker=5 means the mouse doesn't have to click directly on an event, but can be up to 5 pixels away
plt.scatter(df['height'], df['weight'], picker=5)
plt.gca().set_ylabel('Weight')
plt.gca().set_xlabel('Height')
def onpick(event):
origin = df.iloc[event.ind[0]]['origin']
plt.gca().set_title('Selected item came from {}'.format(origin))
# tell mpl_connect we want to pass a 'pick_event' into onpick when the event is detected
plt.gcf().canvas.mpl_connect('pick_event', onpick)
# use the 'seaborn-colorblind' style
plt.style.use('seaborn-colorblind')
###Output
_____no_output_____
###Markdown
4-9 DataFrame.plot
###Code
np.random.seed(123)
df = pd.DataFrame({'A': np.random.randn(365).cumsum(0),
'B': np.random.randn(365).cumsum(0) + 20,
'C': np.random.randn(365).cumsum(0) - 20},
index=pd.date_range('1/1/2017', periods=365))
df.head()
df.plot('A','B', kind = 'scatter');
###Output
_____no_output_____
###Markdown
You can also choose the plot kind by using the `DataFrame.plot.kind` methods instead of providing the `kind` keyword argument.`kind` :- `'line'` : line plot (default)- `'bar'` : vertical bar plot- `'barh'` : horizontal bar plot- `'hist'` : histogram- `'box'` : boxplot- `'kde'` : Kernel Density Estimation plot- `'density'` : same as 'kde'- `'area'` : area plot- `'pie'` : pie plot- `'scatter'` : scatter plot- `'hexbin'` : hexbin plot [Go to top](top)
###Code
# create a scatter plot of columns 'A' and 'C', with changing color (c) and size (s) based on column 'B'
df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
ax = df.plot.scatter('A', 'C', c='B', s=df['B'], colormap='viridis')
ax.set_aspect('equal')
df.plot.box();
df.plot.hist(alpha=0.7);
###Output
_____no_output_____
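###Markdown
 A small illustration that the `kind` keyword and the `DataFrame.plot.<kind>` accessor are two spellings of the same call (stacked=False because these columns mix positive and negative values):
###Code
# these two lines draw the same area chart
df.plot(kind='area', stacked=False, alpha=0.4);
df.plot.area(stacked=False, alpha=0.4);
###Output
_____no_output_____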
###Markdown
[Kernel density estimation plots](https://en.wikipedia.org/wiki/Kernel_density_estimation) are useful for deriving a smooth continuous function from a given sample.
###Code
df.plot.kde();
###Output
_____no_output_____
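###Markdown
 The smoothness of the KDE is controlled by the bandwidth. A hedged sketch: pandas forwards `bw_method` to scipy's `gaussian_kde`, so smaller values give wigglier estimates and larger values smoother ones:
###Code
# compare a narrow and a wide bandwidth
df.plot.kde(bw_method=0.1);
df.plot.kde(bw_method=1.0);
###Output
_____no_output_____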
###Markdown
 5- SeabornSeaborn is an open source, BSD-licensed Python library providing a high-level API for visualizing data using the Python programming language.[9][Go to top](top) 5-1 Seaborn Vs MatplotlibIt is summarized that if Matplotlib “tries to make easy things easy and hard things possible”, Seaborn “tries to make a well-defined set of hard things easy too.”Seaborn helps resolve the two major problems faced by Matplotlib; the problems are* Default Matplotlib parameters* Working with data framesAs Seaborn complements and extends Matplotlib, the learning curve is quite gradual. If you know Matplotlib, you are already halfway through Seaborn.Important Features of SeabornSeaborn is built on top of Python’s core visualization library Matplotlib. It is meant to serve as a complement, and not a replacement. However, Seaborn comes with some very important features. Let us see a few of them here. The features help in −* Built in themes for styling matplotlib graphics* Visualizing univariate and bivariate data* Fitting in and visualizing linear regression models* Plotting statistical time series data* Seaborn works well with NumPy and Pandas data structures* It comes with built in themes for styling Matplotlib graphicsIn most cases, you will still use Matplotlib for simple plotting. The knowledge of Matplotlib is recommended to tweak Seaborn’s default plots.[9][Go to top](top)
###Code
def sinplot(flip = 1):
x = np.linspace(0, 14, 100)
for i in range(1, 5):
plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip)
sinplot()
plt.show()
def sinplot(flip = 1):
x = np.linspace(0, 14, 100)
for i in range(1, 5):
plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip)
sns.set()
sinplot()
plt.show()
np.random.seed(1234)
v1 = pd.Series(np.random.normal(0,10,1000), name='v1')
v2 = pd.Series(2*v1 + np.random.normal(60,15,1000), name='v2')
plt.figure()
plt.hist(v1, alpha=0.7, bins=np.arange(-50,150,5), label='v1');
plt.hist(v2, alpha=0.7, bins=np.arange(-50,150,5), label='v2');
plt.legend();
plt.figure()
# we can pass keyword arguments for each individual component of the plot
sns.distplot(v2, hist_kws={'color': 'Teal'}, kde_kws={'color': 'Navy'});
sns.jointplot(v1, v2, alpha=0.4);
grid = sns.jointplot(v1, v2, alpha=0.4);
grid.ax_joint.set_aspect('equal')
sns.jointplot(v1, v2, kind='hex');
# set the seaborn style for all the following plots
sns.set_style('white')
sns.jointplot(v1, v2, kind='kde', space=0);
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
# violin plot of TotalVotes across TotalViews
#command--> 24
sns.violinplot(data=yourkernels,x="TotalViews", y="TotalVotes")
# violin plot of TotalVotes across TotalComments
sns.violinplot(data=yourkernels,x="TotalComments", y="TotalVotes")
sns.violinplot(data=yourkernels,x="Medal", y="TotalVotes")
sns.violinplot(data=yourkernels,x="Medal", y="TotalComments")
###Output
_____no_output_____
###Markdown
 5-2 kdeplot
###Code
# seaborn's kdeplot plots univariate or bivariate density estimates.
# Size can be changed by tweaking the value used
#command--> 25
sns.FacetGrid(yourkernels, hue="Medal", size=5).map(sns.kdeplot, "TotalComments").add_legend()
plt.show()
sns.FacetGrid(yourkernels, hue="Medal", size=5).map(sns.kdeplot, "TotalVotes").add_legend()
plt.show()
f,ax=plt.subplots(1,3,figsize=(20,8))
sns.distplot(yourkernels[yourkernels['Medal']==1].TotalVotes,ax=ax[0])
ax[0].set_title('TotalVotes in Medal 1')
sns.distplot(yourkernels[yourkernels['Medal']==2].TotalVotes,ax=ax[1])
ax[1].set_title('TotalVotes in Medal 2')
sns.distplot(yourkernels[yourkernels['Medal']==3].TotalVotes,ax=ax[2])
ax[2].set_title('TotalVotes in Medal 3')
plt.show()
###Output
_____no_output_____
###Markdown
5-3 jointplot
###Code
# Use seaborn's jointplot to make a hexagonal bin plot
#Set desired size and ratio and choose a color.
#command--> 25
sns.jointplot(x="TotalVotes", y="TotalViews", data=yourkernels, size=10,ratio=10, kind='hex',color='green')
plt.show()
###Output
_____no_output_____
###Markdown
 5-4 Bivariate KDE (jointplot)
###Code
# seaborn's jointplot shows bivariate scatterplots and univariate histograms with kernel density
# estimation in the same figure
sns.jointplot(x="TotalVotes", y="TotalViews", data=yourkernels, size=6, kind='kde', color='#800000', space=0)
###Output
_____no_output_____
###Markdown
5-5 Heatmap
###Code
#command--> 26
plt.figure(figsize=(10,7))
sns.heatmap(yourkernels.corr(),annot=True,cmap='cubehelix_r') #draws a heatmap of the correlation matrix calculated by yourkernels.corr()
plt.show()
sns.factorplot('TotalComments','TotalVotes',data=yourkernels)
plt.show()
###Output
_____no_output_____
###Markdown
5-6 distplot
###Code
sns.distplot(yourkernels['TotalVotes']);
###Output
_____no_output_____
###Markdown
6- PlotlyHow to use **Plotly** offline inside IPython notebooks. 6-1 New to Plotly?Plotly, also known by its URL, Plot.ly, is a technical computing company headquartered in Montreal, Quebec, that develops online data analytics and visualization tools. Plotly provides online graphing, analytics, and statistics tools for individuals and collaboration, as well as scientific graphing libraries for Python, R, MATLAB, Perl, Julia, Arduino, and REST.[Go to top](top)
###Code
# example for plotly
py.init_notebook_mode(connected=True)
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
trace = go.Scatter(x=X[:, 0],
y=X[:, 1],
mode='markers',
marker=dict(color=np.random.randn(150),
size=10,
colorscale='Viridis',
showscale=False))
layout = go.Layout(title='Training Points',
xaxis=dict(title='Sepal length',
showgrid=False),
yaxis=dict(title='Sepal width',
showgrid=False),
)
fig = go.Figure(data=[trace], layout=layout)
py.iplot(fig)
from sklearn.decomposition import PCA
X_reduced = PCA(n_components=3).fit_transform(iris.data)
trace = go.Scatter3d(x=X_reduced[:, 0],
y=X_reduced[:, 1],
z=X_reduced[:, 2],
mode='markers',
marker=dict(
size=6,
color=np.random.randn(150),
colorscale='Viridis',
opacity=0.8)
)
layout=go.Layout(title='First three PCA directions',
scene=dict(
xaxis=dict(title='1st eigenvector'),
yaxis=dict(title='2nd eigenvector'),
zaxis=dict(title='3rd eigenvector'))
)
fig = go.Figure(data=[trace], layout=layout)
py.iplot(fig)
###Output
_____no_output_____
###Markdown
6-2 Plotly Offline from Command LineYou can plot your graphs from a python script from command line. On executing the script, it will open a web browser with your Plotly Graph drawn.[Go to top](top)
###Code
plot([go.Scatter(x=[1, 2, 3], y=[3, 1, 6])])
np.random.seed(5)
fig = tools.make_subplots(rows=2, cols=3,
print_grid=False,
specs=[[{'is_3d': True}, {'is_3d': True}, {'is_3d': True}],
[ {'is_3d': True, 'rowspan':1}, None, None]])
scene = dict(
camera = dict(
up=dict(x=0, y=0, z=1),
center=dict(x=0, y=0, z=0),
eye=dict(x=2.5, y=0.1, z=0.1)
),
xaxis=dict(
range=[-1, 4],
title='Petal width',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)',
showticklabels=False, ticks=''
),
yaxis=dict(
range=[4, 8],
title='Sepal length',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)',
showticklabels=False, ticks=''
),
zaxis=dict(
range=[1,8],
title='Petal length',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)',
showticklabels=False, ticks=''
)
)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
est.fit(X)
labels = est.labels_
trace = go.Scatter3d(x=X[:, 3], y=X[:, 0], z=X[:, 2],
showlegend=False,
mode='markers',
marker=dict(
color=labels.astype(np.float),
line=dict(color='black', width=1)
))
fig.append_trace(trace, 1, fignum)
fignum = fignum + 1
y = np.choose(y, [1, 2, 0]).astype(np.float)
trace1 = go.Scatter3d(x=X[:, 3], y=X[:, 0], z=X[:, 2],
showlegend=False,
mode='markers',
marker=dict(
color=y,
line=dict(color='black', width=1)))
fig.append_trace(trace1, 2, 1)
# apply the shared camera/axis settings defined above to each 3D subplot
for i in range(1, 5):
    fig['layout']['scene{}'.format(i)].update(scene)
fig['layout'].update(height=900, width=900,
                     margin=dict(l=10,r=10))
py.iplot(fig)
###Output
_____no_output_____
###Markdown
7- Bokeh**Bokeh** is a large library that exposes many capabilities, so this section is only a quick tour of some common Bokeh use cases and workflows. For more detailed information please consult the full User Guide.[11]Let’s begin with some examples. Plotting data in basic Python lists as a line plot including zoom, pan, save, and other tools is simple and straightforward:[Go to top](top)
###Code
output_notebook()
# prepare some data
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
# create a new plot with a title and axis labels
p = figure(title="simple line example", x_axis_label='x', y_axis_label='y')
# add a line renderer with legend and line thickness
p.line(x, y, legend="Temp.", line_width=2)
# show the results
show(p)
###Output
_____no_output_____
###Markdown
When you execute this script, you will see that a new output file "lines.html" is created, and that a browser automatically opens a new tab to display it. (For presentation purposes we have included the plot output directly inline in this document.)The basic steps to creating plots with the bokeh.plotting interface are:Prepare some dataIn this case plain python lists, but could also be NumPy arrays or Pandas series.Tell Bokeh where to generate outputIn this case using output_file(), with the filename "lines.html". Another option is output_notebook() for use in Jupyter notebooks.Call figure()This creates a plot with typical default options and easy customization of title, tools, and axes labels.Add renderersIn this case, we use line() for our data, specifying visual customizations like colors, legends and widths.Ask Bokeh to show() or save() the results.These functions save the plot to an HTML file and optionally display it in a browser.Steps three and four can be repeated to create more than one plot, as shown in some of the examples below.The bokeh.plotting interface is also quite handy if we need to customize the output a bit more by adding more data series, glyphs, logarithmic axis, and so on. It’s also possible to easily combine multiple glyphs together on one plot as shown below:[Go to top](top)
###Code
from bokeh.plotting import figure, output_file, show
# prepare some data
x = [0.1, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]
y0 = [i**2 for i in x]
y1 = [10**i for i in x]
y2 = [10**(i**2) for i in x]
# create a new plot
p = figure(
tools="pan,box_zoom,reset,save",
y_axis_type="log", y_range=[0.001, 10**11], title="log axis example",
x_axis_label='sections', y_axis_label='particles'
)
# add some renderers
p.line(x, x, legend="y=x")
p.circle(x, x, legend="y=x", fill_color="white", size=8)
p.line(x, y0, legend="y=x^2", line_width=3)
p.line(x, y1, legend="y=10^x", line_color="red")
p.circle(x, y1, legend="y=10^x", fill_color="red", line_color="red", size=6)
p.line(x, y2, legend="y=10^x^2", line_color="orange", line_dash="4 4")
# show the results
show(p)
# bokeh basics
# Create a blank figure with labels
p = figure(plot_width = 600, plot_height = 600,
title = 'Example Glyphs',
x_axis_label = 'X', y_axis_label = 'Y')
# Example data
squares_x = [1, 3, 4, 5, 8]
squares_y = [8, 7, 3, 1, 10]
circles_x = [9, 12, 4, 3, 15]
circles_y = [8, 4, 11, 6, 10]
# Add squares glyph
p.square(squares_x, squares_y, size = 12, color = 'navy', alpha = 0.6)
# Add circle glyph
p.circle(circles_x, circles_y, size = 12, color = 'red')
# Set to output the plot in the notebook
output_notebook()
# Show the plot
show(p)
###Output
_____no_output_____
###Markdown
8- NetworkX**NetworkX** is a Python package for the creation, manipulation, and study of the structure, dynamics, and functions of complex networks.
###Code
import sys
import matplotlib.pyplot as plt
import networkx as nx
G = nx.grid_2d_graph(5, 5) # 5x5 grid
# print the adjacency list
for line in nx.generate_adjlist(G):
print(line)
# write edgelist to grid.edgelist
nx.write_edgelist(G, path="grid.edgelist", delimiter=":")
# read edgelist from grid.edgelist
H = nx.read_edgelist(path="grid.edgelist", delimiter=":")
nx.draw(H)
plt.show()
from ipywidgets import interact
%matplotlib inline
import matplotlib.pyplot as plt
import networkx as nx
# wrap a few graph generation functions so they have the same signature
def random_lobster(n, m, k, p):
return nx.random_lobster(n, p, p / m)
def powerlaw_cluster(n, m, k, p):
return nx.powerlaw_cluster_graph(n, m, p)
def erdos_renyi(n, m, k, p):
return nx.erdos_renyi_graph(n, p)
def newman_watts_strogatz(n, m, k, p):
return nx.newman_watts_strogatz_graph(n, k, p)
def plot_random_graph(n, m, k, p, generator):
g = generator(n, m, k, p)
nx.draw(g)
plt.show()
interact(plot_random_graph, n=(2,30), m=(1,10), k=(1,10), p=(0.0, 1.0, 0.001),
generator={
'lobster': random_lobster,
'power law': powerlaw_cluster,
'Newman-Watts-Strogatz': newman_watts_strogatz,
u'Erdős-Rényi': erdos_renyi,
});
###Output
_____no_output_____ |
07-machine-learning/labs/1_[EDA]_titanic-first-kaggle-challenge-in-python.ipynb | ###Markdown
 First Kaggle challenge - Titanic Machine Learning from Disaster- v1.6_032020- author: marcusRB- [Kaggle - Titanic challenge](https://www.kaggle.com/c/titanic/)```In this version I use only a few features and try another cleansing method. I use the same ML algorithms.```This is the legendary Titanic ML competition – the best first challenge for you to dive into ML competitions and familiarize yourself with how the Kaggle platform works.The competition is simple: use machine learning to create a model that predicts which passengers survived the Titanic shipwreck. The ChallengeThe sinking of the Titanic is one of the most infamous shipwrecks in history.On April 15, 1912, during her maiden voyage, the widely considered “unsinkable” RMS Titanic sank after colliding with an iceberg. Unfortunately, there weren’t enough lifeboats for everyone onboard, resulting in the death of 1502 out of 2224 passengers and crew.While there was some element of luck involved in surviving, it seems some groups of people were more likely to survive than others.In this challenge, we ask you to build a predictive model that answers the question: “what sorts of people were more likely to survive?” using passenger data (i.e. name, age, gender, socio-economic class, etc.). What Data Will I Use in This Competition?In this competition, you’ll gain access to two similar datasets that include passenger information like name, age, gender, socio-economic class, etc. One dataset is titled `train.csv` and the other is titled `test.csv`.Train.csv contains the details of a subset of the passengers on board (891 to be exact) and, importantly, reveals whether they survived or not, also known as the “ground truth”.The `test.csv` dataset contains similar information but does not disclose the “ground truth” for each passenger. It’s your job to predict these outcomes.Using the patterns you find in the train.csv data, predict whether the other 418 passengers on board (found in test.csv) survived. Workflow stagesThe competition solution workflow goes through seven stages described in the Data Science Solutions book.1. Question or problem definition.2. Acquire training and testing data.3. Wrangle, prepare, cleanse the data.4. Analyze, identify patterns, and explore the data.5. Model, predict and solve the problem.6. Visualize, report, and present the problem solving steps and final solution.7. Supply or submit the results. Check the versions of libraries
###Code
# Check the versions of libraries MacOS
# Python version
import sys
print('Python: {}'.format(sys.version))
# scipy
import scipy
print('scipy: {}'.format(scipy.__version__))
# numpy
import numpy
print('numpy: {}'.format(numpy.__version__))
# matplotlib
import matplotlib
print('matplotlib: {}'.format(matplotlib.__version__))
# pandas
import pandas
print('pandas: {}'.format(pandas.__version__))
# scikit-learn
import sklearn
print('sklearn: {}'.format(sklearn.__version__))
# Check the versions of libraries Win10 Docker
#!pip install --upgrade pandas
#!pip install --upgrade sklearn
#!pip install kaggle
#!pip install keras
#!pip install tensorflow
###Output
_____no_output_____
###Markdown
*** Import Libraries
###Code
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
from scipy.stats import norm, skew
from scipy import stats
import xlrd, xdrlib
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# data mining
#from sklearn.impute import KNNImputer, MissingIndicator, SimpleImputer
from sklearn import impute
#from sklearn_pandas import categorical_imputer, CategoricalImputer
from sklearn.pipeline import make_pipeline, make_union, Pipeline
from sklearn import preprocessing
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# machine learning
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
## scikit modeling libraries
from sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier,
GradientBoostingClassifier, ExtraTreesClassifier,
VotingClassifier)
from sklearn.model_selection import (GridSearchCV, cross_val_score, cross_val_predict,
StratifiedKFold, learning_curve)
## Neural Network
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
## Load metrics for predictive modeling
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import RFE
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import mean_absolute_error, mean_squared_error
## Warnings and other tools
import itertools
import warnings
warnings.filterwarnings("ignore")
###Output
/opt/conda/lib/python3.7/site-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.
import pandas.util.testing as tm
Using TensorFlow backend.
###Markdown
 *** Load datasetKaggle provides two datasets, train and test, as CSV files. We load both and concatenate them so the preprocessing can be applied to the full data at once.
###Code
#!chmod 600 /home/jovyan/.kaggle/kaggle.json
#!
#!unzip -o titanic.zip
# Load dataset train and test
train_titanic = pd.read_csv('train.csv')
test_titanic = pd.read_csv('test.csv')
ids = test_titanic['PassengerId']
# concat these two datasets; this will come in handy while processing the data
titanic_list = pd.concat(objs=[train_titanic, test_titanic], axis=0).reset_index(drop=True)
titanic_list
train_titanic.shape, test_titanic.shape
train_titanic.head(10)
train_titanic.tail(10)
titanic_list[891:]
###Output
_____no_output_____
###Markdown
 *** Data descriptionOur dataset has 12 columns or variables, of which 3 (Age, Cabin and Embarked) have missing values. The variable we want to predict is Survived, which indicates whether the passenger survived the Titanic tragedy.
###Code
# Check dataframe structure
titanic_list.info()
# Check dataframe basic stats data
#stats(titanic_list)
# Check combined dataframe basic stats data
titanic_list.describe()
###Output
_____no_output_____
###Markdown
 *** EDA, visualization and data transformationWe analyze each variable one by one, check for null values and errors, and create new variables where useful.
###Code
# Check null and NA values for both dataset
titanic_list.isna().sum()
# Table of relative frequency
titanic_list.isnull().sum()/len(titanic_list)*100
###Output
_____no_output_____
###Markdown
 We need to check those 3 features; we will most probably remove `Cabin`, since it has too many null values. `PassengerId`Id of the passenger. We remove it because it has no predictive weight in our model.
###Code
# Check first 10 elements
titanic_list['PassengerId'].head(10)
# Remove the PassengerId variable from the combined dataset
titanic_list.drop(['PassengerId'], axis=1, inplace=True)
# Check train dataset
titanic_list.head()
###Output
_____no_output_____
###Markdown
 `Survived`This is our dependent (target) variable: it indicates whether the passenger survived (`1`) or not (`0`). Almost 38% of passengers survived.
###Code
titanic_list['Survived'][:891] = titanic_list['Survived'][:891].astype(int)
sns.barplot(x="Survived", data=titanic_list)
titanic_list.describe()['Survived']
###Output
_____no_output_____
###Markdown
 `Pclass`Ticket class. This is a categorical feature with 3 values: first, second and third class. There is a strong correlation between this feature and the target variable, as the quick check below shows.
###Code
titanic_list[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean()
sns.barplot(x="Pclass", y="Survived", data=titanic_list)
###Output
_____no_output_____
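###Markdown
 A quick, hedged sanity check of that correlation claim, computed on the 891 labeled rows only (Spearman, since Pclass is ordinal):
###Code
# rank correlation between ticket class and survival on the training rows
labeled = titanic_list[:891]
print(labeled[['Pclass', 'Survived']].astype(float).corr(method='spearman'))
###Output
_____no_output_____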
###Markdown
 `Sex`Passenger gender. It's a categorical feature with two values, `male` and `female`. We convert it to a dummy (binary) value.
###Code
# Now visualization of 'Gender'
# Printing counts and percentage of male and female
print(titanic_list['Sex'].value_counts(sort=False))
print(titanic_list['Sex'].value_counts(sort=False,normalize=True))
# Making variable categorical
#sub['SEX'] = sub['SEX'].astype('category')
# Visualising counts of Gender with bar graph
sns.countplot(x="Sex", data=titanic_list);
plt.xlabel('Gender')
plt.ylabel('Frequency')
plt.title('Count of Gender')
# Showing proportion of survival for different type of gender
sns.catplot(x="Sex", y="Survived", data=titanic_list, kind="bar", ci=None)
plt.xlabel('Gender')
plt.ylabel('Survive Percentage')
plt.title('Survive v/s Sex')
# Check the survived ratio with sex
titanic_list[["Sex", "Survived"]].groupby(['Sex'], as_index=False).mean()
sns.barplot(x="Sex", y="Survived", data=titanic_list)
# Convert categorical variable to binary variable - female 1 and male 0
titanic_list['Sex'] = titanic_list['Sex'].map( {'female': 1, 'male': 0} ).astype(int)
# Check Sex features
titanic_list.head()
###Output
_____no_output_____
###Markdown
 `SibSp`Numerical feature. Indicates the number of siblings/spouses of the passenger aboard.
###Code
titanic_list[["SibSp", "Survived"]].groupby(['SibSp'], as_index=False).mean()
sns.barplot(x="SibSp", y="Survived", data=titanic_list)
###Output
_____no_output_____
###Markdown
 `Parch`Number of parents/children of the passenger aboard. Numerical variable.
###Code
titanic_list[["Parch", "Survived"]].groupby(['Parch'], as_index=False).mean()
sns.barplot(x="Parch", y="Survived", data=titanic_list)
###Output
_____no_output_____
###Markdown
 `FamilySize`We create a new numerical feature, FamilySize, as the sum of `SibSp`, `Parch` and 1 (the passenger himself).
###Code
# Create a new feature, family size included passenger alone
titanic_list['FamilySize'] = titanic_list['SibSp'] + titanic_list['Parch'] + 1
titanic_list[["FamilySize", "Survived"]].groupby(['FamilySize'], as_index=False).mean()
sns.barplot(x="FamilySize", y="Survived", data=titanic_list)
###Output
_____no_output_____
###Markdown
 `IsAlone`We create a new binary feature, `IsAlone`, indicating whether the passenger travels alone, based on FamilySize.
###Code
titanic_list['IsAlone'] = 0
titanic_list.loc[titanic_list['FamilySize'] == 1, 'IsAlone'] = 1
# Check new feature with predictor
titanic_list[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()
sns.barplot(x="IsAlone", y="Survived", data=titanic_list)
###Output
_____no_output_____
###Markdown
 `Ticket`Ticket number of the passenger. At first glance it isn't useful for the model, so we remove it.
###Code
# We remove the Ticket variable from the combined dataset
titanic_list.drop(['Ticket'], axis=1, inplace=True)
# We check the dataset again
titanic_list.head(10)
###Output
_____no_output_____
###Markdown
 `Embarked`This feature is the port of embarkation. It is categorical with three values: `C` for Cherbourg, `Q` for Queenstown, `S` for Southampton.
###Code
# Check ratio Embarked and Survived variable
titanic_list[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean()
# Fill NA values with the most frequent port (the mode)
freq_port = titanic_list.Embarked.dropna().mode()[0]
freq_port
# Assign result on the dataset
titanic_list['Embarked'] = titanic_list['Embarked'].fillna(freq_port)
# Check ratio Embarked and Survived variable
titanic_list[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean()
sns.barplot(x="Embarked", y="Survived", data=titanic_list)
###Output
_____no_output_____
###Markdown
 `Fare`This continuous numerical variable is the ticket fare of the passenger.
###Code
# Fill missing Fare values by sampling from the observed fare distribution
s = titanic_list['Fare'].value_counts(normalize=True)
missing = titanic_list['Fare'].isnull()
titanic_list.loc[missing,'Fare'] = np.random.choice(s.index, size=len(titanic_list[missing]),
p=s.values).astype(int)
titanic_list['Fare']
sns.distplot(titanic_list['Fare'], fit=norm)
###Output
_____no_output_____
###Markdown
 Checking the variable, its distribution is highly skewed. We apply a logarithm to bring it closer to normal; see the skewness check after the next cell.
###Code
titanic_list['Fare'] = np.log1p(titanic_list['Fare'],)
sns.distplot(titanic_list['Fare'], fit=norm)
###Output
_____no_output_____
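###Markdown
 A hedged numeric check of the transform, reusing the `skew` function imported from scipy.stats above (`np.expm1` inverts `np.log1p`, recovering the original scale):
###Code
# skewness on the original scale vs. after log1p
print('skew (original scale): %.3f' % skew(np.expm1(titanic_list['Fare'])))
print('skew (log1p scale):    %.3f' % skew(titanic_list['Fare']))
###Output
_____no_output_____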
###Markdown
 We transform it into a categorical variable by binning the fares into five quantile groups
###Code
titanic_list['FareGroup'] = pd.qcut(titanic_list['Fare'], 5, labels=['A', 'B', 'C', 'D', 'E'])
titanic_list[['FareGroup', 'Survived']].groupby(['FareGroup'], as_index=False).mean()
sns.barplot(x="FareGroup", y="Survived", data=titanic_list)
###Output
_____no_output_____
###Markdown
 `Cabin`We transform this feature into a binary variable that indicates whether a cabin is recorded for the passenger.
###Code
#!pip install tabulate
pd.unique(titanic_list['Cabin'])
# Now visualization of 'Cabin'
# Printing counts and percentage of each cabin value
print(titanic_list['Cabin'].value_counts(sort=False))
print(titanic_list['Cabin'].value_counts(sort=False,normalize=True))
# Making variable categorical
#sub['SEX'] = sub['SEX'].astype('category')
# Visualising counts of Cabin with bar graph
sns.countplot(x="Cabin", data=titanic_list);
plt.xlabel('Cabin')
plt.ylabel('Frequency')
plt.title('Count of Cabin')
# Showing proportion of survival for each cabin value
sns.catplot(x="Cabin", y="Survived", data=titanic_list, kind="bar", ci=None)
plt.xlabel('Cabin')
plt.ylabel('Survive Percentage')
plt.title('Survive v/s Cabin')
titanic_list["Cabin"].value_counts()
# Create new variable InCabin
titanic_list['InCabin'] = ~titanic_list['Cabin'].isnull()
sns.barplot(x="InCabin", y="Survived", data=titanic_list)
plt.show()
#Turning cabin number into Deck
#cabin_only = titanic_list[["Cabin"]].copy()
titanic_list["Cabin_Data"] = titanic_list["Cabin"].isnull().apply(lambda x: not x)
titanic_list["Cabin_Data"]
titanic_list["Deck"] = titanic_list["Cabin"].str.slice(0,1)
titanic_list["Room"] = titanic_list["Cabin"].str.slice(1,5).str.extract("([0-9]+)", expand=False).astype("float")
titanic_list[titanic_list["Cabin_Data"]]
titanic_list[titanic_list["Deck"]=="F"]
###Output
_____no_output_____
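###Markdown
 A tiny, hedged demo of the deck/room extraction above, run on made-up cabin strings rather than rows from the dataset:
###Code
# deck letter comes from the first character, room number from the digits after it
demo = pd.Series(['C85', 'B57 B59', 'F G73', None])
print(demo.str.slice(0, 1))
print(demo.str.slice(1, 5).str.extract('([0-9]+)', expand=False))
###Output
_____no_output_____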
###Markdown
First we'll drop the Cabin and Cabin_Data columns.
###Code
titanic_list.drop(["Cabin", "Cabin_Data"], axis=1, inplace=True, errors="ignore")
###Output
_____no_output_____
###Markdown
Now we'll deal with the missing values. For the deck column we will replace the null values with an unused letter to represent lack of data. For the room number we will simply use the mean.
###Code
titanic_list["Deck"] = titanic_list["Deck"].fillna("N")
titanic_list["Room"] = round(titanic_list["Room"].fillna(titanic_list["Room"].mean()),4)
titanic_list.info()
def one_hot_column(df, label, drop_col=False):
'''
This function will one hot encode the chosen column.
Args:
df: Pandas dataframe
label: Label of the column to encode
drop_col: boolean to decide if the chosen column should be dropped
Returns:
pandas dataframe with the given encoding
'''
one_hot = pd.get_dummies(df[label], prefix=label)
if drop_col:
df = df.drop(label, axis=1)
df = df.join(one_hot)
return df
def one_hot(df, labels, drop_col=False):
'''
This function will one hot encode a list of columns.
Args:
df: Pandas dataframe
labels: list of the columns to encode
drop_col: boolean to decide if the chosen column should be dropped
Returns:
pandas dataframe with the given encoding
'''
for label in labels:
df = one_hot_column(df, label, drop_col)
return df
#titanic_list = one_hot(titanic_list, ["Deck"],drop_col=True)
titanic_list.head()
###Output
_____no_output_____
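###Markdown
As a quick illustration of the helper above, here is a minimal usage sketch on a toy frame (the `Port` column here is hypothetical, not part of the Titanic data):
###Code
import pandas as pd
# Illustrative only: one-hot encode a toy 'Port' column with the helper defined above
toy = pd.DataFrame({'Port': ['C', 'S', 'S', 'Q']})
print(one_hot(toy, ['Port'], drop_col=True))
###Output
_____no_output_____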
###Markdown
`Age`Numerical variable with the age of the passenger. We transform it into a categorical variable by binning the ages into groups.
###Code
sns.barplot(x="Age", y="Survived", data=titanic_list)
plt.show()
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
titanic_list["Age"] = titanic_list["Age"].fillna(-0.5)
titanic_list['AgeGroup'] = pd.cut(titanic_list["Age"], bins, labels = labels)
sns.barplot(x="AgeGroup", y="Survived", data=titanic_list)
plt.show()
titanic_list.head(5)
###Output
_____no_output_____
###Markdown
`Name`Categorical variable with the name of the passenger. From it we extract the title, such as `Mr`, `Miss` or `Master`.
###Code
# Check the names
titanic_list['Name'].head(10)
# Create the function to extract the title
import re
def get_title(name):
title_search = re.search(r' ([A-Za-z]+)\.', name)
if title_search:
return title_search.group(1)
return ""
# Apply get_title function
titanic_list['Title'] = titanic_list['Name'].apply(get_title)
# Check the results
pd.crosstab(titanic_list['Title'], titanic_list['Sex'])
###Output
_____no_output_____
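###Markdown
As a quick sanity check, the extractor can be applied to a single name (the example below is the first passenger in the standard Titanic data):
###Code
# Illustrative check of the title extractor
print(get_title('Braund, Mr. Owen Harris'))  # expected: 'Mr'
###Output
_____no_output_____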
###Markdown
Group the frequent titles together and map all remaining rare titles to `Rare`.
###Code
sns.barplot(x="Title", y="Survived", data=titanic_list)
plt.show()
# Convert to categorical values Title
titanic_list["Title"] = titanic_list["Title"].replace(['Lady', 'Countess',
'Capt', 'Col','Don', 'Dr',
'Major', 'Rev', 'Sir', 'Jonkheer',
'Dona'], 'Rare')
# Check the results
pd.crosstab(titanic_list['Title'], titanic_list['Sex'])
# Check the results
pd.crosstab(titanic_list['Title'], titanic_list['Survived'])
titanic_list["Title"] = titanic_list["Title"].map({"Master":0, "Miss":1, "Ms" : 1 ,
"Mme":1, "Mlle":1, "Mrs":1, "Mr":2,
"Rare":3})
sns.barplot(x="Title", y="Survived", data=titanic_list)
plt.show()
# Check all values and new features
titanic_list.dtypes
###Output
_____no_output_____
###Markdown
Other feature engineeringI will test the new features using cross-validation to see if they make a difference. `Age*Class`This is an interaction term; since age and class are both numeric, we can simply multiply them.
###Code
titanic_list['Age*Class'] = titanic_list['Age']*titanic_list['Pclass']
###Output
_____no_output_____
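###Markdown
To get a first impression of whether the interaction carries signal, we can look at its linear correlation with the target (an illustrative check, not a substitute for the cross-validation mentioned above):
###Code
# Illustrative: correlation of the interaction term with survival
print(titanic_list[['Age*Class', 'Survived']].corr())
###Output
_____no_output_____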
###Markdown
`Fare per Person`Here we divide the fare by the number of family members traveling together. I'm not exactly sure what this represents, but it's easy enough to add in.
###Code
titanic_list['Fare_Per_Person'] = titanic_list['Fare']/(titanic_list['FamilySize']+1)
###Output
_____no_output_____
###Markdown
Remove features
###Code
# Backup titanic_list
titanic_list_bak = titanic_list
titanic_list_bak.head(5)
#titanic_list = titanic_list_bak
titanic_list.head(5)
###Output
_____no_output_____
###Markdown
Save the 1st part: EDA, wrangling and cleansingTo be continued in a 2nd part covering feature selection
###Code
# Save dataset0 and dataset1 for next step: Modeling
titanic_list.to_csv('titanic_list.csv', index=False)
###Output
_____no_output_____ |
container_files/demos/Recommending Movies.ipynb | ###Markdown
Recommending MoviesThe [MovieLens 20M](http://files.grouplens.org/datasets/movielens/ml-20m-README.html) dataset contains 20 million user ratings from 1 to 5 of thousands of movies. In this demo we'll build a simple recommendation system which will use this data to suggest 25 movies based on movies you tell it you like and dislike. The notebook cells below use `pymldb`'s `Connection` class to make [REST API](../../../../doc/builtin/WorkingWithRest.md.html) calls. You can check out the [Using `pymldb` Tutorial](../../../../doc/nblink.html_tutorials/Using pymldb Tutorial) for more details.
###Code
from pymldb import Connection
mldb = Connection()
###Output
_____no_output_____
###Markdown
Download the MovieLens 20M dataWe'll start by using some command-line tools to download and decompress the data.
###Code
%%bash
mkdir -p /mldb_data/data
curl "http://public.mldb.ai/ml-20m.zip" 2>/dev/null > /mldb_data/data/ml-20m.zip
unzip /mldb_data/data/ml-20m.zip -d /mldb_data/data
%%bash
head /mldb_data/data/ml-20m/README.txt
%%bash
head /mldb_data/data/ml-20m/ratings.csv
###Output
userId,movieId,rating,timestamp
1,2,3.5,1112486027
1,29,3.5,1112484676
1,32,3.5,1112484819
1,47,3.5,1112484727
1,50,3.5,1112484580
1,112,3.5,1094785740
1,151,4.0,1094785734
1,223,4.0,1112485573
1,253,4.0,1112484940
###Markdown
Load the data into MLDBSee the [Loading Data Tutorial](../../../../doc/nblink.html_tutorials/Loading Data Tutorial) guide for more details on how to get data into MLDB. Here we load a text file and use the `pivot` aggregator to create a sparse matrix representation of the ratings.
###Code
%%time
print mldb.put('/v1/procedures/import_mvlns', {
"type": "import.text",
"params": {
"dataFileUrl":"file:///mldb_data/data/ml-20m/ratings.csv",
"outputDataset": "mvlns_ratings_csv",
"runOnCreation": True
}
})
print mldb.put('/v1/procedures/process_mvlns', {
"type": "transform",
"params": {
"inputData": """
select pivot(movieId, rating) as *
named userId
from mvlns_ratings_csv
group by userId
""",
"outputDataset": "mvlns_ratings",
"runOnCreation": True
}
})
###Output
<Response [201]>
<Response [201]>
CPU times: user 18.4 ms, sys: 4.22 ms, total: 22.7 ms
Wall time: 1min 19s
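###Markdown
For intuition, the `pivot` aggregation above is conceptually similar to a pandas pivot: rows become users, columns become movies, and cells hold the ratings. A minimal pandas sketch (illustrative only, not part of the MLDB pipeline; the toy values mimic the shape of `ratings.csv`):
###Code
import pandas as pd
# Toy ratings with the same columns as ratings.csv
toy = pd.DataFrame({'userId': [1, 1, 2], 'movieId': [2, 29, 2], 'rating': [3.5, 3.5, 4.0]})
print(toy.pivot(index='userId', columns='movieId', values='rating'))
###Output
_____no_output_____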
###Markdown
Take a peek at the datasetWe'll use the [Query API](../../../../doc/builtin/sql/QueryAPI.md.html). Each row is a user, each column is a movie, and the cell value is the rating the user gave the movie.
###Code
mldb.query("select * from mvlns_ratings limit 3")
###Output
_____no_output_____
###Markdown
Singular Value Decomposition (SVD)We will create and run a [Procedure](../../../../doc/builtin/procedures/Procedures.md.html) of type [`svd.train`](../../../../doc/builtin/procedures/Svd.md.html). This creates an `embedding` dataset where each row is a movie and the columns represent coordinates in a 100-dimensional space. Similar movies end up closer to each other than dissimilar movies.
###Code
print mldb.put('/v1/procedures/mvlns_svd', {
"type" : "svd.train",
"params" : {
"trainingData" : "select COLUMN EXPR (where rowCount() > 3) from mvlns_ratings",
"columnOutputDataset" : "mvlns_svd_embedding",
"modelFileUrl": "file://models/mvlns.svd",
"functionName": "mvlns_svd_embedder",
"runOnCreation": True
}
})
###Output
<Response [201]>
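###Markdown
For intuition about what the embedding buys us: each movie is now a point in a low-dimensional space, so "similar" simply means "nearby". Below is a tiny numpy sketch of a nearest-neighbour lookup under that assumption (conceptual only; the `embedding.neighbors` function used later does this inside MLDB):
###Code
import numpy as np
# Three hypothetical 2-D movie embeddings; the first two are close, the third is far away
emb = np.array([[0.9, 0.1], [0.8, 0.2], [-0.7, 0.6]])
dists = np.linalg.norm(emb - emb[0], axis=1)
print(dists.argsort())  # indices sorted from nearest to farthest, starting with the query itself
###Output
_____no_output_____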
###Markdown
Explore the results!Our dataset has `movieId`s but humans think about movie names so we'll load up the movie names in a dataset.
###Code
from ipywidgets import interact, interact_manual
from uuid import uuid4
print mldb.put('/v1/procedures/import_movies', {
"type": "import.text",
"params": {
"dataFileUrl":"file:///mldb_data/data/ml-20m/movies.csv",
"outputDataset": "movies",
"select": "title, movieId",
"named": "movieId",
"runOnCreation": True
}
})
###Output
<Response [201]>
###Markdown
A simple search function to find all movies (and corresponding `movieId`s) whose names contain a string.
###Code
@interact
def movie_search(x = "toy story"):
return mldb.query("select title from movies where regex_match(lower(title), '.*%s.*')" % x.strip().lower())
###Output
_____no_output_____
###Markdown
Now let's create a dataset to hold user preferences, and a simple function to simulate a user rating movies they like and movies they dislike, based on the `movie_search` function above.
###Code
print mldb.put("/v1/datasets/mvlns_user_prefs", {"type": "sparse.mutable"})
print mldb.put("/v1/functions/preferences", {
"type": "sql.query",
"params": {
"query": "select {*} as p from mvlns_user_prefs where rowName()=$user"
}
})
def save_prefs(user_id, likes, dislikes):
for rating, search_terms in zip([5,1],[likes, dislikes]):
for x in search_terms.split(","):
if len(x) > 3:
mldb.post("/v1/datasets/mvlns_user_prefs/rows", {
"rowName":user_id,
"columns": [[str(m), rating, 0] for m in movie_search(x).index]
})
mldb.post("/v1/datasets/mvlns_user_prefs/commit", {})
save_prefs("janedoe", "Toy Story", "Terminator")
mldb.query("select preferences({ user: 'janedoe' })[p] as *")
###Output
<Response [201]>
<Response [201]>
###Markdown
With all that done, we can now build a recommendation engine out of a simple SQL query by mapping a user's preferences into the same space as the movie embeddings (i.e. embedding the user's preferences) and looking for the nearest movies.
###Code
print mldb.put("/v1/functions/nearest_movies", {
"type": "embedding.neighbors",
"params": {
"dataset": "mvlns_svd_embedding",
"defaultNumNeighbors": 25,
"columnName": "embedding"
}
})
print mldb.put("/v1/functions/recommendations", {
"type": "sql.query",
"params": {
"query": """
select nearest_movies({
coords: mvlns_svd_embedder({
row: preferences({ user: $user })[p]
})[embedding]
})[distances] as r
"""
}
})
###Output
<Response [201]>
<Response [201]>
###Markdown
Here's a simple function which lets you simulate the results of liking and disliking certain movies and getting back the resulting recommendations.
###Code
def recommend(likes="Toy Story, Terminator", dislikes="Star Trek"):
# here we simulate a new user saving these preferences
user_id = str(uuid4())
save_prefs(user_id, likes, dislikes)
# we can then run an SQL query to:
# - retrieve recommendations
# - transpose and join them to movies to get titles
# - exclude the already-rated movies from the result
return mldb.query("""
select m.title
named m.movieId
from
transpose(( select recommendations({ user: '%(user)s' }) )) as r
join movies as m on r.rowPathElement(2) = m.rowPathElement(0)
where m.movieId not in (keys of preferences({ user: '%(user)s' })[p])
order by r.result
""" % dict(user=user_id))
recommend(likes="Toy Story, Terminator", dislikes="Star Trek")
###Output
_____no_output_____
###Markdown
Here's an interactive form that lets you play with this function to see if you agree with the recommendations!NOTE: the interactive part of this demo only works if you're running this Notebook live, not if you're looking at a static copy on http://docs.mldb.ai. See the documentation for [Running MLDB](../../../../doc/builtin/Running.md.html).
###Code
interact_manual(recommend)
###Output
_____no_output_____
tutorials/W1D5_DimensionalityReduction/W1D5_Tutorial4.ipynb | ###Markdown
Tutorial 4: Nonlinear Dimensionality Reduction**Week 1, Day 5: Dimensionality Reduction****By Neuromatch Academy**__Content creators:__ Alex Cayco Gajic, John Murray__Content reviewers:__ Roozbeh Farhoudi, Matt Krause, Spiros Chavlis, Richard Gao, Michael Waskom, Siddharth Suresh, Natalie Schaworonkow, Ella Batty **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial Objectives*Estimated timing of tutorial: 35 minutes*In this notebook we'll explore how dimensionality reduction can be useful for visualizing and inferring structure in your data. To do this, we will compare PCA with t-SNE, a nonlinear dimensionality reduction method.Overview:- Visualize MNIST in 2D using PCA.- Visualize MNIST in 2D using t-SNE.
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/kaq2x/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
# @title Video 1: PCA Applications
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV1Jf4y1R7UZ", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="2Zb93aOWioM", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
--- Setup
###Code
# Imports
import numpy as np
import matplotlib.pyplot as plt
#@title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# @title Plotting Functions
def visualize_components(component1, component2, labels, show=True):
"""
Plots a 2D representation of the data for visualization with categories
labelled as different colors.
Args:
component1 (numpy array of floats) : Vector of component 1 scores
component2 (numpy array of floats) : Vector of component 2 scores
labels (numpy array of floats) : Vector corresponding to categories of
samples
Returns:
Nothing.
"""
plt.figure()
cmap = plt.cm.get_cmap('tab10')
plt.scatter(x=component1, y=component2, c=labels, cmap=cmap)
plt.xlabel('Component 1')
plt.ylabel('Component 2')
plt.colorbar(ticks=range(10))
plt.clim(-0.5, 9.5)
if show:
plt.show()
###Output
_____no_output_____
###Markdown
--- Section 1: Visualize MNIST in 2D using PCAIn this exercise, we'll visualize the first few components of the MNIST dataset to look for evidence of structure in the data. But in this tutorial, we will also be interested in the label of each image (i.e., which numeral it is from 0 to 9). Start by running the following cell to reload the MNIST dataset (this takes a few seconds).
###Code
from sklearn.datasets import fetch_openml
# Get images
mnist = fetch_openml(name='mnist_784', as_frame = False)
X = mnist.data
# Get labels
labels = [int(k) for k in mnist.target]
labels = np.array(labels)
###Output
_____no_output_____
###Markdown
To perform PCA, we now will use the method implemented in sklearn. Run the following cell to set the parameters of PCA - we will only look at the top 2 components because we will be visualizing the data in 2D.
###Code
from sklearn.decomposition import PCA
# Initializes PCA
pca_model = PCA(n_components=2)
# Performs PCA
pca_model.fit(X)
###Output
_____no_output_____
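###Markdown
Before visualizing, it is worth checking how much variance the top two components actually capture (a quick illustrative check; `explained_variance_ratio_` is a standard attribute of a fitted sklearn PCA object):
###Code
# Fraction of total variance captured by each of the two components
print(pca_model.explained_variance_ratio_)
###Output
_____no_output_____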
###Markdown
Coding Exercise 1: Visualization of MNIST in 2D using PCAFill in the code below to perform PCA and visualize the top two components. For better visualization, take only the first 2,000 samples of the data (this will also make t-SNE much faster in the following section of the tutorial so don't skip this step!)**Suggestions:**- Truncate the data matrix at 2,000 samples. You will also need to truncate the array of labels.- Perform PCA on the truncated data.- Use the function `visualize_components` to plot the labelled data.
###Code
help(visualize_components)
help(pca_model.transform)
#################################################
## TODO for students: take only 2,000 samples and perform PCA
# Comment once you've completed the code
raise NotImplementedError("Student excercise: perform PCA")
#################################################
# Take only the first 2000 samples with the corresponding labels
X, labels = ...
# Perform PCA
scores = pca_model.transform(X)
# Plot the data and reconstruction
visualize_components(...)
# to_remove solution
# Take only the first 2000 samples with the corresponding labels
X, labels = X[:2000, :], labels[:2000]
# Perform PCA
scores = pca_model.transform(X)
# Plot the data and reconstruction
with plt.xkcd():
visualize_components(scores[:, 0], scores[:, 1], labels)
###Output
_____no_output_____
###Markdown
Think! 1: PCA Visualization1. What do you see? Are different samples corresponding to the same numeral clustered together? Is there much overlap?2. Do some pairs of numerals appear to be more distinguishable than others?
###Code
# to_remove explanation
"""
1) Images corresponding to some labels (numbers) are somewhat clustered together
in some cases but there's a lot of overlap and definitely not a clear distinction between
all the number clusters.
2) The zeros and ones seem fairly non-overlapping.
"""
###Output
_____no_output_____
###Markdown
--- Section 2: Visualize MNIST in 2D using t-SNE*Estimated timing to here from start of tutorial: 15 min*
###Code
# @title Video 2: Nonlinear Methods
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV14Z4y1u7HG", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="5Xpb0YaN5Ms", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Next we will analyze the same data using t-SNE, a nonlinear dimensionality reduction method that is useful for visualizing high dimensional data in 2D or 3D. Run the cell below to get started.
###Code
from sklearn.manifold import TSNE
tsne_model = TSNE(n_components=2, perplexity=30, random_state=2020)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1: Apply t-SNE on MNISTFirst, we'll run t-SNE on the data to explore whether we can see more structure. The cell above defined the parameters that we will use to find our embedding (i.e., the low-dimensional representation of the data) and stored them in `tsne_model`. To run t-SNE on our data, use the function `tsne_model.fit_transform`.**Suggestions:**- Run t-SNE using the function `tsne_model.fit_transform`.- Plot the resulting data using `visualize_components`.
###Code
help(tsne_model.fit_transform)
#################################################
## TODO for students
# Comment once you've completed the code
raise NotImplementedError("Student excercise: perform t-SNE")
#################################################
# Perform t-SNE
embed = ...
# Visualize the data
visualize_components(..., ..., labels)
# to_remove solution
# Perform t-SNE
embed = tsne_model.fit_transform(X)
# Visualize the data
with plt.xkcd():
visualize_components(embed[:, 0], embed[:, 1], labels)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.2: Run t-SNE with different perplexitiesUnlike PCA, t-SNE has a free parameter (the perplexity) that roughly determines how global vs. local information is weighted. Here we'll take a look at how the perplexity affects our interpretation of the results. **Steps:**- Rerun t-SNE (don't forget to re-initialize using the function `TSNE` as above) with a perplexity of 50, 5 and 2.
###Code
def explore_perplexity(values):
"""
Plots a 2D representation of the data for visualization with categories
labelled as different colors using different perplexities.
Args:
values (list of floats) : list with perplexities to be visualized
Returns:
Nothing.
"""
for perp in values:
#################################################
## TO DO for students: Insert your code here to redefine the t-SNE "model"
## while setting the perplexity, then perform t-SNE on the data and plot the
## results for perplexity = 50, 5, and 2 (set random_state to 2020)
# Comment these lines when you complete the function
raise NotImplementedError("Student Exercise! Explore t-SNE with different perplexity")
#################################################
# Perform t-SNE
tsne_model = ...
embed = tsne_model.fit_transform(X)
visualize_components(embed[:, 0], embed[:, 1], labels, show=False)
plt.title(f"perplexity: {perp}")
# Visualize
values = [50, 5, 2]
explore_perplexity(values)
# to_remove solution
def explore_perplexity(values):
"""
Plots a 2D representation of the data for visualization with categories
labelled as different colors using different perplexities.
Args:
values (list of floats) : list with perplexities to be visualized
Returns:
Nothing.
"""
for perp in values:
# Perform t-SNE
tsne_model = TSNE(n_components=2, perplexity=perp, random_state=2020)
embed = tsne_model.fit_transform(X)
visualize_components(embed[:, 0], embed[:, 1], labels, show=False)
plt.title(f"perplexity: {perp}")
plt.show()
# Visualize
values = [50, 5, 2]
with plt.xkcd():
explore_perplexity(values)
###Output
_____no_output_____
###Markdown
Tutorial 4: Nonlinear Dimensionality Reduction**Week 1, Day 5: Dimensionality Reduction****By Neuromatch Academy**__Content creators:__ Alex Cayco Gajic, John Murray__Content reviewers:__ Roozbeh Farhoudi, Matt Krause, Spiros Chavlis, Richard Gao, Michael Waskom, Siddharth Suresh, Natalie Schaworonkow, Ella Batty **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial Objectives*Estimated timing of tutorial: 35 minutes*In this notebook we'll explore how dimensionality reduction can be useful for visualizing and inferring structure in your data. To do this, we will compare PCA with t-SNE, a nonlinear dimensionality reduction method.Overview:- Visualize MNIST in 2D using PCA.- Visualize MNIST in 2D using t-SNE.
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/kaq2x/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
# @title Video 1: PCA Applications
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV1Jf4y1R7UZ", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="2Zb93aOWioM", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
--- Setup
###Code
# Imports
import numpy as np
import matplotlib.pyplot as plt
#@title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# @title Plotting Functions
def visualize_components(component1, component2, labels, show=True):
"""
Plots a 2D representation of the data for visualization with categories
labelled as different colors.
Args:
component1 (numpy array of floats) : Vector of component 1 scores
component2 (numpy array of floats) : Vector of component 2 scores
labels (numpy array of floats) : Vector corresponding to categories of
samples
Returns:
Nothing.
"""
plt.figure()
cmap = plt.cm.get_cmap('tab10')
plt.scatter(x=component1, y=component2, c=labels, cmap=cmap)
plt.xlabel('Component 1')
plt.ylabel('Component 2')
plt.colorbar(ticks=range(10))
plt.clim(-0.5, 9.5)
if show:
plt.show()
###Output
_____no_output_____
###Markdown
--- Section 1: Visualize MNIST in 2D using PCAIn this exercise, we'll visualize the first few components of the MNIST dataset to look for evidence of structure in the data. But in this tutorial, we will also be interested in the label of each image (i.e., which numeral it is from 0 to 9). Start by running the following cell to reload the MNIST dataset (this takes a few seconds).
###Code
from sklearn.datasets import fetch_openml
# Get images
mnist = fetch_openml(name='mnist_784', as_frame=False)
X_all = mnist.data
# Get labels
labels_all = np.array([int(k) for k in mnist.target])
###Output
_____no_output_____
###Markdown
**Note:** We saved the complete dataset as `X_all` and the labels as `labels_all`. To perform PCA, we will now use the method implemented in sklearn. Run the following cell to set the parameters of PCA - we will only look at the top 2 components because we will be visualizing the data in 2D.
###Code
from sklearn.decomposition import PCA
# Initializes PCA
pca_model = PCA(n_components=2)
# Performs PCA
pca_model.fit(X_all)
###Output
_____no_output_____
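###Markdown
(Optional) Before visualizing, it is worth asking how much of the total pixel variance the top two components actually capture. This is a quick sanity check, not part of the exercise; it assumes `pca_model` was fit in the cell above.
###Code
# Optional check: fraction of total variance captured by each retained component.
print(pca_model.explained_variance_ratio_)
print("Total variance captured by 2 components: %0.3f"
      % pca_model.explained_variance_ratio_.sum())
###Output
_____no_output_____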
###Markdown
Coding Exercise 1: Visualization of MNIST in 2D using PCAFill in the code below to perform PCA and visualize the top two components. For better visualization, take only the first 2,000 samples of the data (this will also make t-SNE much faster in the following section of the tutorial so don't skip this step!)**Suggestions:**- Truncate the data matrix at 2,000 samples. You will also need to truncate the array of labels.- Perform PCA on the truncated data.- Use the function `visualize_components` to plot the labelled data.
###Code
help(visualize_components)
help(pca_model.transform)
#################################################
## TODO for students: take only 2,000 samples and perform PCA
# Comment once you've completed the code
raise NotImplementedError("Student exercise: perform PCA")
#################################################
# Take only the first 2000 samples with the corresponding labels
X, labels = ...
# Perform PCA
scores = pca_model.transform(X)
# Plot the data and reconstruction
visualize_components(...)
# to_remove solution
# Take only the first 2000 samples with the corresponding labels
X, labels = X_all[:2000, :], labels_all[:2000]
# Perform PCA
scores = pca_model.transform(X)
# Plot the data and reconstruction
with plt.xkcd():
visualize_components(scores[:, 0], scores[:, 1], labels)
###Output
_____no_output_____
###Markdown
Think! 1: PCA Visualization1. What do you see? Are different samples corresponding to the same numeral clustered together? Is there much overlap?2. Do some pairs of numerals appear to be more distinguishable than others?
###Code
# to_remove explanation
"""
1) Images corresponding to the same label (number) are loosely clustered together
in some cases, but there is a lot of overlap and no clear separation between
all of the digit clusters.
2) The zeros and ones seem fairly non-overlapping.
"""
###Output
_____no_output_____
###Markdown
--- Section 2: Visualize MNIST in 2D using t-SNE*Estimated timing to here from start of tutorial: 15 min*
###Code
# @title Video 2: Nonlinear Methods
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV14Z4y1u7HG", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="5Xpb0YaN5Ms", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Next we will analyze the same data using t-SNE, a nonlinear dimensionality reduction method that is useful for visualizing high dimensional data in 2D or 3D. Run the cell below to get started.
###Code
from sklearn.manifold import TSNE
tsne_model = TSNE(n_components=2, perplexity=30, random_state=2020)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1: Apply t-SNE on MNISTFirst, we'll run t-SNE on the data to explore whether we can see more structure. The cell above defined the parameters that we will use to find our embedding (i.e., the low-dimensional representation of the data) and stored them in `tsne_model`. To run t-SNE on our data, use the function `tsne_model.fit_transform`.**Suggestions:**- Run t-SNE using the function `tsne_model.fit_transform`.- Plot the resulting data using `visualize_components`.
###Code
help(tsne_model.fit_transform)
#################################################
## TODO for students
# Comment once you've completed the code
raise NotImplementedError("Student exercise: perform t-SNE")
#################################################
# Perform t-SNE
embed = ...
# Visualize the data
visualize_components(..., ..., labels)
# to_remove solution
# Perform t-SNE
embed = tsne_model.fit_transform(X)
# Visualize the data
with plt.xkcd():
visualize_components(embed[:, 0], embed[:, 1], labels)
###Output
_____no_output_____
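###Markdown
(Optional) t-SNE works by minimizing the Kullback-Leibler divergence between neighbor distributions in the original and embedded spaces. After fitting, scikit-learn stores the final objective value, which gives a rough, lower-is-better signal when comparing runs. A small sketch, assuming `tsne_model` was fit in the solution cell above.
###Code
# Optional: final KL divergence of the fitted embedding (lower is better).
print("Final KL divergence: %0.3f" % tsne_model.kl_divergence_)
###Output
_____no_output_____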
###Markdown
Coding Exercise 2.2: Run t-SNE with different perplexitiesUnlike PCA, t-SNE has a free parameter (the perplexity) that roughly determines how global vs. local information is weighted. Here we'll take a look at how the perplexity affects our interpretation of the results. **Steps:**- Rerun t-SNE (don't forget to re-initialize using the function `TSNE` as above) with a perplexity of 50, 5 and 2.
###Code
def explore_perplexity(values, X, labels):
"""
Plots a 2D representation of the data for visualization with categories
labelled as different colors using different perplexities.
Args:
values (list of floats) : list with perplexities to be visualized
X (np.ndarray of floats) : matrix with the dataset
labels (np.ndarray of int) : array with the labels
Returns:
Nothing.
"""
for perp in values:
#################################################
## TODO for students: Insert your code here to redefine the t-SNE model
## with the given perplexity, perform t-SNE on the data, and plot the
## results for perplexity = 50, 5, and 2 (set random_state to 2020).
# Comment these lines when you complete the function
raise NotImplementedError("Student Exercise! Explore t-SNE with different perplexity")
#################################################
# Perform t-SNE
tsne_model = ...
embed = tsne_model.fit_transform(X)
visualize_components(embed[:, 0], embed[:, 1], labels, show=False)
plt.title(f"perplexity: {perp}")
# Visualize
values = [50, 5, 2]
explore_perplexity(values, X, labels)
# to_remove solution
def explore_perplexity(values, X, labels):
"""
Plots a 2D representation of the data for visualization with categories
labelled as different colors using different perplexities.
Args:
values (list of floats) : list with perplexities to be visualized
X (np.ndarray of floats) : matrix with the dataset
labels (np.ndarray of int) : array with the labels
Returns:
Nothing.
"""
for perp in values:
# Perform t-SNE
tsne_model = TSNE(n_components=2, perplexity=perp, random_state=2020)
embed = tsne_model.fit_transform(X)
visualize_components(embed[:, 0], embed[:, 1], labels, show=False)
plt.title(f"perplexity: {perp}")
plt.show()
# Visualize
values = [50, 5, 2]
with plt.xkcd():
explore_perplexity(values, X, labels)
###Output
_____no_output_____
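###Markdown
(Optional) A practical note on choosing perplexity: values between roughly 5 and 50 are common, and newer versions of scikit-learn reject perplexities that are not smaller than the number of samples. The sketch below shows a guard one might add before running a sweep; it is illustrative only and assumes `X` is the truncated data matrix from above.
###Code
# Illustrative guard: keep only perplexities that are valid for this sample count.
n_samples = X.shape[0]
valid_values = [p for p in [50, 5, 2] if p < n_samples]
print("Valid perplexities for %d samples:" % n_samples, valid_values)
###Output
_____no_output_____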
multi_class_classification_of_handwritten_digits.ipynb
###Markdown
Copyright 2017 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Classifying Handwritten Digits with Neural Networks  **Learning Objectives:** * Train both a linear model and a neural network to classify handwritten digits from the classic [MNIST](http://yann.lecun.com/exdb/mnist/) data set * Compare the performance of the linear and neural network classification models * Visualize the weights of a neural-network hidden layer Our goal is to map each input image to the correct numeric digit. We will create an NN with a few hidden layers and a Softmax layer at the top to select the winning class. SetupFirst, let's download the data set, import TensorFlow and other utilities, and load the data into a *pandas* `DataFrame`. Note that this data is a sample of the original MNIST training data; we've taken 20,000 rows at random.
###Code
from __future__ import print_function
import glob
import math
import os
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
mnist_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_train_small.csv",
sep=",",
header=None)
# Use just the first 10,000 records for training/validation.
mnist_dataframe = mnist_dataframe.head(10000)
mnist_dataframe = mnist_dataframe.reindex(np.random.permutation(mnist_dataframe.index))
mnist_dataframe.head()
###Output
_____no_output_____
###Markdown
Each row represents one labeled example. Column 0 represents the label that a human rater has assigned for one handwritten digit. For example, if Column 0 contains '6', then a human rater interpreted the handwritten character as the digit '6'. The ten digits 0-9 are each represented, with a unique class label for each possible digit. Thus, this is a multi-class classification problem with 10 classes.  Columns 1 through 784 contain the feature values, one per pixel for the 28×28=784 pixel values. The pixel values are on a gray scale in which 0 represents white, 255 represents black, and values between 0 and 255 represent shades of gray. Most of the pixel values are 0; you may want to take a minute to confirm that they aren't all 0. For example, adjust the following code cell to print out the values in column 72.
###Code
mnist_dataframe.loc[:, 72:72]
###Output
_____no_output_____
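###Markdown
(Optional) To back up the claim that most pixel values are 0, we can compute the overall fraction of nonzero entries directly. A small sketch, not part of the original exercise; column 0 holds the label, so it is excluded.
###Code
# Fraction of nonzero pixel values across the sample (label column excluded).
pixel_values = mnist_dataframe.loc[:, 1:784].values
print("Fraction of nonzero pixels: %0.3f" % (pixel_values != 0).mean())
###Output
_____no_output_____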
###Markdown
Now, let's parse out the labels and features and look at a few examples. Note the use of `loc` which allows us to pull out columns based on original location, since we don't have a header row in this data set.
###Code
def parse_labels_and_features(dataset):
"""Extracts labels and features.
This is a good place to scale or transform the features if needed.
Args:
dataset: A Pandas `DataFrame`, containing the label on the first column and
monochrome pixel values on the remaining columns, in row major order.
Returns:
A `tuple` `(labels, features)`:
labels: A Pandas `Series`.
features: A Pandas `DataFrame`.
"""
labels = dataset[0]
# DataFrame.loc index ranges are inclusive at both ends.
features = dataset.loc[:,1:784]
# Scale the data to [0, 1] by dividing out the max value, 255.
features = features / 255
return labels, features
training_targets, training_examples = parse_labels_and_features(mnist_dataframe[:7500])
training_examples.describe()
validation_targets, validation_examples = parse_labels_and_features(mnist_dataframe[7500:10000])
validation_examples.describe()
###Output
_____no_output_____
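###Markdown
(Optional) Before training, it is worth confirming that all ten digit classes are reasonably represented in the training split. A quick pandas check, not part of the original exercise.
###Code
# Number of training examples per digit class, sorted by digit.
print(training_targets.value_counts().sort_index())
###Output
_____no_output_____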
###Markdown
Show a random example and its corresponding label.
###Code
rand_example = np.random.choice(training_examples.index)
_, ax = plt.subplots()
ax.matshow(training_examples.loc[rand_example].values.reshape(28, 28))
ax.set_title("Label: %i" % training_targets.loc[rand_example])
ax.grid(False)
###Output
_____no_output_____
###Markdown
Task 1: Build a Linear Model for MNISTFirst, let's create a baseline model to compare against. The `LinearClassifier` provides a set of *k* one-vs-all classifiers, one for each of the *k* classes.You'll notice that in addition to reporting accuracy, and plotting Log Loss over time, we also display a [**confusion matrix**](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix shows which classes were misclassified as other classes. Which digits get confused for each other?Also note that we track the model's error using the `log_loss` function. This should not be confused with the loss function internal to `LinearClassifier` that is used for training.
###Code
def construct_feature_columns():
"""Construct the TensorFlow Feature Columns.
Returns:
A set of feature columns
"""
# There are 784 pixels in each image.
return set([tf.feature_column.numeric_column('pixels', shape=784)])
###Output
_____no_output_____
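###Markdown
(Optional) Since the training loop below reports both `log_loss` and a confusion matrix, here is a tiny hand-made illustration of what each one measures. This is a standalone sketch using only the already-imported `metrics` module; it does not touch the MNIST data.
###Code
# Toy example: 3 classes, 4 samples, one confident mistake.
toy_true = [0, 1, 2, 2]
toy_probs = [[0.8, 0.1, 0.1],  # correct, confident
             [0.2, 0.7, 0.1],  # correct
             [0.1, 0.2, 0.7],  # correct
             [0.5, 0.3, 0.2]]  # wrong: true class is 2, predicted class is 0
print("Log loss: %0.3f" % metrics.log_loss(toy_true, toy_probs))
toy_pred = np.argmax(toy_probs, axis=1)
print("Confusion matrix (rows = true, cols = predicted):")
print(metrics.confusion_matrix(toy_true, toy_pred))
###Output
_____no_output_____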
###Markdown
Here, we'll make separate input functions for training and for prediction. We'll nest them in `create_training_input_fn()` and `create_predict_input_fn()`, respectively, so we can invoke these functions to return the corresponding `_input_fn`s to pass to our `.train()` and `.predict()` calls.
###Code
def create_training_input_fn(features, labels, batch_size, num_epochs=None, shuffle=True):
"""A custom input_fn for sending MNIST data to the estimator for training.
Args:
features: The training features.
labels: The training labels.
batch_size: Batch size to use during training.
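num_epochs: Number of times to repeat the data (None repeats indefinitely).
shuffle: Whether to shuffle the data (note: the nested `_input_fn` declares
its own `num_epochs` and `shuffle` defaults, which take precedence).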
Returns:
A function that returns batches of training features and labels during
training.
"""
def _input_fn(num_epochs=None, shuffle=True):
# Input pipelines are reset with each call to .train(). To ensure model
# gets a good sampling of data, even when number of steps is small, we
# shuffle all the data before creating the Dataset object
idx = np.random.permutation(features.index)
raw_features = {"pixels":features.reindex(idx)}
raw_targets = np.array(labels[idx])
ds = Dataset.from_tensor_slices((raw_features,raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def create_predict_input_fn(features, labels, batch_size):
"""A custom input_fn for sending mnist data to the estimator for predictions.
Args:
features: The features to base predictions on.
labels: The labels of the prediction examples.
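batch_size: Batch size to use for prediction.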
Returns:
A function that returns features and labels for predictions.
"""
def _input_fn():
raw_features = {"pixels": features.values}
raw_targets = np.array(labels)
ds = Dataset.from_tensor_slices((raw_features, raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
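# Illustrative note: each factory above returns a zero-argument callable that
# the estimator invokes internally, roughly like:
#   train_fn = create_training_input_fn(training_examples, training_targets, batch_size=10)
#   feature_batch, label_batch = train_fn()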
def train_linear_classification_model(
learning_rate,
steps,
batch_size,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a linear classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, and a confusion
matrix.
Args:
learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `LinearClassifier` object.
"""
periods = 10
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create a LinearClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.LinearClassifier(
feature_columns=construct_feature_columns(),
n_classes=10,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range (0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space.
_ = map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*')))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
###Output
_____no_output_____
###Markdown
**Spend 5 minutes seeing how well you can do on accuracy with a linear model of this form. For this exercise, limit yourself to experimenting with the hyperparameters for batch size, learning rate and steps.**Stop if you get anything above about 0.9 accuracy.
###Code
classifier = train_linear_classification_model(
learning_rate=0.02,
steps=100,
batch_size=10,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
SolutionClick below for one possible solution. Here is a set of parameters that should attain roughly 0.9 accuracy.
###Code
_ = train_linear_classification_model(
learning_rate=0.03,
steps=1000,
batch_size=30,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
Task 2: Replace the Linear Classifier with a Neural Network**Replace the LinearClassifier above with a [`DNNClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) and find a parameter combination that gives 0.95 or better accuracy.**You may wish to experiment with additional regularization methods, such as dropout. These additional regularization methods are documented in the comments for the `DNNClassifier` class.
###Code
#
# YOUR CODE HERE: Replace the linear classifier with a neural network.
#
###Output
_____no_output_____
###Markdown
Once you have a good model, double check that you didn't overfit the validation set by evaluating on the test data that we'll load below.
###Code
mnist_test_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
#
# YOUR CODE HERE: Calculate accuracy on the test set.
#
###Output
_____no_output_____
###Markdown
SolutionClick below for a possible solution. The code below is almost identical to the original `LinearClassifier` training code, with the exception of the NN-specific configuration, such as the hyperparameter for hidden units.
###Code
def train_nn_classification_model(
learning_rate,
steps,
batch_size,
hidden_units,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a neural network classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, as well as a confusion
matrix.
Args:
learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
hidden_units: A `list` of int values, specifying the number of neurons in each layer.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `DNNClassifier` object.
"""
periods = 10
# Caution: input pipelines are reset with each call to train.
# If the number of steps is small, your model may never see most of the data.
# So with multiple `.train` calls like this you may want to control the length
# of training with num_epochs passed to the input_fn, use a very large shuffle
# buffer, or (since the data fits in memory) shuffle all the data in the `input_fn`.
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create feature columns.
feature_columns = [tf.feature_column.numeric_column('pixels', shape=784)]
# Create a DNNClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
n_classes=10,
hidden_units=hidden_units,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range (0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space.
_ = map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*')))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
classifier = train_nn_classification_model(
learning_rate=0.05,
steps=1000,
batch_size=30,
hidden_units=[100, 100],
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
Next, we verify the accuracy on the test set.
###Code
mnist_test_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
predict_test_input_fn = create_predict_input_fn(
test_examples, test_targets, batch_size=100)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])
accuracy = metrics.accuracy_score(test_targets, test_predictions)
print("Accuracy on test data: %0.2f" % accuracy)
###Output
_____no_output_____
###Markdown
Task 3: Visualize the weights of the first hidden layer.Let's take a few minutes to dig into our neural network and see what it has learned by retrieving the first hidden layer's weights with the model's `get_variable_value()` method.The input layer of our model has `784` weights corresponding to the `28×28` pixel input images. The first hidden layer will have `784×N` weights where `N` is the number of nodes in that layer. We can turn those weights back into `28×28` images by *reshaping* each of the `N` `1×784` arrays of weights into `N` arrays of size `28×28`.Run the following cell to plot the weights. Note that this cell requires that a `DNNClassifier` called "classifier" has already been trained.
###Code
print(classifier.get_variable_names())
weights0 = classifier.get_variable_value("dnn/hiddenlayer_0/kernel")
print("weights0 shape:", weights0.shape)
num_nodes = weights0.shape[1]
num_rows = int(math.ceil(num_nodes / 10.0))
fig, axes = plt.subplots(num_rows, 10, figsize=(20, 2 * num_rows))
for coef, ax in zip(weights0.T, axes.ravel()):
# Weights in coef is reshaped from 1x784 to 28x28.
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.pink)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
###Output
_____no_output_____
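###Markdown
(Optional) One caveat with the plot above: `matshow` autoscales the color range for each panel separately, so intensities are not comparable across hidden units. The sketch below is a small variant, assuming `weights0` and `num_rows` from the previous cell, that pins a shared symmetric color scale so equal colors mean equal weight magnitudes everywhere.
###Code
# Re-plot the first hidden layer's weights with one shared, symmetric color scale.
vmax = np.abs(weights0).max()
fig, axes = plt.subplots(num_rows, 10, figsize=(20, 2 * num_rows))
for coef, ax in zip(weights0.T, axes.ravel()):
    ax.matshow(coef.reshape(28, 28), cmap=plt.cm.pink, vmin=-vmax, vmax=vmax)
    ax.set_xticks(())
    ax.set_yticks(())
plt.show()
###Output
_____no_output_____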
###Markdown
Copyright 2017 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Classifying Handwritten Digits with Neural Networks  **Learning Objectives:** * Train both a linear model and a neural network to classify handwritten digits from the classic [MNIST](http://yann.lecun.com/exdb/mnist/) data set * Compare the performance of the linear and neural network classification models * Visualize the weights of a neural-network hidden layer Our goal is to map each input image to the correct numeric digit. We will create a NN with a few hidden layers and a Softmax layer at the top to select the winning class. SetupFirst, let's download the data set, import TensorFlow and other utilities, and load the data into a *pandas* `DataFrame`. Note that this data is a sample of the original MNIST training data; we've taken 20000 rows at random.
###Code
from __future__ import print_function
import glob
import math
import os
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
mnist_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_train_small.csv",
sep=",",
header=None)
# Use just the first 10,000 records for training/validation.
mnist_dataframe = mnist_dataframe.head(10000)
mnist_dataframe = mnist_dataframe.reindex(np.random.permutation(mnist_dataframe.index))
mnist_dataframe.head()
###Output
_____no_output_____
###Markdown
Each row represents one labeled example. Column 0 represents the label that a human rater has assigned for one handwritten digit. For example, if Column 0 contains '6', then a human rater interpreted the handwritten character as the digit '6'. The ten digits 0-9 are each represented, with a unique class label for each possible digit. Thus, this is a multi-class classification problem with 10 classes.  Columns 1 through 784 contain the feature values, one per pixel for the 28×28=784 pixel values. The pixel values are on a gray scale in which 0 represents white, 255 represents black, and values between 0 and 255 represent shades of gray. Most of the pixel values are 0; you may want to take a minute to confirm that they aren't all 0. For example, adjust the following text block to print out the values in column 72.
###Code
mnist_dataframe.loc[:, 72:72]
###Output
_____no_output_____
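###Markdown
As a quick sanity check that the pixel features aren't all 0, we can measure the fraction of nonzero values directly (a minimal sketch using the `mnist_dataframe` loaded above):
###Code
# Columns 1..784 are the pixel features; column 0 is the label.
pixel_columns = mnist_dataframe.loc[:, 1:784]
nonzero_fraction = (pixel_columns != 0).values.mean()
print("Fraction of nonzero pixel values: %0.3f" % nonzero_fraction)
###Output
_____no_output_____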
###Markdown
Now, let's parse out the labels and features and look at a few examples. Note the use of `loc` which allows us to pull out columns based on original location, since we don't have a header row in this data set.
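The inclusive slicing is easy to verify on a toy frame first (a minimal sketch, unrelated to the MNIST data):
###Code
toy = pd.DataFrame(np.arange(12).reshape(3, 4))
print(toy.loc[:, 1:3].shape)   # (3, 3) -- .loc includes BOTH endpoints: columns 1, 2, 3
print(toy.iloc[:, 1:3].shape)  # (3, 2) -- .iloc excludes the right endpoint
###Output
_____no_output_____
###Markdown
With that in mind, here is the parsing helper: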
###Code
def parse_labels_and_features(dataset):
"""Extracts labels and features.
This is a good place to scale or transform the features if needed.
Args:
dataset: A Pandas `Dataframe`, containing the label on the first column and
monochrome pixel values on the remaining columns, in row major order.
Returns:
A `tuple` `(labels, features)`:
labels: A Pandas `Series`.
features: A Pandas `DataFrame`.
"""
labels = dataset[0]
# DataFrame.loc index ranges are inclusive at both ends.
features = dataset.loc[:,1:784]
# Scale the data to [0, 1] by dividing out the max value, 255.
features = features / 255
return labels, features
training_targets, training_examples = parse_labels_and_features(mnist_dataframe[:7500])
training_examples.describe()
validation_targets, validation_examples = parse_labels_and_features(mnist_dataframe[7500:10000])
validation_examples.describe()
###Output
_____no_output_____
###Markdown
Show a random example and its corresponding label.
###Code
rand_example = np.random.choice(training_examples.index)
_, ax = plt.subplots()
ax.matshow(training_examples.loc[rand_example].values.reshape(28, 28))
ax.set_title("Label: %i" % training_targets.loc[rand_example])
ax.grid(False)
###Output
_____no_output_____
###Markdown
Task 1: Build a Linear Model for MNISTFirst, let's create a baseline model to compare against. The `LinearClassifier` provides a set of *k* one-vs-all classifiers, one for each of the *k* classes.You'll notice that in addition to reporting accuracy, and plotting Log Loss over time, we also display a [**confusion matrix**](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix shows which classes were misclassified as other classes. Which digits get confused for each other?Also note that we track the model's error using the `log_loss` function. This should not be confused with the loss function internal to `LinearClassifier` that is used for training.
###Code
def construct_feature_columns():
"""Construct the TensorFlow Feature Columns.
Returns:
A set of feature columns
"""
# There are 784 pixels in each image.
return set([tf.feature_column.numeric_column('pixels', shape=784)])
###Output
_____no_output_____
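###Markdown
To make the metrics we will track concrete, here is a toy illustration (a minimal sketch on hypothetical labels, unrelated to MNIST) of reading a confusion matrix and of converting predicted class ids into the one-hot form that `log_loss` expects:
###Code
toy_true = [0, 0, 1, 2]
toy_pred = [0, 1, 1, 2]
# Entry (i, j) counts examples with true class i predicted as class j;
# here one 0 was misread as a 1, everything else is on the diagonal.
print(metrics.confusion_matrix(toy_true, toy_pred))
# log_loss expects probability-like rows, so hard predictions are one-hot encoded.
toy_one_hot = tf.keras.utils.to_categorical(toy_pred, 3)
print(metrics.log_loss(toy_true, toy_one_hot))
###Output
_____no_output_____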
###Markdown
Here, we'll make separate input functions for training and for prediction. We'll nest them in `create_training_input_fn()` and `create_predict_input_fn()`, respectively, so we can invoke these functions to return the corresponding `_input_fn`s to pass to our `.train()` and `.predict()` calls.
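The nesting is just the standard closure pattern: the outer function captures the data and batch size, and returns a parameterless function in the shape the Estimator API expects (a toy sketch with hypothetical names):
###Code
def make_input_fn(captured_value):
  def _input_fn():
    # The inner function can use captured_value without taking it as an argument.
    return captured_value
  return _input_fn
toy_fn = make_input_fn("hello")
print(toy_fn())
###Output
_____no_output_____
###Markdown
Now the real input functions: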
###Code
def create_training_input_fn(features, labels, batch_size, num_epochs=None, shuffle=True):
"""A custom input_fn for sending MNIST data to the estimator for training.
Args:
features: The training features.
labels: The training labels.
batch_size: Batch size to use during training.
Returns:
A function that returns batches of training features and labels during
training.
"""
def _input_fn(num_epochs=None, shuffle=True):
# Input pipelines are reset with each call to .train(). To ensure model
# gets a good sampling of data, even when number of steps is small, we
# shuffle all the data before creating the Dataset object
idx = np.random.permutation(features.index)
raw_features = {"pixels":features.reindex(idx)}
raw_targets = np.array(labels[idx])
ds = Dataset.from_tensor_slices((raw_features,raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def create_predict_input_fn(features, labels, batch_size):
"""A custom input_fn for sending mnist data to the estimator for predictions.
Args:
features: The features to base predictions on.
labels: The labels of the prediction examples.
Returns:
A function that returns features and labels for predictions.
"""
def _input_fn():
raw_features = {"pixels": features.values}
raw_targets = np.array(labels)
ds = Dataset.from_tensor_slices((raw_features, raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def train_linear_classification_model(
learning_rate,
steps,
batch_size,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a linear classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, and a confusion
matrix.
Args:
learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `LinearClassifier` object.
"""
periods = 10
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create a LinearClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
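  # Wrapping the optimizer caps the global gradient norm at 5.0, which keeps updates stable.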
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.LinearClassifier(
feature_columns=construct_feature_columns(),
n_classes=10,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range (0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space.
_ = map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*')))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
  # Normalize the confusion matrix by row (i.e., by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
###Output
_____no_output_____
###Markdown
**Spend 5 minutes seeing how well you can do on accuracy with a linear model of this form. For this exercise, limit yourself to experimenting with the hyperparameters for batch size, learning rate and steps.**Stop if you get anything above about 0.9 accuracy.
###Code
classifier = train_linear_classification_model(
learning_rate=0.02,
steps=100,
batch_size=10,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
SolutionClick below for one possible solution. Here is a set of parameters that should attain roughly 0.9 accuracy.
###Code
_ = train_linear_classification_model(
learning_rate=0.03,
steps=1000,
batch_size=30,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
Task 2: Replace the Linear Classifier with a Neural Network**Replace the LinearClassifier above with a [`DNNClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) and find a parameter combination that gives 0.95 or better accuracy.**You may wish to experiment with additional regularization methods, such as dropout. These additional regularization methods are documented in the comments for the `DNNClassifier` class.
###Code
#
# YOUR CODE HERE: Replace the linear classifier with a neural network.
#
###Output
_____no_output_____
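###Markdown
If you want a starting point, here is a minimal sketch of the NN-specific change (it reuses the helper functions defined above; the hyperparameters shown are illustrative, not tuned):
###Code
my_optimizer = tf.train.AdagradOptimizer(learning_rate=0.05)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
dnn_classifier = tf.estimator.DNNClassifier(
    feature_columns=construct_feature_columns(),
    n_classes=10,
    hidden_units=[100, 100],  # two hidden layers of 100 units each
    dropout=0.1,              # optional regularization; see the DNNClassifier docs
    optimizer=my_optimizer,
    config=tf.estimator.RunConfig(keep_checkpoint_max=1))
###Output
_____no_output_____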
###Markdown
Once you have a good model, double check that you didn't overfit the validation set by evaluating on the test data that we'll load below.
###Code
mnist_test_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
#
# YOUR CODE HERE: Calculate accuracy on the test set.
#
###Output
_____no_output_____
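###Markdown
One way to fill this in (a minimal sketch mirroring the validation-accuracy code above; it assumes a trained `classifier`):
###Code
predict_test_input_fn = create_predict_input_fn(
    test_examples, test_targets, batch_size=100)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])
print("Accuracy on test data: %0.2f" % metrics.accuracy_score(test_targets, test_predictions))
###Output
_____no_output_____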
###Markdown
SolutionClick below for a possible solution. The code below is almost identical to the original `LinearClassifer` training code, with the exception of the NN-specific configuration, such as the hyperparameter for hidden units.
###Code
def train_nn_classification_model(
learning_rate,
steps,
batch_size,
hidden_units,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a neural network classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, as well as a confusion
matrix.
Args:
learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
hidden_units: A `list` of int values, specifying the number of neurons in each layer.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `DNNClassifier` object.
"""
periods = 10
# Caution: input pipelines are reset with each call to train.
# If the number of steps is small, your model may never see most of the data.
# So with multiple `.train` calls like this you may want to control the length
# of training with num_epochs passed to the input_fn. Or, you can do a really-big shuffle,
# or since it's in-memory data, shuffle all the data in the `input_fn`.
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create feature columns.
feature_columns = [tf.feature_column.numeric_column('pixels', shape=784)]
# Create a DNNClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
n_classes=10,
hidden_units=hidden_units,
optimizer=my_optimizer,
config=tf.contrib.learn.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range (0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space.
_ = map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*')))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
  # Normalize the confusion matrix by row (i.e., by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
classifier = train_nn_classification_model(
learning_rate=0.05,
steps=1000,
batch_size=30,
hidden_units=[100, 100],
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
Next, we verify the accuracy on the test set.
###Code
mnist_test_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
predict_test_input_fn = create_predict_input_fn(
test_examples, test_targets, batch_size=100)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])
accuracy = metrics.accuracy_score(test_targets, test_predictions)
print("Accuracy on test data: %0.2f" % accuracy)
###Output
_____no_output_____
###Markdown
Task 3: Visualize the weights of the first hidden layer.Let's take a few minutes to dig into our neural network and see what it has learned by inspecting the model's weights via its `get_variable_names()` and `get_variable_value()` methods.The input layer of our model has `784` weights corresponding to the `28×28` pixel input images. The first hidden layer will have `784×N` weights where `N` is the number of nodes in that layer. We can turn those weights back into `28×28` images by *reshaping* each of the `N` `1×784` arrays of weights into `N` arrays of size `28×28`.Run the following cell to plot the weights. Note that this cell requires that a `DNNClassifier` called "classifier" has already been trained.
###Code
print(classifier.get_variable_names())
weights0 = classifier.get_variable_value("dnn/hiddenlayer_0/kernel")
print("weights0 shape:", weights0.shape)
num_nodes = weights0.shape[1]
num_rows = int(math.ceil(num_nodes / 10.0))
fig, axes = plt.subplots(num_rows, 10, figsize=(20, 2 * num_rows))
for coef, ax in zip(weights0.T, axes.ravel()):
  # The weights in coef are reshaped from 1x784 to 28x28.
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.pink)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
###Output
_____no_output_____
###Markdown
Copyright 2017 Google LLC. Original version of this lesson: https://colab.research.google.com/notebooks/mlcc/multi-class_classification_of_handwritten_digits.ipynb?utm_source=mlcc&utm_campaign=colab-external&utm_medium=referral&utm_content=multiclass-colab&hl=en, licensed under Apache 2.0. Neural Networks for Handwritten Digit Recognition (abridged lesson)  **Learning Objectives:** * Train a neural network model on the classic handwritten-digit recognition problem, [MNIST](http://yann.lecun.com/exdb/mnist/) Model: a simple neural network with a few hidden layers and a Softmax classification layer at the top
###Code
from __future__ import print_function
import glob
import math
import os
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
mnist_dataframe = pd.read_csv(
"mnist_train_small.csv",
sep=",",
header=None)
# Use just the first 10,000 records for training/validation.
mnist_dataframe = mnist_dataframe.head(10000)
mnist_dataframe = mnist_dataframe.reindex(np.random.permutation(mnist_dataframe.index))
mnist_dataframe.head()
###Output
_____no_output_____
###Markdown
Each row represents one handwritten-digit example. Column 0 holds the label a human rater assigned to that row (0-9). For example, if Column 0 contains '6', the pixels in that row depict a handwritten 6, so the task reduces to a multi-class classification problem over 10 classes.  Columns 1 through 784 hold the feature values, one per pixel, i.e., 28×28=784 pixel values, each a grayscale intensity between 0 and 255.
###Code
mnist_dataframe.loc[:, 72:72]
def parse_labels_and_features(dataset):
"""
  Splits each row of training data into a label and its features: 1 label (column 0) and 784 pixel features.
"""
labels = dataset[0]
# DataFrame.loc index ranges are inclusive at both ends.
features = dataset.loc[:,1:784]
# Scale the data to [0, 1] by dividing out the max value, 255.
features = features / 255
return labels, features
training_targets, training_examples = parse_labels_and_features(mnist_dataframe[:7500])
training_examples.describe()
validation_targets, validation_examples = parse_labels_and_features(mnist_dataframe[7500:10000])
validation_examples.describe()
###Output
_____no_output_____
###Markdown
Show a random example and its corresponding label.
###Code
rand_example = np.random.choice(training_examples.index)
_, ax = plt.subplots()
ax.matshow(training_examples.loc[rand_example].values.reshape(28, 28))
ax.set_title("Label: %i" % training_targets.loc[rand_example])
ax.grid(False)
def construct_feature_columns():
"""
  Constructs the TensorFlow feature columns.
"""
# There are 784 pixels in each image.
return set([tf.feature_column.numeric_column('pixels', shape=784)])
def create_training_input_fn(features, labels, batch_size, num_epochs=None, shuffle=True):
"""
  Training input function: batches the given features and labels using the given batch size and number of epochs, with an option to shuffle the data.
  Returns a callback function (`_input_fn`).
"""
def _input_fn(num_epochs=None, shuffle=True):
# Input pipelines are reset with each call to .train(). To ensure model
# gets a good sampling of data, even when number of steps is small, we
# shuffle all the data before creating the Dataset object
idx = np.random.permutation(features.index)
raw_features = {"pixels":features.reindex(idx)}
raw_targets = np.array(labels[idx])
ds = Dataset.from_tensor_slices((raw_features,raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def create_predict_input_fn(features, labels, batch_size):
"""
  Prediction input function: builds a callback from the given features, labels, and prediction batch size.
"""
def _input_fn():
raw_features = {"pixels": features.values}
raw_targets = np.array(labels)
ds = Dataset.from_tensor_slices((raw_features, raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def train_nn_classification_model(
learning_rate,
steps,
batch_size,
hidden_units,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""
  Trains the neural network model. Tunable arguments: learning rate, number of steps, batch size, hidden units, training features/targets, and validation features/targets.
  Returns a trained `DNNClassifier` object.
"""
periods = 10
  # Use with caution: the input pipeline above is reset on every call to train.
  # If the number of steps is too small, the model may never reach the training goal.
# So with multiple `.train` calls like this you may want to control the length
# of training with num_epochs passed to the input_fn. Or, you can do a really-big shuffle,
# or since it's in-memory data, shuffle all the data in the `input_fn`.
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create feature columns.
feature_columns = [tf.feature_column.numeric_column('pixels', shape=784)]
# Create a DNNClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
n_classes=10,
hidden_units=hidden_units,
optimizer=my_optimizer,
config=tf.contrib.learn.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range (0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space.
_ = map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*')))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
  # Normalize the confusion matrix by row (i.e., by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
classifier = train_nn_classification_model(
learning_rate=0.05,
steps=1000,
batch_size=30,
hidden_units=[100, 100],
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
Training model...
LogLoss error (on validation data):
period 00 : 4.50
period 01 : 3.73
period 02 : 3.05
period 03 : 2.68
period 04 : 2.47
period 05 : 2.24
period 06 : 2.14
period 07 : 2.11
period 08 : 1.93
period 09 : 2.02
Model training finished.
Final accuracy (on validation data): 0.94
###Markdown
Next, we verify the accuracy on the test set.
###Code
mnist_test_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
predict_test_input_fn = create_predict_input_fn(
test_examples, test_targets, batch_size=100)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])
accuracy = metrics.accuracy_score(test_targets, test_predictions)
print("Accuracy on test data: %0.2f" % accuracy)
###Output
Accuracy on test data: 0.95
###Markdown
Task 3: Visualize the weights of the first hidden layer.Let's take a few minutes to dig into our neural network and see what it has learned by inspecting the model's weights via its `get_variable_names()` and `get_variable_value()` methods.The input layer of our model has `784` weights corresponding to the `28×28` pixel input images. The first hidden layer will have `784×N` weights where `N` is the number of nodes in that layer. We can turn those weights back into `28×28` images by *reshaping* each of the `N` `1×784` arrays of weights into `N` arrays of size `28×28`.Run the following cell to plot the weights. Note that this cell requires that a `DNNClassifier` called "classifier" has already been trained.
###Code
print(classifier.get_variable_names())
weights0 = classifier.get_variable_value("dnn/hiddenlayer_0/kernel")
print("weights0 shape:", weights0.shape)
num_nodes = weights0.shape[1]
num_rows = int(math.ceil(num_nodes / 10.0))
fig, axes = plt.subplots(num_rows, 10, figsize=(20, 2 * num_rows))
for coef, ax in zip(weights0.T, axes.ravel()):
  # The weights in coef are reshaped from 1x784 to 28x28.
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.pink)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
###Output
['dnn/hiddenlayer_0/bias', 'dnn/hiddenlayer_0/bias/t_0/Adagrad', 'dnn/hiddenlayer_0/kernel', 'dnn/hiddenlayer_0/kernel/t_0/Adagrad', 'dnn/hiddenlayer_1/bias', 'dnn/hiddenlayer_1/bias/t_0/Adagrad', 'dnn/hiddenlayer_1/kernel', 'dnn/hiddenlayer_1/kernel/t_0/Adagrad', 'dnn/logits/bias', 'dnn/logits/bias/t_0/Adagrad', 'dnn/logits/kernel', 'dnn/logits/kernel/t_0/Adagrad', 'global_step']
weights0 shape: (784, 100)
###Markdown
[View in Colaboratory](https://colab.research.google.com/github/douglaswchung/MNIST-classification/blob/master/multi_class_classification_of_handwritten_digits.ipynb) Copyright 2017 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Classifying Handwritten Digits with Neural Networks  **Learning Objectives:** * Train both a linear model and a neural network to classify handwritten digits from the classic [MNIST](http://yann.lecun.com/exdb/mnist/) data set * Compare the performance of the linear and neural network classification models * Visualize the weights of a neural-network hidden layer Our goal is to map each input image to the correct numeric digit. We will create a NN with a few hidden layers and a Softmax layer at the top to select the winning class. SetupFirst, let's download the data set, import TensorFlow and other utilities, and load the data into a *pandas* `DataFrame`. Note that this data is a sample of the original MNIST training data; we've taken 20000 rows at random.
###Code
from __future__ import print_function
!wget https://storage.googleapis.com/mledu-datasets/mnist_train_small.csv -O /tmp/mnist_train_small.csv
import glob
import io
import math
import os
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import scipy as sp
import pandas as pd
import seaborn as sns
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
mnist_dataframe = pd.read_csv(
io.open("/tmp/mnist_train_small.csv", "r"),
sep=",",
header=None)
# Use just the first 10,000 records for training/validation.
mnist_dataframe = mnist_dataframe.head(10000)
mnist_dataframe = mnist_dataframe.reindex(np.random.permutation(mnist_dataframe.index))
mnist_dataframe.head()
display.display(mnist_dataframe.describe())
###Output
_____no_output_____
###Markdown
Each row represents one labeled example. Column 0 represents the label that a human rater has assigned for one handwritten digit. For example, if Column 0 contains '6', then a human rater interpreted the handwritten character as the digit '6'. The ten digits 0-9 are each represented, with a unique class label for each possible digit. Thus, this is a multi-class classification problem with 10 classes.  Columns 1 through 784 contain the feature values, one per pixel for the 28×28=784 pixel values. The pixel values are on a gray scale in which 0 represents white, 255 represents black, and values between 0 and 255 represent shades of gray. Most of the pixel values are 0; you may want to take a minute to confirm that they aren't all 0. For example, adjust the following text block to print out the values in column 72.
###Code
display.display(mnist_dataframe.loc[:, range(69,784,100)].describe())
###Output
_____no_output_____
###Markdown
Now, let's parse out the labels and features and look at a few examples. Note the use of `loc` which allows us to pull out columns based on original location, since we don't have a header row in this data set.
###Code
def parse_labels_and_features(dataset):
"""Extracts labels and features.
This is a good place to scale or transform the features if needed.
Args:
dataset: A Pandas `Dataframe`, containing the label on the first column and
monochrome pixel values on the remaining columns, in row major order.
Returns:
A `tuple` `(labels, features)`:
labels: A Pandas `Series`.
features: A Pandas `DataFrame`.
"""
labels = dataset[0]
# DataFrame.loc index ranges are inclusive at both ends.
features = dataset.loc[:,1:784]
# Scale the data to [0, 1] by dividing out the max value, 255.
features = features / 255
return labels, features
training_targets, training_examples = parse_labels_and_features(mnist_dataframe[:7500])
training_examples.describe()
validation_targets, validation_examples = parse_labels_and_features(mnist_dataframe[7500:10000])
validation_examples.describe()
###Output
_____no_output_____
###Markdown
Show a random example and its corresponding label.
###Code
rand_example = np.random.choice(training_examples.index)
_, ax = plt.subplots()
ax.matshow(training_examples.loc[rand_example].values.reshape(28, 28))
ax.set_title("Label: %i" % training_targets.loc[rand_example])
ax.grid(False)
###Output
_____no_output_____
###Markdown
Task 1: Build a Linear Model for MNISTFirst, let's create a baseline model to compare against. The `LinearClassifier` provides a set of *k* one-vs-all classifiers, one for each of the *k* classes.You'll notice that in addition to reporting accuracy, and plotting Log Loss over time, we also display a [**confusion matrix**](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix shows which classes were misclassified as other classes. Which digits get confused for each other?Also note that we track the model's error using the `log_loss` function. This should not be confused with the loss function internal to `LinearClassifier` that is used for training.
###Code
def construct_feature_columns():
"""Construct the TensorFlow Feature Columns.
Returns:
A set of feature columns
"""
# There are 784 pixels in each image.
return set([tf.feature_column.numeric_column('pixels', shape=784)])
###Output
_____no_output_____
###Markdown
Here, we'll make separate input functions for training and for prediction. We'll nest them in `create_training_input_fn()` and `create_predict_input_fn()`, respectively, so we can invoke these functions to return the corresponding `_input_fn`s to pass to our `.train()` and `.predict()` calls.
###Code
def create_training_input_fn(features, labels, batch_size, num_epochs=None, shuffle=True):
"""A custom input_fn for sending MNIST data to the estimator for training.
Args:
features: The training features.
labels: The training labels.
batch_size: Batch size to use during training.
Returns:
A function that returns batches of training features and labels during
training.
"""
def _input_fn(num_epochs=None, shuffle=True):
# Input pipelines are reset with each call to .train(). To ensure model
# gets a good sampling of data, even when number of steps is small, we
# shuffle all the data before creating the Dataset object
idx = np.random.permutation(features.index)
raw_features = {"pixels":features.reindex(idx)}
raw_targets = np.array(labels[idx])
ds = Dataset.from_tensor_slices((raw_features,raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def create_predict_input_fn(features, labels, batch_size):
"""A custom input_fn for sending mnist data to the estimator for predictions.
Args:
features: The features to base predictions on.
labels: The labels of the prediction examples.
Returns:
A function that returns features and labels for predictions.
"""
def _input_fn():
raw_features = {"pixels": features.values}
raw_targets = np.array(labels)
ds = Dataset.from_tensor_slices((raw_features, raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def train_linear_classification_model(
learning_rate,
steps,
batch_size,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a linear classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, and a confusion
matrix.
Args:
    learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `LinearClassifier` object.
"""
periods = 10
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create a LinearClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.LinearClassifier(
feature_columns=construct_feature_columns(),
n_classes=10,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
  print("Training model...")
  print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range (0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
    print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
  print("Model training finished.")
# Remove event files to save disk space.
_ = map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*')))
# Calculate final predictions (not probabilities, as above).
  # Materialize the prediction generator so it can be iterated more than once.
  final_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
  final_probabilities = np.array([item['probabilities'] for item in final_predictions])
  final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
  print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
  # Normalize the confusion matrix by row (i.e., by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
###Output
_____no_output_____
###Markdown
**Spend 5 minutes seeing how well you can do on accuracy with a linear model of this form. For this exercise, limit yourself to experimenting with the hyperparameters for batch size, learning rate and steps.**Stop if you get anything above about 0.9 accuracy.
###Code
classifier = train_linear_classification_model(
learning_rate=0.01,
steps=1000,
batch_size=100,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
Training model...
LogLoss error (on validation data):
period 00 : 5.29
period 01 : 4.35
period 02 : 4.16
period 03 : 4.03
period 04 : 3.88
period 05 : 3.83
period 06 : 3.79
period 07 : 3.79
period 08 : 3.80
period 09 : 3.66
Model training finished.
Final accuracy (on validation data): 0.89
###Markdown
SolutionClick below for one possible solution. Here is a set of parameters that should attain roughly 0.9 accuracy.
###Code
_ = train_linear_classification_model(
learning_rate=0.03,
steps=1000,
batch_size=30,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
Task 2: Replace the Linear Classifier with a Neural Network**Replace the LinearClassifier above with a [`DNNClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) and find a parameter combination that gives 0.95 or better accuracy.**You may wish to experiment with additional regularization methods, such as dropout. These additional regularization methods are documented in the comments for the `DNNClassifier` class.
###Code
def train_NN_classifier_model(
learning_rate,
steps,
batch_size,
hidden_units,
dropout,
training_examples,
training_targets,
validation_examples,
    validation_targets):
  """Trains a neural network classification model for the MNIST digits dataset.
  In addition to training, this function also prints training progress information,
  a plot of the training and validation loss over time, and a confusion
  matrix.
  Args:
    learning_rate: A `float`, the learning rate to use.
    steps: A non-zero `int`, the total number of training steps. A training step
      consists of a forward and backward pass using a single batch.
    batch_size: A non-zero `int`, the batch size.
    hidden_units: A `list` of int values, specifying the number of neurons in each layer.
    dropout: A `float` in [0, 1), the probability of dropping out a given node, or `None`.
    training_examples: A `DataFrame` containing the training features.
    training_targets: A `DataFrame` containing the training labels.
    validation_examples: A `DataFrame` containing the validation features.
    validation_targets: A `DataFrame` containing the validation labels.
  Returns:
    The trained `DNNClassifier` object.
"""
periods = 10
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
  # Create a DNNClassifier object.
my_optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=construct_feature_columns(),
hidden_units=hidden_units,
activation_fn=tf.nn.crelu,
dropout=dropout,
n_classes=10,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
  print("Training model...")
  print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range (0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
    print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
  print("Model training finished.")
# Remove event files to save disk space.
_ = map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*')))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
  print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
  # Normalize the confusion matrix by row (i.e., by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
classifier = train_NN_classifier_model(
learning_rate=1.25,
steps=1000,
batch_size=75,
hidden_units=[120,40,40],
dropout=0.02,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
Training model...
LogLoss error (on validation data):
period 00 : 4.79
period 01 : 3.72
period 02 : 3.43
period 03 : 2.90
period 04 : 2.62
period 05 : 2.42
period 06 : 2.29
period 07 : 2.38
period 08 : 2.24
period 09 : 2.03
Model training finished.
Final accuracy (on validation data): 0.94
###Markdown
Once you have a good model, double check that you didn't overfit the validation set by evaluating on the test data that we'll load below.
###Code
!wget https://storage.googleapis.com/mledu-datasets/mnist_test.csv -O /tmp/mnist_test.csv
mnist_test_dataframe = pd.read_csv(
io.open("/tmp/mnist_test.csv", "r"),
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
predict_test_input_fn = create_predict_input_fn(
test_examples, test_targets, batch_size=150)
# Calculate final predictions (not probabilities, as above).
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])
accuracy = metrics.accuracy_score(test_targets, test_predictions)
print("Accuracy on test data: %0.2f" % accuracy)
###Output
Accuracy on test data: 0.95
###Markdown
SolutionClick below for a possible solution. The code below is almost identical to the original `LinearClassifer` training code, with the exception of the NN-specific configuration, such as the hyperparameter for hidden units.
###Code
def train_nn_classification_model(
learning_rate,
steps,
batch_size,
hidden_units,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a neural network classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, as well as a confusion
matrix.
Args:
    learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
hidden_units: A `list` of int values, specifying the number of neurons in each layer.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `DNNClassifier` object.
"""
periods = 10
# Caution: input pipelines are reset with each call to train.
# If the number of steps is small, your model may never see most of the data.
# So with multiple `.train` calls like this you may want to control the length
# of training with num_epochs passed to the input_fn. Or, you can do a really-big shuffle,
# or since it's in-memory data, shuffle all the data in the `input_fn`.
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create feature columns.
feature_columns = [tf.feature_column.numeric_column('pixels', shape=784)]
# Create a DNNClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
n_classes=10,
hidden_units=hidden_units,
optimizer=my_optimizer,
config=tf.contrib.learn.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
  print("Training model...")
  print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range (0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
    print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
  print("Model training finished.")
# Remove event files to save disk space.
_ = map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*')))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
  print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
classifier = train_nn_classification_model(
learning_rate=0.05,
steps=1000,
batch_size=30,
hidden_units=[100, 100],
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
Training model...
LogLoss error (on validation data):
period 00 : 4.95
period 01 : 3.54
period 02 : 2.85
period 03 : 2.74
period 04 : 2.82
period 05 : 2.51
period 06 : 2.02
period 07 : 1.95
period 08 : 2.38
period 09 : 1.93
Model training finished.
Final accuracy (on validation data): 0.94
###Markdown
Next, we verify the accuracy on the test set.
###Code
!wget https://storage.googleapis.com/mledu-datasets/mnist_test.csv -O /tmp/mnist_test.csv
mnist_test_dataframe = pd.read_csv(
io.open("/tmp/mnist_test.csv", "r"),
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
predict_test_input_fn = create_predict_input_fn(
test_examples, test_targets, batch_size=150)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])
accuracy = metrics.accuracy_score(test_targets, test_predictions)
print "Accuracy on test data: %0.2f" % accuracy
###Output
Accuracy on test data: 0.95
###Markdown
Task 3: Visualize the weights of the first hidden layer.Let's take a few minutes to dig into our neural network and see what it has learned by accessing the `weights_` attribute of our model.The input layer of our model has `784` weights corresponding to the `28×28` pixel input images. The first hidden layer will have `784×N` weights where `N` is the number of nodes in that layer. We can turn those weights back into `28×28` images by *reshaping* each of the `N` `1×784` arrays of weights into `N` arrays of size `28×28`.Run the following cell to plot the weights. Note that this cell requires that a `DNNClassifier` called "classifier" has already been trained.
###Code
print(classifier.get_variable_names())
weights0 = classifier.get_variable_value("dnn/hiddenlayer_0/kernel")
print "weights0 shape:", weights0.shape
num_nodes = weights0.shape[1]
num_rows = int(math.ceil(num_nodes / 10.0))
fig, axes = plt.subplots(num_rows, 10, figsize=(20, 2 * num_rows))
for coef, ax in zip(weights0.T, axes.ravel()):
# Weights in coef is reshaped from 1x784 to 28x28.
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.pink)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
###Output
['dnn/hiddenlayer_0/bias', 'dnn/hiddenlayer_0/bias/t_0/Adadelta', 'dnn/hiddenlayer_0/bias/t_0/Adadelta_1', 'dnn/hiddenlayer_0/kernel', 'dnn/hiddenlayer_0/kernel/t_0/Adadelta', 'dnn/hiddenlayer_0/kernel/t_0/Adadelta_1', 'dnn/hiddenlayer_1/bias', 'dnn/hiddenlayer_1/bias/t_0/Adadelta', 'dnn/hiddenlayer_1/bias/t_0/Adadelta_1', 'dnn/hiddenlayer_1/kernel', 'dnn/hiddenlayer_1/kernel/t_0/Adadelta', 'dnn/hiddenlayer_1/kernel/t_0/Adadelta_1', 'dnn/hiddenlayer_2/bias', 'dnn/hiddenlayer_2/bias/t_0/Adadelta', 'dnn/hiddenlayer_2/bias/t_0/Adadelta_1', 'dnn/hiddenlayer_2/kernel', 'dnn/hiddenlayer_2/kernel/t_0/Adadelta', 'dnn/hiddenlayer_2/kernel/t_0/Adadelta_1', 'dnn/logits/bias', 'dnn/logits/bias/t_0/Adadelta', 'dnn/logits/bias/t_0/Adadelta_1', 'dnn/logits/kernel', 'dnn/logits/kernel/t_0/Adadelta', 'dnn/logits/kernel/t_0/Adadelta_1', 'global_step']
weights0 shape: (784, 120)
###Markdown
[View in Colaboratory](https://colab.research.google.com/github/ArunkumarRamanan/Exercises-Machine-Learning-Crash-Course-Google-Developers/blob/master/multi_class_classification_of_handwritten_digits.ipynb) Copyright 2017 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Classifying Handwritten Digits with Neural Networks  **Learning Objectives:** * Train both a linear model and a neural network to classify handwritten digits from the classic [MNIST](http://yann.lecun.com/exdb/mnist/) data set * Compare the performance of the linear and neural network classification models * Visualize the weights of a neural-network hidden layer Our goal is to map each input image to the correct numeric digit. We will create a NN with a few hidden layers and a Softmax layer at the top to select the winning class. SetupFirst, let's download the data set, import TensorFlow and other utilities, and load the data into a *pandas* `DataFrame`. Note that this data is a sample of the original MNIST training data; we've taken 20000 rows at random.
###Code
from __future__ import print_function
import glob
import math
import os
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
mnist_dataframe = pd.read_csv(
"https://dl.google.com/mlcc/mledu-datasets/mnist_train_small.csv",
sep=",",
header=None)
# Use just the first 10,000 records for training/validation.
mnist_dataframe = mnist_dataframe.head(10000)
mnist_dataframe = mnist_dataframe.reindex(np.random.permutation(mnist_dataframe.index))
mnist_dataframe.head()
###Output
_____no_output_____
###Markdown
Each row represents one labeled example. Column 0 represents the label that a human rater has assigned for one handwritten digit. For example, if Column 0 contains '6', then a human rater interpreted the handwritten character as the digit '6'. The ten digits 0-9 are each represented, with a unique class label for each possible digit. Thus, this is a multi-class classification problem with 10 classes.  Columns 1 through 784 contain the feature values, one per pixel for the 28×28=784 pixel values. The pixel values are on a gray scale in which 0 represents white, 255 represents black, and values between 0 and 255 represent shades of gray. Most of the pixel values are 0; you may want to take a minute to confirm that they aren't all 0. For example, adjust the following text block to print out the values in column 72.
###Code
mnist_dataframe.loc[:, 72:72]
###Output
_____no_output_____
###Markdown
Now, let's parse out the labels and features and look at a few examples. Note the use of `loc` which allows us to pull out columns based on original location, since we don't have a header row in this data set.
###Code
def parse_labels_and_features(dataset):
"""Extracts labels and features.
This is a good place to scale or transform the features if needed.
Args:
dataset: A Pandas `Dataframe`, containing the label on the first column and
monochrome pixel values on the remaining columns, in row major order.
Returns:
A `tuple` `(labels, features)`:
labels: A Pandas `Series`.
features: A Pandas `DataFrame`.
"""
labels = dataset[0]
# DataFrame.loc index ranges are inclusive at both ends.
features = dataset.loc[:,1:784]
# Scale the data to [0, 1] by dividing out the max value, 255.
features = features / 255
return labels, features
training_targets, training_examples = parse_labels_and_features(mnist_dataframe[:7500])
training_examples.describe()
validation_targets, validation_examples = parse_labels_and_features(mnist_dataframe[7500:10000])
validation_examples.describe()
###Output
_____no_output_____
###Markdown
Show a random example and its corresponding label.
###Code
rand_example = np.random.choice(training_examples.index)
_, ax = plt.subplots()
ax.matshow(training_examples.loc[rand_example].values.reshape(28, 28))
ax.set_title("Label: %i" % training_targets.loc[rand_example])
ax.grid(False)
###Output
_____no_output_____
###Markdown
Task 1: Build a Linear Model for MNISTFirst, let's create a baseline model to compare against. The `LinearClassifier` provides a set of *k* one-vs-all classifiers, one for each of the *k* classes.You'll notice that in addition to reporting accuracy, and plotting Log Loss over time, we also display a [**confusion matrix**](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix shows which classes were misclassified as other classes. Which digits get confused for each other?Also note that we track the model's error using the `log_loss` function. This should not be confused with the loss function internal to `LinearClassifier` that is used for training.
###Code
def construct_feature_columns():
"""Construct the TensorFlow Feature Columns.
Returns:
A set of feature columns
"""
# There are 784 pixels in each image.
return set([tf.feature_column.numeric_column('pixels', shape=784)])
###Output
_____no_output_____
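###Markdown
A quick aside on the `log_loss` tracking mentioned above: because we will feed it hard one-hot class predictions rather than probabilities, every misclassified example contributes a large (clipped) penalty. A minimal sketch with made-up labels, illustrative only:
###Code
# Hard one-hot "probabilities": sklearn clips the zeros internally, so the
# call below runs; the single wrong prediction dominates the loss.
example_true = [2, 0, 1]
example_pred_one_hot = tf.keras.utils.to_categorical([2, 0, 0], 3)  # last one wrong
print(metrics.log_loss(example_true, example_pred_one_hot, labels=[0, 1, 2]))
###Output
_____no_output_____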
###Markdown
Here, we'll make separate input functions for training and for prediction. We'll nest them in `create_training_input_fn()` and `create_predict_input_fn()`, respectively, so we can invoke these functions to return the corresponding `_input_fn`s to pass to our `.train()` and `.predict()` calls.
###Code
def create_training_input_fn(features, labels, batch_size, num_epochs=None, shuffle=True):
"""A custom input_fn for sending MNIST data to the estimator for training.
Args:
features: The training features.
labels: The training labels.
batch_size: Batch size to use during training.
Returns:
A function that returns batches of training features and labels during
training.
"""
def _input_fn(num_epochs=None, shuffle=True):
# Input pipelines are reset with each call to .train(). To ensure model
# gets a good sampling of data, even when number of steps is small, we
# shuffle all the data before creating the Dataset object
idx = np.random.permutation(features.index)
raw_features = {"pixels":features.reindex(idx)}
raw_targets = np.array(labels[idx])
ds = Dataset.from_tensor_slices((raw_features,raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
if shuffle:
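# A 10000-element buffer covers the full 7500-example training set, so this is a complete shuffle.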
ds = ds.shuffle(10000)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def create_predict_input_fn(features, labels, batch_size):
"""A custom input_fn for sending mnist data to the estimator for predictions.
Args:
features: The features to base predictions on.
labels: The labels of the prediction examples.
Returns:
A function that returns features and labels for predictions.
"""
def _input_fn():
raw_features = {"pixels": features.values}
raw_targets = np.array(labels)
ds = Dataset.from_tensor_slices((raw_features, raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
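# Usage sketch (hypothetical; mirrors what the training loop below does):
#   train_fn = create_training_input_fn(training_examples, training_targets, 10)
#   classifier.train(input_fn=train_fn, steps=100)
# The estimator invokes the returned _input_fn itself to build the input pipeline.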
def train_linear_classification_model(
learning_rate,
steps,
batch_size,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a linear classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, and a confusion
matrix.
Args:
learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `LinearClassifier` object.
"""
periods = 10
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create a LinearClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
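# Clipping the gradient norm at 5.0 keeps occasional large updates from destabilizing training.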
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.LinearClassifier(
feature_columns=construct_feature_columns(),
n_classes=10,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range(0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space.
_ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
###Output
_____no_output_____
###Markdown
**Spend 5 minutes seeing how well you can do on accuracy with a linear model of this form. For this exercise, limit yourself to experimenting with the hyperparameters for batch size, learning rate and steps.**Stop if you get anything above about 0.9 accuracy.
###Code
classifier = train_linear_classification_model(
learning_rate=0.02,
steps=100,
batch_size=10,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
SolutionClick below for one possible solution. Here is a set of parameters that should attain roughly 0.9 accuracy.
###Code
_ = train_linear_classification_model(
learning_rate=0.03,
steps=1000,
batch_size=30,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
Task 2: Replace the Linear Classifier with a Neural Network**Replace the LinearClassifier above with a [`DNNClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) and find a parameter combination that gives 0.95 or better accuracy.**You may wish to experiment with additional regularization methods, such as dropout. These additional regularization methods are documented in the comments for the `DNNClassifier` class.
###Code
#
# YOUR CODE HERE: Replace the linear classifier with a neural network.
#
###Output
_____no_output_____
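###Markdown
As a starting point, here is a minimal sketch of the NN-specific change, under the assumption that the rest of the training-loop code stays as in Task 1; the `hidden_units` and `dropout` values are illustrative, not tuned settings. The full solution appears further below.
###Code
# Sketch only (hyperparameters are placeholder assumptions, not tuned values):
# DNNClassifier accepts a `dropout` argument for regularization.
sketch_optimizer = tf.train.AdagradOptimizer(learning_rate=0.05)
sketch_classifier = tf.estimator.DNNClassifier(
    feature_columns=construct_feature_columns(),
    n_classes=10,
    hidden_units=[100, 100],
    dropout=0.1,
    optimizer=sketch_optimizer)
###Output
_____no_output_____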
###Markdown
Once you have a good model, double check that you didn't overfit the validation set by evaluating on the test data that we'll load below.
###Code
mnist_test_dataframe = pd.read_csv(
"https://dl.google.com/mlcc/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
#
# YOUR CODE HERE: Calculate accuracy on the test set.
#
###Output
_____no_output_____
###Markdown
SolutionClick below for a possible solution. The code below is almost identical to the original `LinearClassifer` training code, with the exception of the NN-specific configuration, such as the hyperparameter for hidden units.
###Code
def train_nn_classification_model(
learning_rate,
steps,
batch_size,
hidden_units,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a neural network classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, as well as a confusion
matrix.
Args:
learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
hidden_units: A `list` of int values, specifying the number of neurons in each layer.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `DNNClassifier` object.
"""
periods = 10
# Caution: input pipelines are reset with each call to train.
# If the number of steps is small, your model may never see most of the data.
# So with multiple `.train` calls like this you may want to control the length
# of training with num_epochs passed to the input_fn. Or, you can do a really-big shuffle,
# or since it's in-memory data, shuffle all the data in the `input_fn`.
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create feature columns.
feature_columns = [tf.feature_column.numeric_column('pixels', shape=784)]
# Create a DNNClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
n_classes=10,
hidden_units=hidden_units,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range(0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space.
_ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
classifier = train_nn_classification_model(
learning_rate=0.05,
steps=1000,
batch_size=30,
hidden_units=[100, 100],
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
Next, we verify the accuracy on the test set.
###Code
mnist_test_dataframe = pd.read_csv(
"https://dl.google.com/mlcc/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
predict_test_input_fn = create_predict_input_fn(
test_examples, test_targets, batch_size=100)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])
accuracy = metrics.accuracy_score(test_targets, test_predictions)
print("Accuracy on test data: %0.2f" % accuracy)
###Output
_____no_output_____
###Markdown
Task 3: Visualize the weights of the first hidden layer.Let's take a few minutes to dig into our neural network and see what it has learned by accessing the `weights_` attribute of our model.The input layer of our model has `784` weights corresponding to the `28×28` pixel input images. The first hidden layer will have `784×N` weights where `N` is the number of nodes in that layer. We can turn those weights back into `28×28` images by *reshaping* each of the `N` `1×784` arrays of weights into `N` arrays of size `28×28`.Run the following cell to plot the weights. Note that this cell requires that a `DNNClassifier` called "classifier" has already been trained.
###Code
print(classifier.get_variable_names())
weights0 = classifier.get_variable_value("dnn/hiddenlayer_0/kernel")
print("weights0 shape:", weights0.shape)
num_nodes = weights0.shape[1]
num_rows = int(math.ceil(num_nodes / 10.0))
fig, axes = plt.subplots(num_rows, 10, figsize=(20, 2 * num_rows))
for coef, ax in zip(weights0.T, axes.ravel()):
# Weights in coef is reshaped from 1x784 to 28x28.
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.pink)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
###Output
_____no_output_____
###Markdown
Copyright 2017 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Classifying Handwritten Digits with Neural Networks  **Learning Objectives:** * Train both a linear model and a neural network to classify handwritten digits from the classic [MNIST](http://yann.lecun.com/exdb/mnist/) data set * Compare the performance of the linear and neural network classification models * Visualize the weights of a neural-network hidden layer Our goal is to map each input image to the correct numeric digit. We will create a NN with a few hidden layers and a Softmax layer at the top to select the winning class. SetupFirst, let's download the data set, import TensorFlow and other utilities, and load the data into a *pandas* `DataFrame`. Note that this data is a sample of the original MNIST training data; we've taken 20000 rows at random.
###Code
from __future__ import print_function
import glob
import math
import os
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
mnist_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_train_small.csv",
sep=",",
header=None)
# Use just the first 10,000 records for training/validation.
mnist_dataframe = mnist_dataframe.head(10000)
mnist_dataframe = mnist_dataframe.reindex(np.random.permutation(mnist_dataframe.index))
mnist_dataframe.head()
###Output
_____no_output_____
###Markdown
Each row represents one labeled example. Column 0 represents the label that a human rater has assigned for one handwritten digit. For example, if Column 0 contains '6', then a human rater interpreted the handwritten character as the digit '6'. The ten digits 0-9 are each represented, with a unique class label for each possible digit. Thus, this is a multi-class classification problem with 10 classes.  Columns 1 through 784 contain the feature values, one per pixel for the 28×28=784 pixel values. The pixel values are on a gray scale in which 0 represents white, 255 represents black, and values between 0 and 255 represent shades of gray. Most of the pixel values are 0; you may want to take a minute to confirm that they aren't all 0. For example, adjust the following text block to print out the values in column 72.
###Code
mnist_dataframe.loc[:, 72:72]
###Output
_____no_output_____
###Markdown
Now, let's parse out the labels and features and look at a few examples. Note the use of `loc` which allows us to pull out columns based on original location, since we don't have a header row in this data set.
###Code
def parse_labels_and_features(dataset):
"""Extracts labels and features.
This is a good place to scale or transform the features if needed.
Args:
dataset: A Pandas `Dataframe`, containing the label on the first column and
monochrome pixel values on the remaining columns, in row major order.
Returns:
A `tuple` `(labels, features)`:
labels: A Pandas `Series`.
features: A Pandas `DataFrame`.
"""
labels = dataset[0]
# DataFrame.loc index ranges are inclusive at both ends.
features = dataset.loc[:,1:784]
# Scale the data to [0, 1] by dividing out the max value, 255.
features = features / 255
return labels, features
training_targets, training_examples = parse_labels_and_features(mnist_dataframe[:7500])
training_examples.describe()
validation_targets, validation_examples = parse_labels_and_features(mnist_dataframe[7500:10000])
validation_examples.describe()
###Output
_____no_output_____
###Markdown
Show a random example and its corresponding label.
###Code
rand_example = np.random.choice(training_examples.index)
_, ax = plt.subplots()
ax.matshow(training_examples.loc[rand_example].values.reshape(28, 28))
ax.set_title("Label: %i" % training_targets.loc[rand_example])
ax.grid(False)
###Output
_____no_output_____
###Markdown
Task 1: Build a Linear Model for MNISTFirst, let's create a baseline model to compare against. The `LinearClassifier` provides a set of *k* one-vs-all classifiers, one for each of the *k* classes.You'll notice that in addition to reporting accuracy, and plotting Log Loss over time, we also display a [**confusion matrix**](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix shows which classes were misclassified as other classes. Which digits get confused for each other?Also note that we track the model's error using the `log_loss` function. This should not be confused with the loss function internal to `LinearClassifier` that is used for training.
###Code
def construct_feature_columns():
"""Construct the TensorFlow Feature Columns.
Returns:
A set of feature columns
"""
# There are 784 pixels in each image.
return set([tf.feature_column.numeric_column('pixels', shape=784)])
###Output
_____no_output_____
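###Markdown
Before training, it helps to see what the row-normalized confusion matrix at the end of the training function computes. A tiny worked example with made-up counts, illustrative only:
###Code
# Each row is a true class; dividing by the row sum turns counts into
# per-class recall fractions, so every row sums to 1.0.
toy_cm = np.array([[8, 2],
                   [1, 9]])
print(toy_cm.astype("float") / toy_cm.sum(axis=1)[:, np.newaxis])
###Output
_____no_output_____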
###Markdown
Here, we'll make separate input functions for training and for prediction. We'll nest them in `create_training_input_fn()` and `create_predict_input_fn()`, respectively, so we can invoke these functions to return the corresponding `_input_fn`s to pass to our `.train()` and `.predict()` calls.
###Code
def create_training_input_fn(features, labels, batch_size, num_epochs=None, shuffle=True):
"""A custom input_fn for sending MNIST data to the estimator for training.
Args:
features: The training features.
labels: The training labels.
batch_size: Batch size to use during training.
Returns:
A function that returns batches of training features and labels during
training.
"""
def _input_fn(num_epochs=None, shuffle=True):
# Input pipelines are reset with each call to .train(). To ensure model
# gets a good sampling of data, even when number of steps is small, we
# shuffle all the data before creating the Dataset object
idx = np.random.permutation(features.index)
raw_features = {"pixels":features.reindex(idx)}
raw_targets = np.array(labels[idx])
ds = Dataset.from_tensor_slices((raw_features,raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def create_predict_input_fn(features, labels, batch_size):
"""A custom input_fn for sending mnist data to the estimator for predictions.
Args:
features: The features to base predictions on.
labels: The labels of the prediction examples.
Returns:
A function that returns features and labels for predictions.
"""
def _input_fn():
raw_features = {"pixels": features.values}
raw_targets = np.array(labels)
ds = Dataset.from_tensor_slices((raw_features, raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def train_linear_classification_model(
learning_rate,
steps,
batch_size,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a linear classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, and a confusion
matrix.
Args:
learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `LinearClassifier` object.
"""
periods = 10
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create a LinearClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.LinearClassifier(
feature_columns=construct_feature_columns(),
n_classes=10,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range(0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space.
_ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
###Output
_____no_output_____
###Markdown
**Spend 5 minutes seeing how well you can do on accuracy with a linear model of this form. For this exercise, limit yourself to experimenting with the hyperparameters for batch size, learning rate and steps.**Stop if you get anything above about 0.9 accuracy.
###Code
classifier = train_linear_classification_model(
learning_rate=0.02,
steps=100,
batch_size=10,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
SolutionClick below for one possible solution. Here is a set of parameters that should attain roughly 0.9 accuracy.
###Code
_ = train_linear_classification_model(
learning_rate=0.03,
steps=1000,
batch_size=30,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
Task 2: Replace the Linear Classifier with a Neural Network**Replace the LinearClassifier above with a [`DNNClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) and find a parameter combination that gives 0.95 or better accuracy.**You may wish to experiment with additional regularization methods, such as dropout. These additional regularization methods are documented in the comments for the `DNNClassifier` class.
###Code
#
# YOUR CODE HERE: Replace the linear classifier with a neural network.
#
###Output
_____no_output_____
###Markdown
Once you have a good model, double check that you didn't overfit the validation set by evaluating on the test data that we'll load below.
###Code
mnist_test_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
#
# YOUR CODE HERE: Calculate accuracy on the test set.
#
###Output
_____no_output_____
###Markdown
SolutionClick below for a possible solution. The code below is almost identical to the original `LinearClassifer` training code, with the exception of the NN-specific configuration, such as the hyperparameter for hidden units.
###Code
def train_nn_classification_model(
learning_rate,
steps,
batch_size,
hidden_units,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a neural network classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, as well as a confusion
matrix.
Args:
learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
hidden_units: A `list` of int values, specifying the number of neurons in each layer.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `DNNClassifier` object.
"""
periods = 10
# Caution: input pipelines are reset with each call to train.
# If the number of steps is small, your model may never see most of the data.
# So with multiple `.train` calls like this you may want to control the length
# of training with num_epochs passed to the input_fn. Or, you can do a really-big shuffle,
# or since it's in-memory data, shuffle all the data in the `input_fn`.
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create feature columns.
feature_columns = [tf.feature_column.numeric_column('pixels', shape=784)]
# Create a DNNClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
n_classes=10,
hidden_units=hidden_units,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range(0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space.
_ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
classifier = train_nn_classification_model(
learning_rate=0.05,
steps=1000,
batch_size=30,
hidden_units=[100, 100],
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
Next, we verify the accuracy on the test set.
###Code
mnist_test_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
predict_test_input_fn = create_predict_input_fn(
test_examples, test_targets, batch_size=100)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])
accuracy = metrics.accuracy_score(test_targets, test_predictions)
print("Accuracy on test data: %0.2f" % accuracy)
###Output
_____no_output_____
###Markdown
Task 3: Visualize the weights of the first hidden layer.Let's take a few minutes to dig into our neural network and see what it has learned by accessing the `weights_` attribute of our model.The input layer of our model has `784` weights corresponding to the `28×28` pixel input images. The first hidden layer will have `784×N` weights where `N` is the number of nodes in that layer. We can turn those weights back into `28×28` images by *reshaping* each of the `N` `1×784` arrays of weights into `N` arrays of size `28×28`.Run the following cell to plot the weights. Note that this cell requires that a `DNNClassifier` called "classifier" has already been trained.
###Code
print(classifier.get_variable_names())
weights0 = classifier.get_variable_value("dnn/hiddenlayer_0/kernel")
print("weights0 shape:", weights0.shape)
num_nodes = weights0.shape[1]
num_rows = int(math.ceil(num_nodes / 10.0))
fig, axes = plt.subplots(num_rows, 10, figsize=(20, 2 * num_rows))
for coef, ax in zip(weights0.T, axes.ravel()):
# Weights in coef is reshaped from 1x784 to 28x28.
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.pink)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
###Output
_____no_output_____
###Markdown
Copyright 2017 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Classifying Handwritten Digits with Neural Networks  **Learning Objectives:** * Train both a linear model and a neural network to classify handwritten digits from the classic [MNIST](http://yann.lecun.com/exdb/mnist/) data set * Compare the performance of the linear and neural network classification models * Visualize the weights of a neural-network hidden layer Our goal is to map each input image to the correct numeric digit. We will create a NN with a few hidden layers and a Softmax layer at the top to select the winning class. SetupFirst, let's download the data set, import TensorFlow and other utilities, and load the data into a *pandas* `DataFrame`. Note that this data is a sample of the original MNIST training data; we've taken 20000 rows at random.
###Code
from __future__ import print_function
import glob
import math
import os
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
mnist_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_train_small.csv",
sep=",",
header=None)
# Use just the first 10,000 records for training/validation.
mnist_dataframe = mnist_dataframe.head(10000)
mnist_dataframe = mnist_dataframe.reindex(np.random.permutation(mnist_dataframe.index))
mnist_dataframe.head()
###Output
_____no_output_____
###Markdown
Each row represents one labeled example. Column 0 represents the label that a human rater has assigned for one handwritten digit. For example, if Column 0 contains '6', then a human rater interpreted the handwritten character as the digit '6'. The ten digits 0-9 are each represented, with a unique class label for each possible digit. Thus, this is a multi-class classification problem with 10 classes.  Columns 1 through 784 contain the feature values, one per pixel for the 28×28=784 pixel values. The pixel values are on a gray scale in which 0 represents white, 255 represents black, and values between 0 and 255 represent shades of gray. Most of the pixel values are 0; you may want to take a minute to confirm that they aren't all 0. For example, adjust the following text block to print out the values in column 72.
###Code
mnist_dataframe.loc[:, 72:72]
###Output
_____no_output_____
###Markdown
Now, let's parse out the labels and features and look at a few examples. Note the use of `loc` which allows us to pull out columns based on original location, since we don't have a header row in this data set.
###Code
def parse_labels_and_features(dataset):
"""Extracts labels and features.
This is a good place to scale or transform the features if needed.
Args:
dataset: A Pandas `Dataframe`, containing the label on the first column and
monochrome pixel values on the remaining columns, in row major order.
Returns:
A `tuple` `(labels, features)`:
labels: A Pandas `Series`.
features: A Pandas `DataFrame`.
"""
labels = dataset[0]
# DataFrame.loc index ranges are inclusive at both ends.
features = dataset.loc[:,1:784]
# Scale the data to [0, 1] by dividing out the max value, 255.
features = features / 255
return labels, features
training_targets, training_examples = parse_labels_and_features(mnist_dataframe[:7500])
training_examples.describe()
validation_targets, validation_examples = parse_labels_and_features(mnist_dataframe[7500:10000])
validation_examples.describe()
###Output
_____no_output_____
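###Markdown
A quick check of the inclusive `loc` slicing noted above, on a toy frame (illustrative only):
###Code
# DataFrame.loc ranges include both endpoints, unlike ordinary Python slicing,
# so loc[:, 1:784] really selects all 784 pixel columns.
toy = pd.DataFrame([[0, 10, 20, 30]])
print(toy.loc[:, 1:3].shape)  # (1, 3): columns 1, 2 and 3
###Output
_____no_output_____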
###Markdown
Show a random example and its corresponding label.
###Code
rand_example = np.random.choice(training_examples.index)
_, ax = plt.subplots()
ax.matshow(training_examples.loc[rand_example].values.reshape(28, 28))
ax.set_title("Label: %i" % training_targets.loc[rand_example])
ax.grid(False)
###Output
_____no_output_____
###Markdown
Task 1: Build a Linear Model for MNISTFirst, let's create a baseline model to compare against. The `LinearClassifier` provides a set of *k* one-vs-all classifiers, one for each of the *k* classes.You'll notice that in addition to reporting accuracy, and plotting Log Loss over time, we also display a [**confusion matrix**](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix shows which classes were misclassified as other classes. Which digits get confused for each other?Also note that we track the model's error using the `log_loss` function. This should not be confused with the loss function internal to `LinearClassifier` that is used for training.
###Code
def construct_feature_columns():
"""Construct the TensorFlow Feature Columns.
Returns:
A set of feature columns
"""
# There are 784 pixels in each image.
return set([tf.feature_column.numeric_column('pixels', shape=784)])
###Output
_____no_output_____
###Markdown
Here, we'll make separate input functions for training and for prediction. We'll nest them in `create_training_input_fn()` and `create_predict_input_fn()`, respectively, so we can invoke these functions to return the corresponding `_input_fn`s to pass to our `.train()` and `.predict()` calls.
###Code
def create_training_input_fn(features, labels, batch_size, num_epochs=None, shuffle=True):
"""A custom input_fn for sending MNIST data to the estimator for training.
Args:
features: The training features.
labels: The training labels.
batch_size: Batch size to use during training.
Returns:
A function that returns batches of training features and labels during
training.
"""
def _input_fn(num_epochs=None, shuffle=True):
# Input pipelines are reset with each call to .train(). To ensure model
# gets a good sampling of data, even when number of steps is small, we
# shuffle all the data before creating the Dataset object
idx = np.random.permutation(features.index)
raw_features = {"pixels":features.reindex(idx)}
raw_targets = np.array(labels[idx])
ds = Dataset.from_tensor_slices((raw_features,raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def create_predict_input_fn(features, labels, batch_size):
"""A custom input_fn for sending mnist data to the estimator for predictions.
Args:
features: The features to base predictions on.
labels: The labels of the prediction examples.
Returns:
A function that returns features and labels for predictions.
"""
def _input_fn():
raw_features = {"pixels": features.values}
raw_targets = np.array(labels)
ds = Dataset.from_tensor_slices((raw_features, raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def train_linear_classification_model(
learning_rate,
steps,
batch_size,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a linear classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, and a confusion
matrix.
Args:
learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `LinearClassifier` object.
"""
periods = 10
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create a LinearClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.LinearClassifier(
feature_columns=construct_feature_columns(),
n_classes=10,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range(0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space. Note that map() is lazy in
# Python 3, so wrap it in list() to make the removals actually run.
_ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e., by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
###Output
_____no_output_____
###Markdown
**Spend 5 minutes seeing how well you can do on accuracy with a linear model of this form. For this exercise, limit yourself to experimenting with the hyperparameters for batch size, learning rate, and steps.** Stop if you get anything above about 0.9 accuracy.
###Code
classifier = train_linear_classification_model(
learning_rate=0.03,
steps=2000,
batch_size=30,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
Training model...
LogLoss error (on validation data):
period 00 : 3.97
period 01 : 3.54
period 02 : 3.51
period 03 : 3.36
period 04 : 3.44
period 05 : 3.32
period 06 : 3.22
period 07 : 3.38
period 08 : 3.25
period 09 : 3.15
Model training finished.
Final accuracy (on validation data): 0.91
###Markdown
Solution: Click below for one possible solution. Here is a set of parameters that should attain roughly 0.9 accuracy.
###Code
_ = train_linear_classification_model(
learning_rate=0.03,
steps=1000,
batch_size=30,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
Task 2: Replace the Linear Classifier with a Neural Network. **Replace the LinearClassifier above with a [`DNNClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) and find a parameter combination that gives 0.95 or better accuracy.** You may wish to experiment with additional regularization methods, such as dropout (a short sketch follows the function below). These additional regularization methods are documented in the comments for the `DNNClassifier` class.
###Code
def train_nn_classification_model(
learning_rate,
steps,
batch_size,
hidden_units,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a neural network classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, as well as a confusion
matrix.
Args:
learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
hidden_units: A `list` of int values, specifying the number of neurons in each layer.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `DNNClassifier` object.
"""
periods = 10
# Caution: input pipelines are reset with each call to train.
# If the number of steps is small, your model may never see most of the data.
# So with multiple `.train` calls like this you may want to control the length
# of training with num_epochs passed to the input_fn. Or, you can do a really-big shuffle,
# or since it's in-memory data, shuffle all the data in the `input_fn`.
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create feature columns.
feature_columns = [tf.feature_column.numeric_column('pixels', shape=784)]
# Create a DNNClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
n_classes=10,
hidden_units=hidden_units,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range(0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space. Note that map() is lazy in
# Python 3, so wrap it in list() to make the removals actually run.
_ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e., by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
###Output
_____no_output_____
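###Markdown
Tying back to the dropout note at the top of this task: `tf.estimator.DNNClassifier` accepts a `dropout` argument (the probability of dropping a given coordinate during training). Below is a minimal, hedged sketch of the construction only, reusing the helpers defined earlier; the 0.1 rate is an arbitrary starting point, not a tuned value.
###Code
# Sketch: the same DNN setup as above, with dropout regularization added.
# dropout=0.1 drops 10% of activations during training only; tune on validation data.
sketch_optimizer = tf.train.AdagradOptimizer(learning_rate=0.05)
sketch_optimizer = tf.contrib.estimator.clip_gradients_by_norm(sketch_optimizer, 5.0)
dropout_classifier = tf.estimator.DNNClassifier(
    feature_columns=construct_feature_columns(),
    n_classes=10,
    hidden_units=[100, 100],
    dropout=0.1,
    optimizer=sketch_optimizer,
    config=tf.estimator.RunConfig(keep_checkpoint_max=1))
###Output
_____no_output_____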
###Markdown
Once you have a good model, double check that you didn't overfit the validation set by evaluating on the test data that we'll load below.
###Code
mnist_test_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
classifier = train_nn_classification_model(
learning_rate=0.05,
steps=1000,
batch_size=30,
hidden_units=[100, 100],
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
predict_test_input_fn = create_predict_input_fn(
test_examples, test_targets, batch_size=100)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])
accuracy = metrics.accuracy_score(test_targets, test_predictions)
print("Accuracy on test data: %0.2f" % accuracy)
###Output
Accuracy on test data: 0.95
###Markdown
Solution: Click below for a possible solution. The code below is almost identical to the original `LinearClassifier` training code, with the exception of the NN-specific configuration, such as the hyperparameter for hidden units.
###Code
def train_nn_classification_model(
learning_rate,
steps,
batch_size,
hidden_units,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a neural network classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, as well as a confusion
matrix.
Args:
learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
hidden_units: A `list` of int values, specifying the number of neurons in each layer.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `DNNClassifier` object.
"""
periods = 10
# Caution: input pipelines are reset with each call to train.
# If the number of steps is small, your model may never see most of the data.
# So with multiple `.train` calls like this you may want to control the length
# of training with num_epochs passed to the input_fn. Or, you can do a really-big shuffle,
# or since it's in-memory data, shuffle all the data in the `input_fn`.
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create feature columns.
feature_columns = [tf.feature_column.numeric_column('pixels', shape=784)]
# Create a DNNClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
n_classes=10,
hidden_units=hidden_units,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range(0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space. Note that map() is lazy in
# Python 3, so wrap it in list() to make the removals actually run.
_ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e., by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
classifier = train_nn_classification_model(
learning_rate=0.05,
steps=1000,
batch_size=30,
hidden_units=[100, 100],
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
Next, we verify the accuracy on the test set.
###Code
mnist_test_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
predict_test_input_fn = create_predict_input_fn(
test_examples, test_targets, batch_size=100)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])
accuracy = metrics.accuracy_score(test_targets, test_predictions)
print("Accuracy on test data: %0.2f" % accuracy)
###Output
_____no_output_____
###Markdown
Task 3: Visualize the weights of the first hidden layer. Let's take a few minutes to dig into our neural network and see what it has learned by reading its weights back out with the model's `get_variable_value()` method. The input layer of our model has `784` weights corresponding to the `28×28` pixel input images. The first hidden layer will have `784×N` weights where `N` is the number of nodes in that layer. We can turn those weights back into `28×28` images by *reshaping* each of the `N` `1×784` arrays of weights into `N` arrays of size `28×28`. Run the following cell to plot the weights. Note that this cell requires that a `DNNClassifier` called "classifier" has already been trained.
###Code
print(classifier.get_variable_names())
weights0 = classifier.get_variable_value("dnn/hiddenlayer_0/kernel")
print("weights0 shape:", weights0.shape)
num_nodes = weights0.shape[1]
num_rows = int(math.ceil(num_nodes / 10.0))
fig, axes = plt.subplots(num_rows, 10, figsize=(20, 2 * num_rows))
for coef, ax in zip(weights0.T, axes.ravel()):
# Each coef holds one node's incoming weights, reshaped from 1x784 to 28x28.
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.pink)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
###Output
['dnn/hiddenlayer_0/bias', 'dnn/hiddenlayer_0/bias/t_0/Adagrad', 'dnn/hiddenlayer_0/kernel', 'dnn/hiddenlayer_0/kernel/t_0/Adagrad', 'dnn/hiddenlayer_1/bias', 'dnn/hiddenlayer_1/bias/t_0/Adagrad', 'dnn/hiddenlayer_1/kernel', 'dnn/hiddenlayer_1/kernel/t_0/Adagrad', 'dnn/logits/bias', 'dnn/logits/bias/t_0/Adagrad', 'dnn/logits/kernel', 'dnn/logits/kernel/t_0/Adagrad', 'global_step']
weights0 shape: (784, 100)
###Markdown
The first hidden layer of the neural network should be modeling some pretty low-level features, so visualizing the weights will probably just show some fuzzy blobs or possibly a few parts of digits. You may also see some neurons that are essentially noise -- these are either unconverged or they are being ignored by higher layers. It can be interesting to stop training at different numbers of iterations and see the effect. **Train the classifier for 10, 100, and 1000 steps, then run this visualization again (a retraining sketch follows the next cell).** What differences do you see visually for the different levels of convergence?
###Code
weights1 = classifier.get_variable_value("dnn/hiddenlayer_1/kernel")
print("weights1 shape:", weights1.shape)
num_nodes = weights1.shape[1]
num_rows = int(math.ceil(num_nodes / 10.0))
fig, axes = plt.subplots(num_rows, 10, figsize=(20, 2 * num_rows))
for coef, ax in zip(weights1.T, axes.ravel()):
# Each coef holds one node's incoming weights, reshaped from 1x100 to 10x10.
ax.matshow(coef.reshape(10, 10), cmap=plt.cm.pink)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
weights2 = classifier.get_variable_value("dnn/logits/kernel")
print("weights2 shape:", weights2.shape)
num_nodes = weights2.shape[1]
num_rows = int(math.ceil(num_nodes / 10.0))
fig, axes = plt.subplots(num_rows, 10, figsize=(20, 2 * num_rows))
for coef, ax in zip(weights2.T, axes.ravel()):
# Each coef holds one node's incoming weights, reshaped from 1x100 to 10x10.
ax.matshow(coef.reshape(10, 10), cmap=plt.cm.pink)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
###Output
weights2 shape: (100, 10)
###Markdown
[View in Colaboratory](https://colab.research.google.com/github/nikhilbhatewara/GoogleMachineLearningCrashCourse/blob/master/multi_class_classification_of_handwritten_digits.ipynb) Copyright 2017 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Classifying Handwritten Digits with Neural Networks  **Learning Objectives:** * Train both a linear model and a neural network to classify handwritten digits from the classic [MNIST](http://yann.lecun.com/exdb/mnist/) data set * Compare the performance of the linear and neural network classification models * Visualize the weights of a neural-network hidden layer Our goal is to map each input image to the correct numeric digit. We will create an NN with a few hidden layers and a Softmax layer at the top to select the winning class. Setup: First, let's download the data set, import TensorFlow and other utilities, and load the data into a *pandas* `DataFrame`. Note that this data is a sample of the original MNIST training data; we've taken 20000 rows at random.
###Code
from __future__ import print_function
import glob
import math
import os
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
mnist_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_train_small.csv",
sep=",",
header=None)
# Use just the first 10,000 records for training/validation.
mnist_dataframe = mnist_dataframe.head(10000)
mnist_dataframe = mnist_dataframe.reindex(np.random.permutation(mnist_dataframe.index))
mnist_dataframe.head()
###Output
_____no_output_____
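###Markdown
A quick, hedged refresher on the Softmax layer mentioned above: it maps the final layer's raw scores (logits) to probabilities that sum to 1, and the predicted class is the argmax. The logits below are made-up values for the 10 digit classes.
###Code
# Toy softmax over hypothetical logits for the 10 digit classes.
logits = np.array([1.0, 2.0, 0.5, 0.1, 0.0, -1.0, 3.0, 0.2, 0.3, 0.4])
probs = np.exp(logits) / np.sum(np.exp(logits))
print(probs.sum())     # ~1.0
print(probs.argmax())  # class 6 wins: it has the largest logit
###Output
_____no_output_____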
###Markdown
Each row represents one labeled example. Column 0 represents the label that a human rater has assigned for one handwritten digit. For example, if Column 0 contains '6', then a human rater interpreted the handwritten character as the digit '6'. The ten digits 0-9 are each represented, with a unique class label for each possible digit. Thus, this is a multi-class classification problem with 10 classes.  Columns 1 through 784 contain the feature values, one per pixel for the 28×28=784 pixel values. The pixel values are on a gray scale in which 0 represents white, 255 represents black, and values between 0 and 255 represent shades of gray. Most of the pixel values are 0; you may want to take a minute to confirm that they aren't all 0. For example, adjust the following code block to print out the values in column 72.
###Code
mnist_dataframe.loc[:, 72:72]
###Output
_____no_output_____
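###Markdown
As a quick, hedged check of the "mostly zero" claim above: the fraction of zero-valued pixel entries across the whole frame should come out well above 0.5 (the exact value depends on the sample drawn).
###Code
# Fraction of pixel values (columns 1-784) that are exactly zero.
pixel_values = mnist_dataframe.loc[:, 1:784]
print((pixel_values == 0).values.mean())
###Output
_____no_output_____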
###Markdown
Now, let's parse out the labels and features and look at a few examples. Note the use of `loc` which allows us to pull out columns based on original location, since we don't have a header row in this data set.
###Code
def parse_labels_and_features(dataset):
"""Extracts labels and features.
This is a good place to scale or transform the features if needed.
Args:
dataset: A Pandas `Dataframe`, containing the label on the first column and
monochrome pixel values on the remaining columns, in row major order.
Returns:
A `tuple` `(labels, features)`:
labels: A Pandas `Series`.
features: A Pandas `DataFrame`.
"""
labels = dataset[0]
# DataFrame.loc index ranges are inclusive at both ends.
features = dataset.loc[:,1:784]
# Scale the data to [0, 1] by dividing out the max value, 255.
features = features / 255
return labels, features
training_targets, training_examples = parse_labels_and_features(mnist_dataframe[:7500])
training_examples.describe()
validation_targets, validation_examples = parse_labels_and_features(mnist_dataframe[7500:10000])
validation_examples.describe()
###Output
_____no_output_____
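###Markdown
One detail worth a small demo: unlike ordinary Python slicing, `DataFrame.loc` ranges are inclusive at both ends. The toy frame below is made up for illustration.
###Code
# loc slicing includes both endpoints, so 1:3 selects columns 1, 2, and 3.
demo_df = pd.DataFrame(np.arange(10).reshape(2, 5))
print(demo_df.loc[:, 1:3])   # three columns
print(demo_df.iloc[:, 1:3])  # two columns: iloc follows Python's exclusive convention
###Output
_____no_output_____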
###Markdown
Show a random example and its corresponding label.
###Code
rand_example = np.random.choice(training_examples.index)
_, ax = plt.subplots()
ax.matshow(training_examples.loc[rand_example].values.reshape(28, 28))
ax.set_title("Label: %i" % training_targets.loc[rand_example])
ax.grid(False)
###Output
_____no_output_____
###Markdown
Task 1: Build a Linear Model for MNIST. First, let's create a baseline model to compare against. The `LinearClassifier` provides a set of *k* one-vs-all classifiers, one for each of the *k* classes. You'll notice that in addition to reporting accuracy, and plotting Log Loss over time, we also display a [**confusion matrix**](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix shows which classes were misclassified as other classes. Which digits get confused for each other? Also note that we track the model's error using the `log_loss` function. This should not be confused with the loss function internal to `LinearClassifier` that is used for training.
###Code
def construct_feature_columns():
"""Construct the TensorFlow Feature Columns.
Returns:
A set of feature columns
"""
# There are 784 pixels in each image.
return set([tf.feature_column.numeric_column('pixels', shape=784)])
###Output
_____no_output_____
###Markdown
Here, we'll make separate input functions for training and for prediction. We'll nest them in `create_training_input_fn()` and `create_predict_input_fn()`, respectively, so we can invoke these functions to return the corresponding `_input_fn`s to pass to our `.train()` and `.predict()` calls.
###Code
def create_training_input_fn(features, labels, batch_size, num_epochs=None, shuffle=True):
"""A custom input_fn for sending MNIST data to the estimator for training.
Args:
features: The training features.
labels: The training labels.
batch_size: Batch size to use during training.
Returns:
A function that returns batches of training features and labels during
training.
"""
def _input_fn(num_epochs=None, shuffle=True):
# Input pipelines are reset with each call to .train(). To ensure model
# gets a good sampling of data, even when number of steps is small, we
# shuffle all the data before creating the Dataset object
idx = np.random.permutation(features.index)
raw_features = {"pixels":features.reindex(idx)}
raw_targets = np.array(labels[idx])
ds = Dataset.from_tensor_slices((raw_features,raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def create_predict_input_fn(features, labels, batch_size):
"""A custom input_fn for sending mnist data to the estimator for predictions.
Args:
features: The features to base predictions on.
labels: The labels of the prediction examples.
Returns:
A function that returns features and labels for predictions.
"""
def _input_fn():
raw_features = {"pixels": features.values}
raw_targets = np.array(labels)
ds = Dataset.from_tensor_slices((raw_features, raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def train_linear_classification_model(
learning_rate,
steps,
batch_size,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a linear classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, and a confusion
matrix.
Args:
learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `LinearClassifier` object.
"""
periods = 10
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create a LinearClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.LinearClassifier(
feature_columns=construct_feature_columns(),
n_classes=10,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range(0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space. Note that map() is lazy in
# Python 3, so wrap it in list() to make the removals actually run.
_ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e., by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
###Output
_____no_output_____
###Markdown
**Spend 5 minutes seeing how well you can do on accuracy with a linear model of this form. For this exercise, limit yourself to experimenting with the hyperparameters for batch size, learning rate, and steps.** Stop if you get anything above about 0.9 accuracy.
###Code
classifier = train_linear_classification_model(
learning_rate=0.2,
steps=100,
batch_size=20,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
Training model...
LogLoss error (on validation data):
period 00 : 11.62
period 01 : 11.49
period 02 : 8.98
period 03 : 9.09
period 04 : 6.41
period 05 : 6.33
period 06 : 6.07
period 07 : 6.95
period 08 : 5.94
period 09 : 5.07
Model training finished.
Final accuracy (on validation data): 0.85
###Markdown
Solution: Click below for one possible solution. Here is a set of parameters that should attain roughly 0.9 accuracy.
###Code
_ = train_linear_classification_model(
learning_rate=0.03,
steps=1000,
batch_size=30,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
Task 2: Replace the Linear Classifier with a Neural Network. **Replace the LinearClassifier above with a [`DNNClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) and find a parameter combination that gives 0.95 or better accuracy.** You may wish to experiment with additional regularization methods, such as dropout. These additional regularization methods are documented in the comments for the `DNNClassifier` class.
###Code
###Output
_____no_output_____
###Markdown
Once you have a good model, double check that you didn't overfit the validation set by evaluating on the test data that we'll load below.
###Code
mnist_test_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
#
# YOUR CODE HERE: Calculate accuracy on the test set.
#
###Output
_____no_output_____
###Markdown
Solution: Click below for a possible solution. The code below is almost identical to the original `LinearClassifier` training code, with the exception of the NN-specific configuration, such as the hyperparameter for hidden units.
###Code
def train_nn_classification_model(
learning_rate,
steps,
batch_size,
hidden_units,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a neural network classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, as well as a confusion
matrix.
Args:
learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
hidden_units: A `list` of int values, specifying the number of neurons in each layer.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `DNNClassifier` object.
"""
periods = 10
# Caution: input pipelines are reset with each call to train.
# If the number of steps is small, your model may never see most of the data.
# So with multiple `.train` calls like this you may want to control the length
# of training with num_epochs passed to the input_fn. Or, you can do a really-big shuffle,
# or since it's in-memory data, shuffle all the data in the `input_fn`.
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create feature columns.
feature_columns = [tf.feature_column.numeric_column('pixels', shape=784)]
# Create a DNNClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
n_classes=10,
hidden_units=hidden_units,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range(0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space. Note that map() is lazy in
# Python 3, so wrap it in list() to make the removals actually run.
_ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e., by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
classifier = train_nn_classification_model(
learning_rate=0.05,
steps=10,
batch_size=30,
hidden_units=[100, 100],
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
Training model...
LogLoss error (on validation data):
period 00 : 28.35
period 01 : 21.47
period 02 : 29.57
period 03 : 25.10
period 04 : 22.51
period 05 : 21.00
period 06 : 21.44
period 07 : 17.42
period 08 : 26.36
period 09 : 14.98
Model training finished.
Final accuracy (on validation data): 0.57
###Markdown
Next, we verify the accuracy on the test set.
###Code
mnist_test_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
predict_test_input_fn = create_predict_input_fn(
test_examples, test_targets, batch_size=100)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])
accuracy = metrics.accuracy_score(test_targets, test_predictions)
print("Accuracy on test data: %0.2f" % accuracy)
###Output
Accuracy on test data: 0.56
###Markdown
Task 3: Visualize the weights of the first hidden layer. Let's take a few minutes to dig into our neural network and see what it has learned by reading its weights back out with the model's `get_variable_value()` method. The input layer of our model has `784` weights corresponding to the `28×28` pixel input images. The first hidden layer will have `784×N` weights where `N` is the number of nodes in that layer. We can turn those weights back into `28×28` images by *reshaping* each of the `N` `1×784` arrays of weights into `N` arrays of size `28×28`. Run the following cell to plot the weights. Note that this cell requires that a `DNNClassifier` called "classifier" has already been trained.
###Code
print(classifier.get_variable_names())
weights0 = classifier.get_variable_value("dnn/hiddenlayer_0/kernel")
print("weights0 shape:", weights0.shape)
num_nodes = weights0.shape[1]
num_rows = int(math.ceil(num_nodes / 10.0))
fig, axes = plt.subplots(num_rows, 10, figsize=(20, 2 * num_rows))
for coef, ax in zip(weights0.T, axes.ravel()):
# Each coef holds one node's incoming weights, reshaped from 1x784 to 28x28.
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.pink)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
###Output
['dnn/hiddenlayer_0/bias', 'dnn/hiddenlayer_0/bias/t_0/Adagrad', 'dnn/hiddenlayer_0/kernel', 'dnn/hiddenlayer_0/kernel/t_0/Adagrad', 'dnn/hiddenlayer_1/bias', 'dnn/hiddenlayer_1/bias/t_0/Adagrad', 'dnn/hiddenlayer_1/kernel', 'dnn/hiddenlayer_1/kernel/t_0/Adagrad', 'dnn/logits/bias', 'dnn/logits/bias/t_0/Adagrad', 'dnn/logits/kernel', 'dnn/logits/kernel/t_0/Adagrad', 'global_step']
weights0 shape: (784, 100)
###Markdown
Copyright 2017 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Classifying Handwritten Digits with Neural Networks  **Learning Objectives:** * Train both a linear model and a neural network to classify handwritten digits from the classic [MNIST](http://yann.lecun.com/exdb/mnist/) data set * Compare the performance of the linear and neural network classification models * Visualize the weights of a neural-network hidden layer Our goal is to map each input image to the correct numeric digit. We will create an NN with a few hidden layers and a Softmax layer at the top to select the winning class. Setup: First, let's download the data set, import TensorFlow and other utilities, and load the data into a *pandas* `DataFrame`. Note that this data is a sample of the original MNIST training data; we've taken 20000 rows at random.
###Code
from __future__ import print_function
import glob
import math
import os
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
mnist_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_train_small.csv",
sep=",",
header=None)
# Use just the first 10,000 records for training/validation.
mnist_dataframe = mnist_dataframe.head(10000)
mnist_dataframe = mnist_dataframe.reindex(np.random.permutation(mnist_dataframe.index))
mnist_dataframe.head()
###Output
_____no_output_____
###Markdown
Each row represents one labeled example. Column 0 represents the label that a human rater has assigned for one handwritten digit. For example, if Column 0 contains '6', then a human rater interpreted the handwritten character as the digit '6'. The ten digits 0-9 are each represented, with a unique class label for each possible digit. Thus, this is a multi-class classification problem with 10 classes.  Columns 1 through 784 contain the feature values, one per pixel for the 28×28=784 pixel values. The pixel values are on a gray scale in which 0 represents white, 255 represents black, and values between 0 and 255 represent shades of gray. Most of the pixel values are 0; you may want to take a minute to confirm that they aren't all 0. For example, adjust the following code block to print out the values in column 72.
###Code
mnist_dataframe.loc[:, 0:0]
###Output
_____no_output_____
###Markdown
Now, let's parse out the labels and features and look at a few examples. Note the use of `loc` which allows us to pull out columns based on original location, since we don't have a header row in this data set.
###Code
def parse_labels_and_features(dataset):
"""Extracts labels and features.
This is a good place to scale or transform the features if needed.
Args:
dataset: A Pandas `Dataframe`, containing the label on the first column and
monochrome pixel values on the remaining columns, in row major order.
Returns:
A `tuple` `(labels, features)`:
labels: A Pandas `Series`.
features: A Pandas `DataFrame`.
"""
labels = dataset[0]
# DataFrame.loc index ranges are inclusive at both ends.
features = dataset.loc[:,1:784]
# Scale the data to [0, 1] by dividing out the max value, 255.
features = features / 255
return labels, features
training_targets, training_examples = parse_labels_and_features(mnist_dataframe[:7500])
training_examples.describe()
validation_targets, validation_examples = parse_labels_and_features(mnist_dataframe[7500:10000])
validation_examples.describe()
###Output
_____no_output_____
###Markdown
Show a random example and its corresponding label.
###Code
rand_example = np.random.choice(training_examples.index)
_, ax = plt.subplots()
ax.matshow(training_examples.loc[rand_example].values.reshape(28, 28))
ax.set_title("Label: %i" % training_targets.loc[rand_example])
ax.grid(False)
###Output
_____no_output_____
###Markdown
Task 1: Build a Linear Model for MNIST. First, let's create a baseline model to compare against. The `LinearClassifier` provides a set of *k* one-vs-all classifiers, one for each of the *k* classes. You'll notice that in addition to reporting accuracy, and plotting Log Loss over time, we also display a [**confusion matrix**](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix shows which classes were misclassified as other classes. Which digits get confused for each other? Also note that we track the model's error using the `log_loss` function. This should not be confused with the loss function internal to `LinearClassifier` that is used for training.
###Code
def construct_feature_columns():
"""Construct the TensorFlow Feature Columns.
Returns:
A set of feature columns
"""
# There are 784 pixels in each image.
return set([tf.feature_column.numeric_column('pixels', shape=784)])
###Output
_____no_output_____
###Markdown
Here, we'll make separate input functions for training and for prediction. We'll nest them in `create_training_input_fn()` and `create_predict_input_fn()`, respectively, so we can invoke these functions to return the corresponding `_input_fn`s to pass to our `.train()` and `.predict()` calls.
###Code
def create_training_input_fn(features, labels, batch_size, num_epochs=None, shuffle=True):
"""A custom input_fn for sending MNIST data to the estimator for training.
Args:
features: The training features.
labels: The training labels.
batch_size: Batch size to use during training.
Returns:
A function that returns batches of training features and labels during
training.
"""
def _input_fn(num_epochs=None, shuffle=True):
# Input pipelines are reset with each call to .train(). To ensure model
# gets a good sampling of data, even when number of steps is small, we
# shuffle all the data before creating the Dataset object
idx = np.random.permutation(features.index)
raw_features = {"pixels":features.reindex(idx)}
raw_targets = np.array(labels[idx])
ds = Dataset.from_tensor_slices((raw_features,raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def create_predict_input_fn(features, labels, batch_size):
"""A custom input_fn for sending mnist data to the estimator for predictions.
Args:
features: The features to base predictions on.
labels: The labels of the prediction examples.
Returns:
A function that returns features and labels for predictions.
"""
def _input_fn():
raw_features = {"pixels": features.values}
raw_targets = np.array(labels)
ds = Dataset.from_tensor_slices((raw_features, raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def train_linear_classification_model(
learning_rate,
steps,
batch_size,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a linear classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, and a confusion
matrix.
Args:
learning_rate: A `float`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `LinearClassifier` object.
"""
periods = 10
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create a LinearClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.LinearClassifier(
feature_columns=construct_feature_columns(),
n_classes=10,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range(0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space. Note that map() is lazy in
# Python 3, so wrap it in list() to make the removals actually run.
_ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e., by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
###Output
_____no_output_____
###Markdown
**Spend 5 minutes seeing how well you can do on accuracy with a linear model of this form. For this exercise, limit yourself to experimenting with the hyperparameters for batch size, learning rate, and steps.** Stop if you get anything above about 0.9 accuracy.
###Code
classifier = train_linear_classification_model(
learning_rate=0.02,
steps=100,
batch_size=10,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
Training model...
LogLoss error (on validation data):
period 00 : 19.25
period 01 : 11.52
period 02 : 7.97
period 03 : 7.96
period 04 : 7.63
period 05 : 7.00
period 06 : 6.55
period 07 : 5.55
period 08 : 5.83
period 09 : 5.39
Model training finished.
Final accuracy (on validation data): 0.84
###Markdown
Solution: Click below for one possible solution. Here is a set of parameters that should attain roughly 0.9 accuracy.
###Code
_ = train_linear_classification_model(
learning_rate=0.03,
steps=1000,
batch_size=30,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
Training model...
LogLoss error (on validation data):
period 00 : 4.41
period 01 : 3.69
period 02 : 3.56
period 03 : 3.40
period 04 : 3.36
period 05 : 3.23
period 06 : 3.26
period 07 : 2.98
period 08 : 3.01
period 09 : 3.05
Model training finished.
Final accuracy (on validation data): 0.91
###Markdown
Task 2: Replace the Linear Classifier with a Neural Network. **Replace the `LinearClassifier` above with a [`DNNClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) and find a parameter combination that gives 0.95 or better accuracy.** You may wish to experiment with additional regularization methods, such as dropout. These additional regularization methods are documented in the comments for the `DNNClassifier` class.
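A minimal sketch of dropout with this API (assuming TF 1.x estimators; the 0.2 rate and the other values are illustrative, not tuned):
###Code
# Sketch: DNNClassifier accepts a `dropout` argument, the probability of
# dropping a given coordinate during training.
sketch_classifier = tf.estimator.DNNClassifier(
    feature_columns=construct_feature_columns(),
    n_classes=10,
    hidden_units=[100, 100],
    dropout=0.2,
    optimizer=tf.train.AdagradOptimizer(learning_rate=0.05))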
###Code
def train_nn_classification_model(
learning_rate,
steps,
batch_size,
hidden_units,
training_examples,
training_targets,
validation_examples,
validation_targets):
periods = 10
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create a DNNClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=construct_feature_columns(),
n_classes=10,
optimizer=my_optimizer,
hidden_units=hidden_units,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range (0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space. (list() forces the lazy map to execute under Python 3.)
_ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
###Output
_____no_output_____
###Markdown
Once you have a good model, double check that you didn't overfit the validation set by evaluating on the test data that we'll load below.
###Code
mnist_test_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
_ = train_nn_classification_model(
learning_rate=0.05,
steps=1000,
batch_size=30,
hidden_units=[100, 100],
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
Training model...
LogLoss error (on validation data):
period 00 : 4.59
period 01 : 3.52
period 02 : 2.47
period 03 : 1.91
period 04 : 1.88
period 05 : 1.34
period 06 : 1.35
period 07 : 1.19
period 08 : 0.97
period 09 : 1.02
Model training finished.
Final accuracy (on validation data): 0.97
###Markdown
Solution: Click below for a possible solution. The code below is almost identical to the original `LinearClassifier` training code, with the exception of the NN-specific configuration, such as the hyperparameter for hidden units.
###Code
def train_nn_classification_model(
learning_rate,
steps,
batch_size,
hidden_units,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a neural network classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, as well as a confusion
matrix.
Args:
learning_rate: An `int`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
hidden_units: A `list` of int values, specifying the number of neurons in each layer.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `DNNClassifier` object.
"""
periods = 10
# Caution: input pipelines are reset with each call to train.
# If the number of steps is small, your model may never see most of the data.
# So with multiple `.train` calls like this you may want to control the length
# of training with num_epochs passed to the input_fn. Or, you can do a really-big shuffle,
# or since it's in-memory data, shuffle all the data in the `input_fn`.
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create feature columns.
feature_columns = [tf.feature_column.numeric_column('pixels', shape=784)]
# Create a DNNClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
n_classes=10,
hidden_units=hidden_units,
optimizer=my_optimizer,
config=tf.contrib.learn.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range (0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space. (list() forces the lazy map to execute under Python 3.)
_ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
classifier = train_nn_classification_model(
learning_rate=0.05,
steps=100,
batch_size=30,
hidden_units=[100, 100],
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
Training model...
LogLoss error (on validation data):
period 00 : 18.32
period 01 : 12.96
period 02 : 9.09
period 03 : 8.04
period 04 : 7.61
period 05 : 7.57
period 06 : 6.00
period 07 : 4.77
period 08 : 5.91
period 09 : 3.88
Model training finished.
Final accuracy (on validation data): 0.89
###Markdown
Next, we verify the accuracy on the test set.
###Code
mnist_test_dataframe = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
predict_test_input_fn = create_predict_input_fn(
test_examples, test_targets, batch_size=100)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])
accuracy = metrics.accuracy_score(test_targets, test_predictions)
print("Accuracy on test data: %0.2f" % accuracy)
###Output
Accuracy on test data: 0.40
###Markdown
Task 3: Visualize the weights of the first hidden layer. Let's take a few minutes to dig into our neural network and see what it has learned by inspecting the weight variables of our model. The input layer of our model has `784` weights corresponding to the `28×28` pixel input images. The first hidden layer will have `784×N` weights where `N` is the number of nodes in that layer. We can turn those weights back into `28×28` images by *reshaping* each of the `N` `1×784` arrays of weights into `N` arrays of size `28×28`. Run the following cell to plot the weights. Note that this cell requires that a `DNNClassifier` called "classifier" has already been trained.
###Code
print(classifier.get_variable_names())
weights0 = classifier.get_variable_value("dnn/hiddenlayer_0/kernel")
print("weights0 shape:", weights0.shape)
num_nodes = weights0.shape[1]
num_rows = int(math.ceil(num_nodes / 10.0))
fig, axes = plt.subplots(num_rows, 10, figsize=(20, 2 * num_rows))
for coef, ax in zip(weights0.T, axes.ravel()):
# Weights in coef is reshaped from 1x784 to 28x28.
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.pink)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
###Output
['dnn/hiddenlayer_0/bias', 'dnn/hiddenlayer_0/bias/t_0/Adagrad', 'dnn/hiddenlayer_0/kernel', 'dnn/hiddenlayer_0/kernel/t_0/Adagrad', 'dnn/hiddenlayer_1/bias', 'dnn/hiddenlayer_1/bias/t_0/Adagrad', 'dnn/hiddenlayer_1/kernel', 'dnn/hiddenlayer_1/kernel/t_0/Adagrad', 'dnn/logits/bias', 'dnn/logits/bias/t_0/Adagrad', 'dnn/logits/kernel', 'dnn/logits/kernel/t_0/Adagrad', 'global_step']
weights0 shape: (784, 100)
###Markdown
[View in Colaboratory](https://colab.research.google.com/github/DillipKS/MLCC_assignments/blob/master/multi_class_classification_of_handwritten_digits.ipynb) Copyright 2017 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Classifying Handwritten Digits with Neural Networks  **Learning Objectives:** * Train both a linear model and a neural network to classify handwritten digits from the classic [MNIST](http://yann.lecun.com/exdb/mnist/) data set * Compare the performance of the linear and neural network classification models * Visualize the weights of a neural-network hidden layer. Our goal is to map each input image to the correct numeric digit. We will create a NN with a few hidden layers and a Softmax layer at the top to select the winning class. Setup: First, let's download the data set, import TensorFlow and other utilities, and load the data into a *pandas* `DataFrame`. Note that this data is a sample of the original MNIST training data; we've taken 20,000 rows at random.
###Code
from __future__ import print_function
import glob
import math
import os
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
mnist_dataframe = pd.read_csv(
"https://dl.google.com/mlcc/mledu-datasets/mnist_train_small.csv",
sep=",",
header=None)
# Use just the first 10,000 records for training/validation.
mnist_dataframe = mnist_dataframe.head(10000)
mnist_dataframe = mnist_dataframe.reindex(np.random.permutation(mnist_dataframe.index))
mnist_dataframe.head()
###Output
_____no_output_____
###Markdown
Each row represents one labeled example. Column 0 represents the label that a human rater has assigned for one handwritten digit. For example, if Column 0 contains '6', then a human rater interpreted the handwritten character as the digit '6'. The ten digits 0-9 are each represented, with a unique class label for each possible digit. Thus, this is a multi-class classification problem with 10 classes.  Columns 1 through 784 contain the feature values, one per pixel for the 28×28=784 pixel values. The pixel values are on a gray scale in which 0 represents white, 255 represents black, and values between 0 and 255 represent shades of gray. Most of the pixel values are 0; you may want to take a minute to confirm that they aren't all 0. For example, adjust the following code cell to print out the values in column 72.
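As a quick sanity check (a sketch, assuming `mnist_dataframe` has been loaded as above), we can measure how sparse the images actually are:
###Code
# Sketch: fraction of non-zero pixel values across the 784 feature columns.
pixel_values = mnist_dataframe.loc[:, 1:784]
print("Fraction of non-zero pixels: %0.3f" % (pixel_values != 0).values.mean())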
###Code
mnist_dataframe.loc[:, 72:72]
###Output
_____no_output_____
###Markdown
Now, let's parse out the labels and features and look at a few examples. Note the use of `loc` which allows us to pull out columns based on original location, since we don't have a header row in this data set.
###Code
def parse_labels_and_features(dataset):
"""Extracts labels and features.
This is a good place to scale or transform the features if needed.
Args:
dataset: A Pandas `Dataframe`, containing the label on the first column and
monochrome pixel values on the remaining columns, in row major order.
Returns:
A `tuple` `(labels, features)`:
labels: A Pandas `Series`.
features: A Pandas `DataFrame`.
"""
labels = dataset[0]
# DataFrame.loc index ranges are inclusive at both ends.
features = dataset.loc[:,1:784]
# Scale the data to [0, 1] by dividing out the max value, 255.
features = features / 255
return labels, features
training_targets, training_examples = parse_labels_and_features(mnist_dataframe[:7500])
training_examples.describe()
validation_targets, validation_examples = parse_labels_and_features(mnist_dataframe[7500:10000])
validation_examples.describe()
display.display(training_targets.hist())
display.display(validation_targets.hist())
###Output
_____no_output_____
###Markdown
Show a random example and its corresponding label.
###Code
rand_example = np.random.choice(training_examples.index)
_, ax = plt.subplots()
ax.matshow(training_examples.loc[rand_example].values.reshape(28, 28))
ax.set_title("Label: %i" % training_targets.loc[rand_example])
ax.grid(False)
###Output
_____no_output_____
###Markdown
Task 1: Build a Linear Model for MNIST. First, let's create a baseline model to compare against. The `LinearClassifier` provides a set of *k* one-vs-all classifiers, one for each of the *k* classes. You'll notice that in addition to reporting accuracy and plotting Log Loss over time, we also display a [**confusion matrix**](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix shows which classes were misclassified as other classes. Which digits get confused for each other? Also note that we track the model's error using the `log_loss` function. This should not be confused with the loss function internal to `LinearClassifier` that is used for training.
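To make that distinction concrete, here is a tiny standalone sketch of `log_loss` on dummy one-hot "probabilities" (toy data, not model output): a wrong but fully confident prediction is penalized very heavily, which is why the per-period values printed during training below look large.
###Code
# Sketch: sklearn's log_loss on a toy 3-class problem with one-hot predictions.
toy_targets = [0, 1, 2]
toy_preds = [[1.0, 0.0, 0.0],  # correct, confident
             [0.0, 1.0, 0.0],  # correct, confident
             [1.0, 0.0, 0.0]]  # wrong: true class is 2
print(metrics.log_loss(toy_targets, toy_preds))  # dominated by the one wrong prediction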
###Code
def construct_feature_columns():
"""Construct the TensorFlow Feature Columns.
Returns:
A set of feature columns
"""
# There are 784 pixels in each image.
return set([tf.feature_column.numeric_column('pixels', shape=784)])
###Output
_____no_output_____
###Markdown
Here, we'll make separate input functions for training and for prediction. We'll nest them in `create_training_input_fn()` and `create_predict_input_fn()`, respectively, so we can invoke these functions to return the corresponding `_input_fn`s to pass to our `.train()` and `.predict()` calls.
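The pattern is a simple closure factory; a standalone sketch with illustrative names:
###Code
# Sketch: a factory that returns a function closed over its configuration,
# mirroring how create_training_input_fn / create_predict_input_fn work below.
def make_adder(amount):
    def _add(x):
        return x + amount
    return _add
add_five = make_adder(5)
print(add_five(3))  # 8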
###Code
def create_training_input_fn(features, labels, batch_size, num_epochs=None, shuffle=True):
"""A custom input_fn for sending MNIST data to the estimator for training.
Args:
features: The training features.
labels: The training labels.
batch_size: Batch size to use during training.
Returns:
A function that returns batches of training features and labels during
training.
"""
def _input_fn(num_epochs=None, shuffle=True):
# Input pipelines are reset with each call to .train(). To ensure model
# gets a good sampling of data, even when number of steps is small, we
# shuffle all the data before creating the Dataset object
idx = np.random.permutation(features.index)
raw_features = {"pixels":features.reindex(idx)}
raw_targets = np.array(labels[idx])
ds = Dataset.from_tensor_slices((raw_features,raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def create_predict_input_fn(features, labels, batch_size):
"""A custom input_fn for sending mnist data to the estimator for predictions.
Args:
features: The features to base predictions on.
labels: The labels of the prediction examples.
Returns:
A function that returns features and labels for predictions.
"""
def _input_fn():
raw_features = {"pixels": features.values}
raw_targets = np.array(labels)
ds = Dataset.from_tensor_slices((raw_features, raw_targets)) # warning: 2GB limit
ds = ds.batch(batch_size)
# Return the next batch of data.
feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
return feature_batch, label_batch
return _input_fn
def train_linear_classification_model(
learning_rate,
steps,
batch_size,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a linear classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, and a confusion
matrix.
Args:
learning_rate: An `int`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `LinearClassifier` object.
"""
periods = 10
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create a LinearClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.LinearClassifier(
feature_columns=construct_feature_columns(),
n_classes=10,
optimizer=my_optimizer,
config=tf.estimator.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range (0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space. (list() forces the lazy map to execute under Python 3.)
_ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
###Output
_____no_output_____
###Markdown
**Spend 5 minutes seeing how well you can do on accuracy with a linear model of this form. For this exercise, limit yourself to experimenting with the hyperparameters for batch size, learning rate, and steps.** Stop if you get anything above about 0.9 accuracy.
###Code
classifier = train_linear_classification_model(
learning_rate=0.02,
steps=800,
batch_size=20,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
Training model...
LogLoss error (on validation data):
period 00 : 5.58
period 01 : 4.89
period 02 : 4.68
period 03 : 4.42
period 04 : 4.50
period 05 : 4.14
period 06 : 4.03
period 07 : 4.19
period 08 : 4.13
period 09 : 3.94
Model training finished.
Final accuracy (on validation data): 0.89
###Markdown
Solution: Click below for one possible solution. Here is a set of parameters that should attain roughly 0.9 accuracy.
###Code
_ = train_linear_classification_model(
learning_rate=0.03,
steps=1000,
batch_size=30,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
Task 2: Replace the Linear Classifier with a Neural Network. **Replace the `LinearClassifier` above with a [`DNNClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) and find a parameter combination that gives 0.95 or better accuracy.** You may wish to experiment with additional regularization methods, such as dropout. These additional regularization methods are documented in the comments for the `DNNClassifier` class.
###Code
# YOUR CODE HERE: Replace the linear classifier with a neural network.
def train_DNN_model(
learning_rate,
regularization,
steps,
batch_size,
hidden_units,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a DNN classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, and a confusion
matrix.
Args:
learning_rate: An `int`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `LinearClassifier` object.
"""
periods = 10
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create a DNNClassifier object.
my_optimizer = tf.train.ProximalAdagradOptimizer(learning_rate=learning_rate,
l1_regularization_strength=regularization)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=construct_feature_columns(),
n_classes=10,
hidden_units=hidden_units,
optimizer=my_optimizer,
config=tf.contrib.learn.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range (0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space. (list() forces the lazy map to execute under Python 3.)
_ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
classifier = train_DNN_model(
learning_rate=0.03,
regularization=0.001,
steps=500,
batch_size=50,
hidden_units=[100,100],
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
Training model...
LogLoss error (on validation data):
period 00 : 6.31
period 01 : 4.78
period 02 : 4.05
period 03 : 3.29
period 04 : 3.23
period 05 : 3.04
period 06 : 2.65
period 07 : 2.80
period 08 : 3.07
period 09 : 2.83
Model training finished.
Final accuracy (on validation data): 0.92
###Markdown
Once you have a good model, double check that you didn't overfit the validation set by evaluating on the test data that we'll load below.
###Code
mnist_test_dataframe = pd.read_csv(
"https://dl.google.com/mlcc/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
# YOUR CODE HERE: Calculate accuracy on the test set.
predict_test_input_fn = create_predict_input_fn(
test_examples, test_targets, batch_size=20)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_pred_class_id = np.array([item['class_ids'][0] for item in test_predictions])
test_pred_one_hot = tf.keras.utils.to_categorical(test_pred_class_id,10)
# Compute test errors.
test_log_loss = metrics.log_loss(test_targets, test_pred_one_hot)
accuracy = metrics.accuracy_score(test_targets, test_pred_class_id)
print("Log Loss error (on test data): %0.2f" % test_log_loss)
print("Final accuracy (on test data): %0.2f" % accuracy)
###Output
Log Loss error (on test data): 2.27
Final accuracy (on test data): 0.93
###Markdown
Solution: Click below for a possible solution. The code below is almost identical to the original `LinearClassifier` training code, with the exception of the NN-specific configuration, such as the hyperparameter for hidden units.
###Code
def train_nn_classification_model(
learning_rate,
steps,
batch_size,
hidden_units,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a neural network classification model for the MNIST digits dataset.
In addition to training, this function also prints training progress information,
a plot of the training and validation loss over time, as well as a confusion
matrix.
Args:
learning_rate: An `int`, the learning rate to use.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
hidden_units: A `list` of int values, specifying the number of neurons in each layer.
training_examples: A `DataFrame` containing the training features.
training_targets: A `DataFrame` containing the training labels.
validation_examples: A `DataFrame` containing the validation features.
validation_targets: A `DataFrame` containing the validation labels.
Returns:
The trained `DNNClassifier` object.
"""
periods = 10
# Caution: input pipelines are reset with each call to train.
# If the number of steps is small, your model may never see most of the data.
# So with multiple `.train` calls like this you may want to control the length
# of training with num_epochs passed to the input_fn. Or, you can do a really-big shuffle,
# or since it's in-memory data, shuffle all the data in the `input_fn`.
steps_per_period = steps / periods
# Create the input functions.
predict_training_input_fn = create_predict_input_fn(
training_examples, training_targets, batch_size)
predict_validation_input_fn = create_predict_input_fn(
validation_examples, validation_targets, batch_size)
training_input_fn = create_training_input_fn(
training_examples, training_targets, batch_size)
# Create feature columns.
feature_columns = [tf.feature_column.numeric_column('pixels', shape=784)]
# Create a DNNClassifier object.
my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
n_classes=10,
hidden_units=hidden_units,
optimizer=my_optimizer,
config=tf.contrib.learn.RunConfig(keep_checkpoint_max=1)
)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss error (on validation data):")
training_errors = []
validation_errors = []
for period in range (0, periods):
# Train the model, starting from the prior state.
classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute probabilities.
training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
training_probabilities = np.array([item['probabilities'] for item in training_predictions])
training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
# Compute training and validation errors.
training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_errors.append(training_log_loss)
validation_errors.append(validation_log_loss)
print("Model training finished.")
# Remove event files to save disk space. (list() forces the lazy map to execute under Python 3.)
_ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))
# Calculate final predictions (not probabilities, as above).
final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
final_predictions = np.array([item['class_ids'][0] for item in final_predictions])
accuracy = metrics.accuracy_score(validation_targets, final_predictions)
print("Final accuracy (on validation data): %0.2f" % accuracy)
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.plot(training_errors, label="training")
plt.plot(validation_errors, label="validation")
plt.legend()
plt.show()
# Output a plot of the confusion matrix.
cm = metrics.confusion_matrix(validation_targets, final_predictions)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class).
cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(cm_normalized, cmap="bone_r")
ax.set_aspect(1)
plt.title("Confusion matrix")
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
return classifier
classifier = train_nn_classification_model(
learning_rate=0.05,
steps=1000,
batch_size=30,
hidden_units=[100, 100],
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
Next, we verify the accuracy on the test set.
###Code
mnist_test_dataframe = pd.read_csv(
"https://dl.google.com/mlcc/mledu-datasets/mnist_test.csv",
sep=",",
header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
predict_test_input_fn = create_predict_input_fn(
test_examples, test_targets, batch_size=100)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])
accuracy = metrics.accuracy_score(test_targets, test_predictions)
print("Accuracy on test data: %0.2f" % accuracy)
###Output
_____no_output_____
###Markdown
Task 3: Visualize the weights of the first hidden layer. Let's take a few minutes to dig into our neural network and see what it has learned by inspecting the weight variables of our model. The input layer of our model has `784` weights corresponding to the `28×28` pixel input images. The first hidden layer will have `784×N` weights where `N` is the number of nodes in that layer. We can turn those weights back into `28×28` images by *reshaping* each of the `N` `1×784` arrays of weights into `N` arrays of size `28×28`. Run the following cell to plot the weights. Note that this cell requires that a `DNNClassifier` called "classifier" has already been trained.
###Code
print(classifier.get_variable_names())
weights0 = classifier.get_variable_value("dnn/hiddenlayer_0/kernel")
print("weights0 shape:", weights0.shape)
num_nodes = weights0.shape[1]
num_rows = int(math.ceil(num_nodes / 10.0))
fig, axes = plt.subplots(num_rows, 10, figsize=(20, 2 * num_rows))
for coef, ax in zip(weights0.T, axes.ravel()):
# Weights in coef is reshaped from 1x784 to 28x28.
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.pink)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
###Output
['dnn/hiddenlayer_0/bias', 'dnn/hiddenlayer_0/bias/t_0/ProximalAdagrad', 'dnn/hiddenlayer_0/kernel', 'dnn/hiddenlayer_0/kernel/t_0/ProximalAdagrad', 'dnn/hiddenlayer_1/bias', 'dnn/hiddenlayer_1/bias/t_0/ProximalAdagrad', 'dnn/hiddenlayer_1/kernel', 'dnn/hiddenlayer_1/kernel/t_0/ProximalAdagrad', 'dnn/logits/bias', 'dnn/logits/bias/t_0/ProximalAdagrad', 'dnn/logits/kernel', 'dnn/logits/kernel/t_0/ProximalAdagrad', 'global_step']
weights0 shape: (784, 100)
|
PythonNotebooks/ROC_and_CI/Comp_Vision_Ishan_Handa_ROC_and_CI_Hedgehog.ipynb | ###Markdown
1. First, we load the CSV files where the positive and negative test results for each animal are stored.
###Code
import matplotlib.pyplot as plt
import numpy
import csv
# Change the path to csv file appropriately
hedgehog_positive_csv = '/Users/ishanhanda/Documents/NYU_Fall16/Comp_Vision/Project/ProjectWorkspace/DataSets/OUTPUTS/Hedgehog.csv'
hedgehog_negative_csv = '/Users/ishanhanda/Documents/NYU_Fall16/Comp_Vision/Project/ProjectWorkspace/DataSets/OUTPUTS/Hedgehog_neg.csv'
def get_data_from_file(file_name):
print('Loading from file: ' + file_name)
reader = csv.reader(open(file_name,"rt"))
temp = list(reader)
return numpy.array(temp).astype('float')
positive_data = get_data_from_file(hedgehog_positive_csv)
positive_length = len(positive_data)
print('Hedgehog positive test samples count: {}'.format(positive_length))
negative_data = get_data_from_file(hedgehog_negative_csv)
negative_length = len(negative_data)
print('Hedgehog negative test samples count: {}'.format(negative_length))
###Output
Loading from file: /Users/ishanhanda/Documents/NYU_Fall16/Comp_Vision/Project/ProjectWorkspace/DataSets/OUTPUTS/Hedgehog.csv
Hedgehog positive test samples count: 51
Loading from file: /Users/ishanhanda/Documents/NYU_Fall16/Comp_Vision/Project/ProjectWorkspace/DataSets/OUTPUTS/Hedgehog_neg.csv
Hedgehog negative test samples count: 51
###Markdown
2. Now we need to define the threshold points over which the ROC will be plotted.
###Code
# Here we are defining preset threshold levels for which TPR and FPR values will be calculated
thresholds = numpy.arange(0.0,1.0,0.05)
print('Thresholds: {}'.format(thresholds))
###Output
Thresholds: [ 0. 0.05 0.1 0.15 0.2 0.25 0.3 0.35 0.4 0.45 0.5 0.55
0.6 0.65 0.7 0.75 0.8 0.85 0.9 0.95]
###Markdown
Now we calculate the TPR and FPR at each threshold, for every pair of positive and negative test samples.
###Code
sample_size = min(positive_length, negative_length)
# all_TPRs and all_FPRs will be used later to evaluate confidence intervals for each threshold level
all_TPRs = [[None for _ in range(sample_size)] for _ in range(len(thresholds))]
all_FPRs = [[None for _ in range(sample_size)] for _ in range(len(thresholds))]
for j in range(0, sample_size):
current_positive_sample = positive_data[j]
TPRs = [None] * len(thresholds)
current_negative_sample = negative_data[j]
FPRs = [None] * len(thresholds)
for i in range(0, len(thresholds)):
test_positive = current_positive_sample[current_positive_sample >= thresholds[i]]
tpr = len(test_positive) / len(current_positive_sample)
TPRs[i] = tpr # This is the calculated TPR value for threshold level i in sample j
all_TPRs[i][j] = tpr # The calculated TPR value is also added to all_TPR values for this threshold.(Used later to calculate confidence intervals)
test_negative = current_negative_sample[current_negative_sample >= thresholds[i]]
fpr = len(test_negative) / len(current_negative_sample)
FPRs[i] = fpr # This is the calculated FPR value for threshold level i in sample j
all_FPRs[i][j] = fpr
print('\n\nPLOTTING ROC FOR CASE: {}'.format(j))
plt.scatter(FPRs, TPRs, color='red')
plt.show()
import scipy as sp
import scipy.stats
# Function to calculate confidence interval. By default it calculated 80%.
def mean_confidence_interval(data, confidence=0.8):
a = 1.0*numpy.array(data)
n = len(a)
m, se = numpy.mean(a), scipy.stats.sem(a)
h = se * sp.stats.t.ppf((1 + confidence) / 2., n - 1)
return m, max(0.0, m - h), min(1.0, m + h)
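# Standalone sanity check (sketch, synthetic data rather than the hedgehog results):
# for 50 draws from N(0.7, 0.1), the 80% interval should sit tightly around 0.7.
synthetic_sample = numpy.random.normal(loc=0.7, scale=0.1, size=50)
m_chk, lo_chk, hi_chk = mean_confidence_interval(synthetic_sample, confidence=0.8)
print("mean={:.3f}, 80% CI=({:.3f}, {:.3f})".format(m_chk, lo_chk, hi_chk))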
# Calculating and printing Confidence Intervals for all threshold values.
thresh_s = []
ci_lower_TPR = []
ci_lower_FPR = []
ci_TPR_diff = []
ci_upper_TPR = []
ci_upper_FPR = []
ci_FPR_diff = []
print("\n\nConfidence Intervals for TPRs:")
for i in range(0, len(thresholds)):
mean_tpr, lower_tpr, upper_tpr = mean_confidence_interval(all_TPRs[i])
thresh = round(thresholds[i],2)
thresh_s.append(thresh)
diff = upper_tpr - lower_tpr
ci_TPR_diff.append(diff)
ci_lower_TPR.append(lower_tpr)
ci_upper_TPR.append(upper_tpr)
print("80% Confidence Interval of TPR with threshold {} is: {} to {}".format(thresh, lower_tpr, upper_tpr))
print("\n\nConfidence Intervals for FPRs:")
for i in range(0, len(thresholds)):
mean_fpr, lower_fpr, upper_fpr = mean_confidence_interval(all_FPRs[i])
thresh = round(thresholds[i],2)
diff = upper_fpr - lower_fpr
ci_FPR_diff.append(diff)
ci_lower_FPR.append(lower_fpr)
ci_upper_FPR.append(upper_fpr)
print("80% Confidence Interval of FPR with threshold {} is: {} to {}".format(thresh, lower_fpr, upper_fpr))
# Plotting Confidence Intervals for TPR.
import pylab
N = len(thresh_s)
ind = numpy.arange(N) # the x locations for the groups
width = 0.25 # the width of the bars: can also be len(x) sequence
fig = plt.figure(figsize=(8,6))
p2 = plt.bar(ind, ci_TPR_diff, width, color='b',
bottom=ci_lower_TPR)
plt.ylabel('Confidence Intervals')
plt.xlabel('Thresholds')
plt.title('Confidence Intervals for TPR (Hedgehog)')
plt.xticks(ind + width/2., thresh_s)
plt.yticks(numpy.arange(0, 1.1, 0.05))
plt.grid()
pylab.savefig('CI_TPR_Hedgehog.png')
plt.show()
# Plotting Confidence Intervals for FPR.
fig = plt.figure(figsize=(8,6))
p2 = plt.bar(ind, ci_FPR_diff, width, color='r',
bottom=ci_lower_FPR)
plt.ylabel('Confidence Intervals')
plt.xlabel('Thresholds')
plt.title('Confidence Intervals for FPR (Hedgehog)')
plt.xticks(ind + width/2., thresh_s)
plt.yticks(numpy.arange(-0.05, 1.1, 0.05))
plt.grid()
pylab.savefig('CI_FPR_Hedgehog.png')
plt.show()
###Output
_____no_output_____ |
TESTS/workspace/mobile/.ipynb_checkpoints/index_old-checkpoint.ipynb | ###Markdown
Object Localization with TensorFlow. Created for the Coursera Guided Project: [Object Localization with TensorFlow](https://www.coursera.org/projects/object-localization-tensorflow). All emojis designed by [OpenMoji](https://openmoji.org/) – the open-source emoji and icon project. License: CC BY-SA 4.0. Task 2: Download and Visualize Data
###Code
!wget https://github.com/hfg-gmuend/openmoji/releases/latest/download/openmoji-72x72-color.zip
!mkdir emojis
!unzip -q openmoji-72x72-color.zip -d ./emojis
%matplotlib inline
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
from PIL import Image, ImageDraw
from tensorflow.keras.layers import Input, Dense, Flatten, Conv2D, MaxPool2D, BatchNormalization, Dropout
print('Using TensorFlow version', tf.__version__)
emojis = {
0: {'name': 'happy', 'file': '1F642.png'},
1: {'name': 'laughing', 'file': '1F602.png'},
2: {'name': 'skeptical', 'file': '1F928.png'},
3: {'name': 'sad', 'file': '1F630.png'},
4: {'name': 'cool', 'file': '1F60E.png'},
5: {'name': 'whoa', 'file': '1F62F.png'},
6: {'name': 'crying', 'file': '1F62D.png'},
7: {'name': 'puking', 'file': '1F92E.png'},
8: {'name': 'nervous', 'file': '1F62C.png'}
}
plt.figure(figsize=(9, 9))
for i, (j, e) in enumerate(emojis.items()):
plt.subplot(3, 3, i + 1)
plt.imshow(plt.imread(os.path.join('emojis', e['file'])))
plt.xlabel(e['name'])
plt.xticks([])
plt.yticks([])
plt.show()
###Output
_____no_output_____
###Markdown
Task 3: Create Examples
###Code
for class_id, values in emojis.items():
png_file = Image.open(os.path.join('emojis', values['file'])).convert('RGBA')
png_file.load()
new_file = Image.new("RGB", png_file.size, (255, 255, 255))
new_file.paste(png_file, mask=png_file.split()[3])
emojis[class_id]['image'] = new_file
emojis
def create_example():
class_id = np.random.randint(0, 9)
image = np.ones((144, 144, 3)) * 255
row = np.random.randint(0, 72)
col = np.random.randint(0, 72)
image[row: row + 72, col: col + 72, :] = np.array(emojis[class_id]['image'])
return image.astype('uint8'), class_id, (row + 10) / 144, (col + 10) / 144
image, class_id, row, col = create_example()
plt.imshow(image);
###Output
_____no_output_____
###Markdown
Task 4: Plot Bounding Boxes
###Code
def plot_bounding_box(image, gt_coords, pred_coords=[], norm=False):
if norm:
image *= 255.
image = image.astype('uint8')
image = Image.fromarray(image)
draw = ImageDraw.Draw(image)
row, col = gt_coords
row *= 144
col *= 144
draw.rectangle((col, row, col + 52, row + 52), outline='green', width=3)
if len(pred_coords) == 2:
row, col = pred_coords
row *= 144
col *= 144
draw.rectangle((col, row, col + 52, row + 52), outline='red', width=3)
return image
image = plot_bounding_box(image, gt_coords=[row, col])
plt.imshow(image)
plt.title(emojis[class_id]['name'])
plt.show()
###Output
_____no_output_____
###Markdown
Task 5: Data Generator
###Code
def data_generator(batch_size=16):
while True:
x_batch = np.zeros((batch_size, 144, 144, 3))
y_batch = np.zeros((batch_size, 9))
bbox_batch = np.zeros((batch_size, 2))
for i in range(0, batch_size):
image, class_id, row, col = create_example()
x_batch[i] = image / 255.
y_batch[i, class_id] = 1.0
bbox_batch[i] = np.array([row, col])
yield {'image': x_batch}, {'class_out': y_batch, 'box_out': bbox_batch}
example, label = next(data_generator(1))
image = example['image'][0]
class_id = np.argmax(label['class_out'][0])
coords = label['box_out'][0]
image = plot_bounding_box(image, coords, norm=True)
plt.imshow(image)
plt.title(emojis[class_id]['name'])
plt.show()
###Output
_____no_output_____
###Markdown
Task 6: Model
###Code
input_ = Input(shape=(144, 144, 3), name='image')
x = input_
for i in range(0, 5):
n_filters = 2**(4 + i)
x = Conv2D(n_filters, 3, activation='relu')(x)
x = BatchNormalization()(x)
x = MaxPool2D(2)(x)
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
class_out = Dense(9, activation='softmax', name='class_out')(x)
box_out = Dense(2, name='box_out')(x)
model = tf.keras.models.Model(input_, [class_out, box_out])
model.summary()
###Output
_____no_output_____
###Markdown
Task 7: Custom Metric: IoU
###Code
class IoU(tf.keras.metrics.Metric):
def __init__(self, **kwargs):
super(IoU, self).__init__(**kwargs)
self.iou = self.add_weight(name='iou', initializer='zeros')
self.total_iou = self.add_weight(name='total_iou', initializer='zeros')
self.num_ex = self.add_weight(name='num_ex', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
def get_box(y):
rows, cols = y[:, 0], y[:, 1]
rows, cols = rows * 144, cols * 144
y1, y2 = rows, rows + 52
x1, x2 = cols, cols + 52
return x1, y1, x2, y2
def get_area(x1, y1, x2, y2):
return tf.math.abs(x2 - x1) * tf.math.abs(y2 - y1)
gt_x1, gt_y1, gt_x2, gt_y2 = get_box(y_true)
p_x1, p_y1, p_x2, p_y2 = get_box(y_pred)
i_x1 = tf.maximum(gt_x1, p_x1)
i_y1 = tf.maximum(gt_y1, p_y1)
i_x2 = tf.minimum(gt_x2, p_x2)
i_y2 = tf.minimum(gt_y2, p_y2)
i_area = get_area(i_x1, i_y1, i_x2, i_y2)
u_area = get_area(gt_x1, gt_y1, gt_x2, gt_y2) + get_area(p_x1, p_y1, p_x2, p_y2) - i_area
iou = tf.math.divide(i_area, u_area)
self.num_ex.assign_add(1)
self.total_iou.assign_add(tf.reduce_mean(iou))
self.iou = tf.math.divide(self.total_iou, self.num_ex)
def result(self):
return self.iou
def reset_state(self):
self.iou = self.add_weight(name='iou', initializer='zeros')
self.total_iou = self.add_weight(name='total_iou', initializer='zeros')
self.num_ex = self.add_weight(name='num_ex', initializer='zeros')
###Output
_____no_output_____
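###Markdown
A quick numeric sanity check of the metric (a sketch; assumes eager execution, i.e. TF 2.x): two 52×52 boxes offset by 26 pixels vertically overlap in a 26×52 region, so the IoU should be 1352 / (2·2704 − 1352) = 1/3.
###Code
iou_check = IoU(name='iou_check')
iou_check.update_state(tf.constant([[0.0, 0.0]]), tf.constant([[26.0 / 144.0, 0.0]]))
print(iou_check.result().numpy())  # ~0.3333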
###Markdown
Task 8: Compile the Model
###Code
model.compile(
loss={
'class_out': 'categorical_crossentropy',
'box_out': 'mse'
},
optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
metrics={
'class_out': 'accuracy',
'box_out': IoU(name='iou')
}
)
###Output
_____no_output_____
###Markdown
Task 9: Custom Callback: Model Testing
###Code
def test_model(model, test_datagen):
example, label = next(test_datagen)
x = example['image']
y = label['class_out']
box = label['box_out']
pred_y, pred_box = model.predict(x)
pred_coords = pred_box[0]
gt_coords = box[0]
pred_class = np.argmax(pred_y[0])
image = x[0]
gt = emojis[np.argmax(y[0])]['name']
pred_class_name = emojis[pred_class]['name']
image = plot_bounding_box(image, gt_coords, pred_coords, norm=True)
color = 'green' if gt == pred_class_name else 'red'
plt.imshow(image)
plt.xlabel(f'Pred: {pred_class_name}', color=color)
plt.ylabel(f'GT: {gt}', color=color)
plt.xticks([])
plt.yticks([])
def test(model):
test_datagen = data_generator(1)
plt.figure(figsize=(16, 4))
for i in range(0, 6):
plt.subplot(1, 6, i + 1)
test_model(model, test_datagen)
plt.show()
test(model)
class ShowTestImages(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
test(self.model)
###Output
_____no_output_____
###Markdown
Task 10: Model Training
###Code
def lr_schedule(epoch, lr):
if (epoch + 1) % 5 == 0:
lr *= 0.2
return max(lr, 3e-7)
_ = model.fit(
data_generator(),
epochs=50,
steps_per_epoch=500,
callbacks=[
ShowTestImages(),
tf.keras.callbacks.EarlyStopping(monitor='box_out_iou', patience=3, mode='max'),
tf.keras.callbacks.LearningRateScheduler(lr_schedule)
]
)
###Output
_____no_output_____ |
ch07-scaling/Recipe-3-robust-scaling.ipynb | ###Markdown
Scaling to quantiles and median - RobustScaling. In this procedure the median is removed from the observations, which are then scaled to the inter-quartile range (IQR). The IQR is the range between the 1st quartile (25th quantile) and the 3rd quartile (75th quantile): X_scaled = (X - X.median()) / (X.quantile(0.75) - X.quantile(0.25))
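A minimal numeric sketch of that formula by hand (standalone; the toy values are illustrative):
###Code
# Sketch: robust-scale a toy pandas Series manually, per the formula above.
import pandas as pd
s = pd.Series([1.0, 2.0, 2.5, 3.0, 100.0])  # note the outlier
print((s - s.median()) / (s.quantile(0.75) - s.quantile(0.25)))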
###Code
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
# the scaler - for robust scaling
from sklearn.preprocessing import RobustScaler
# load the California House price data from Scikit-learn
X, y = fetch_california_housing(return_X_y=True, as_frame=True)
# Remove 2 variables:
X.drop(labels=["Latitude", "Longitude"], axis=1, inplace=True)
# display top 5 rows
X.head()
# let's separate the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X,
y,
test_size=0.3,
random_state=0,
)
X_train.shape, X_test.shape
# set up the scaler
scaler = RobustScaler()
# fit the scaler to the train set, it will learn the parameters
scaler.fit(X_train)
# transform train and test sets
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# the scaler stores the median values of the features as learned from train set
scaler.center_
# the scaler stores the IQR values of the features as learned from train set
scaler.scale_
# let's transform the returned NumPy arrays to dataframes
X_train_scaled = pd.DataFrame(X_train_scaled, columns=X_train.columns)
X_test_scaled = pd.DataFrame(X_test_scaled, columns=X_test.columns)
# Inspect the original value statistics
X_test.describe()
# inspect the values after scaling
X_test_scaled.describe()
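# Added sketch: by construction the *train* set has scaled median ~0 and
# IQR ~1; the test set is only approximately so
print(X_train_scaled.median().round(3))
print((X_train_scaled.quantile(0.75) - X_train_scaled.quantile(0.25)).round(3))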
X_test.hist(bins=20, figsize=(20, 12), layout=(2, 3))
plt.show()
X_test_scaled.hist(bins=20, figsize=(20, 12), layout=(2, 3))
plt.show()
###Output
_____no_output_____ |
tests/Distance Calculations Example.ipynb | ###Markdown
Calculate cosmological distances with CCL. In this example, we will calculate various cosmological distances for an example cosmology.
###Code
import numpy as np
import pylab as plt
import pyccl as ccl
###Output
_____no_output_____
###Markdown
Set up a Cosmology object. `Cosmology` objects contain the parameters and metadata needed as inputs to most functions. Each `Cosmology` object has a set of cosmological parameters attached to it. In this example, we will only use the parameters of a vanilla LCDM model, but simple extensions (like curvature, neutrino mass, and w0/wa) are also supported. `Cosmology` objects also contain precomputed data (e.g. splines) to help speed-up certain calculations. As such, `Cosmology` objects are supposed to be immutable; you should create a new `Cosmology` object when you want to change the values of any cosmological parameters.
###Code
cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.045, h=0.67, A_s=2.1e-9, n_s=0.96)
print(cosmo)
###Output
Parameters
----------
Omega_c: 0.27
Omega_b: 0.045
Omega_m: 0.315
Omega_k: 0.0
Omega_l: 0.684927855904
w0: -1.0
wa: 0.0
H0: 67.0
h: 0.67
A_s: 2.1e-09
n_s: 0.96
N_nu_rel: 3.046
N_nu_mass: 0.0
mnu: 0.0
Omega_n_mass: 0.0
Omega_n_rel: 1.70947512533e-05
T_CMB: 2.725
Omega_g: 5.50493446829e-05
z_star: nan
has_mgrowth: False
Precomputed data
----------------
has_distances: False
has_growth: False
has_power: False
has_sigma: False
Status
------
status(0): C
###Markdown
As you can see, a number of cosmological parameters have been set to default values, or derived from the input parameters. Some, like `sigma_8`, have been left undefined; this is because calculating them from the input parameters is non-trivial, so this will only be done if needed (or if the user explicitly requests it). Parameter values can be accessed from the `Parameters` object that the `Cosmology` object contains, like so:
###Code
print(cosmo.params['Omega_c'])
###Output
0.27
###Markdown
Cosmological Distances. With a cosmology in hand, we can begin performing some calculations. We can start with the most basic measure, the comoving radial distance.
###Code
z = 0.5
ccl.comoving_radial_distance(cosmo, 1/(1+z)) # Mpc
###Output
_____no_output_____
###Markdown
Note that all distance function calls require scale factors, not redshifts. This function can take a `numpy` array of values as well.
###Code
zs = np.arange(0, 1, 0.1)
ccl.comoving_radial_distance(cosmo, 1/(1+zs))
###Output
_____no_output_____
###Markdown
CCL also supports calculation of the comoving angular distance. In flat spacetime (like the cosmology we have here) it is the same as the radial distance.
###Code
ccl.comoving_angular_distance(cosmo, 1/(1+z))
###Output
_____no_output_____
###Markdown
If we create a cosmology with curvature, we'll get a different result.
###Code
curved_cosmo = ccl.Cosmology(Omega_k = 0.1, Omega_c=0.17, Omega_b=0.045, h=0.67, A_s=2.1e-9, n_s=0.96)
chi_rad = ccl.comoving_radial_distance(curved_cosmo, 1/(1+z))
chi_curved = ccl.comoving_angular_distance(curved_cosmo, 1/(1+z))
print('Radial Dist. = %.2f Mpc \t Angular Dist. = %.2f Mpc' % (chi_rad, chi_curved))
###Output
Radial Dist. = 1992.55 Mpc Angular Dist. = 1999.14 Mpc
###Markdown
CCL explicitly supports the calculation of the luminosity distance and the distance modulus too:
###Code
chi_lum = ccl.luminosity_distance(cosmo, 1/(1+z))
DM = ccl.distance_modulus(cosmo, 1/(1+z))
print('Luminosity Dist = %.2f Mpc \t Distance Modulus = %.2f ' % (chi_lum, DM))
###Output
Luminosity Dist = 2944.44 Mpc Distance Modulus = 42.35
###Markdown
Finally, CCL supports an inverse operation, which calculates the scale factor for a given comoving distance:
###Code
ccl.scale_factor_of_chi(cosmo, 1962.96)
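# Added sketch: scale_factor_of_chi inverts comoving_radial_distance, so a
# round trip through both should recover the redshift z = 0.5
a = ccl.scale_factor_of_chi(cosmo, ccl.comoving_radial_distance(cosmo, 1/(1+0.5)))
print(1/a - 1)  # ~0.5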
###Output
_____no_output_____ |
regular_expressions_in_Python_notebook.ipynb | ###Markdown
Regular expressions and patterns in sequences. It is a recurring theme of patterns in biological sequences that some positions are observed to be more conserved than others. The variable positions may be more variable because mutations are less likely to occur at these positions, or because they are not functionally or structurally crucial. In coding regions of exons the degeneracy of the genetic code means that mutations at the third position of codons may not change the resulting protein. And at the protein level some amino acid residues are more similar to each other and can be inter-changeable. For example both Asp and Glu may supply a negatively-charged side-chain, both Arg and Lys a positively-charged one. Regular expressions are a flexible way to specify an underlying pattern while still allowing for such variation. For this reason various syntaxes based on regular expressions are found in user interfaces of bioinformatic programs and databases.
###Code
# run this cell to check your Python version is OK for this notebook!
import sys
def check_python_version_above_3_6():
major = sys.version_info.major
minor = sys.version_info.minor
if major < 3 or minor < 6:
print('ERROR you need to run this notebook with Python 3.6 or above (as f-strings used)')
print('ERROR current Python version is {}.{}'.format(major, minor))
print('ERROR Please see:\n',
' https://canvas.anglia.ac.uk/courses/15139/pages/azure-notebooks-switching-kernel\n'
' for information on switching kernel on Azure Notebooks')
else:
print('Python version {}.{} you are good to go'.format(major, minor))
check_python_version_above_3_6()
###Output
_____no_output_____
###Markdown
DataCamp Python Regular Expression Tutorial. Regular expressions are very powerful but take a bit of getting used to. So first work through this DataCamp tutorial https://www.datacamp.com/community/tutorials/python-regular-expression-tutorial. Add notebook cells below as you work through the DataCamp tutorial.
###Code
# work through DataCamp tutorial
###Output
_____no_output_____
###Markdown
Python Regular Expressions Cheat Sheet. Regular expressions are a bit complicated and a good one-page cheat sheet is a great help. So print out or store: https://www.dataquest.io/wp-content/uploads/2019/03/python-regular-expressions-cheat-sheet.pdf Regular expressions in Python. Python has sophisticated regular expression functions available in the module *re*.
###Code
# run this cell to import Python regular expression library re
import re
###Output
_____no_output_____
###Markdown
Regular expressions use a range of special characters in patterns. And as there is a limited number of special characters, some of these clash with their usage in Python. One way around this is to preface strings with `r` for raw. Compare the print output of the following two strings.
###Code
# run this cell to see how Python process \t and \n
print("\t1\n2")
# run this cell to see how the preface r means string is made "raw"
print(r"\t1\n2")
###Output
_____no_output_____
###Markdown
The regular expression `re.search` function https://docs.python.org/3/library/re.html#re.search can be used to find patterns in reasonably short sequences. For example, restriction enzymes have specific recognition sites in DNA. For simplicity this exercise ignores the fact that DNA has two strands!
###Code
# run this cell to see a simple regular expression with re search
dna = "TATAGAATTCATAAATT"
if re.search(r"GAATTC", dna):
print("EcoRI site found.")
###Output
_____no_output_____
###Markdown
Note in the example above there was no need for the pattern to be made a raw string but it does not hurt. Of course for an exact match like this there are the usual string methods available of the form `text.find(substring)`. But some restriction enzymes recognise ambiguous sequences. You will be aware of the [ambiguous nomenclature for DNA bases](https://www.dnabaser.com/articles/IUPAC%20ambiguity%20codes.html): for example, R is any purine (A or G), Y is a pyrimidine (C or T). Unfortunately Python does not recognise these codes out of the box. As an example *AvaII* cuts the pattern GGWCC where W is either an A or a T (the converse is S for C or G). So this can be expressed using the regular expression symbol | for alternatives.
###Code
dna = "TTATCGGTCCGC"
if re.search(r"GG(A|T)CC", dna):
print("AvaII site found.")
###Output
_____no_output_____
###Markdown
*BisI* cuts the pattern GCNGC where, as you know, N stands for a nucleotide with any base.
###Code
dna = "TCTTAGCAGCAATTCCGC"
if re.search(r"GC(A|C|G|T)GC", dna):
print("BisI site found.")
###Output
_____no_output_____
###Markdown
Or by including a character class with a list of the alternatives.
###Code
dna = "TCTTAGCAGCAATTCCGC"
if re.search(r"GC[ACGT]GC", dna):
print("BisI site found.")
###Output
_____no_output_____
###Markdown
The symbol . will match any character - so that could match any nucleotide.
###Code
dna = "TCTTAGCAGCAATTCCGC"
if re.search(r"GC.GC", dna):
print("BisI site found.")
###Output
_____no_output_____
###Markdown
Unfortunately it would also match GCQGC, GCWGC, and even GC.GC. Repeats of characters can be specified using the symbols ? (0 or 1 times), * (0 to infinity), or + (1 to infinity). Notice that, unlike the case of * in Linux, the modifier applies to the symbol in front of it. For instance, to search for at least 3 A's in a row:
###Code
dna = "TCTTAGCAGCAAAAAAAAAAAAATTCCGC"
if re.search(r"AAA+", dna):
print("poly(A) found.")
###Output
_____no_output_____
###Markdown
Specific numbers can be given as a number range in {} after the character: for example {n} for a single specific number, {n,m} for n to m times, {n,} for n to infinity times, and {,m} for 0 to m times. Multicharacter patterns can be grouped together using parentheses. For example an intronic region in the human VWF gene contains variable numbers of tetranucleotide repeats that are used for forensic identification. Alleles differ in the number of repeats. Here is a check on an individual for the commonest short variants, which have TCTA[TCTG]3-4[TCTA]7-11.
###Code
dna = "TTGATTCTATCTGTCTGTCTGTCTGTCTATCTATCTATCTATCTATCTATCTATCTTCCA"
if re.search(r"TCTA(TCTG){3,4}(TCTA){7,11}", dna):
print("STR allele found.")
###Output
_____no_output_____
###Markdown
The full specification of the use of special characters is in the documentation at https://docs.python.org/3/library/re.html User exercise (a) - regular expression for the restriction enzyme HinfI. From https://international.neb.com/products/r0155-hinfi#Product%20Information find out the recognition sequence for the restriction enzyme HinfI. Then write Python code using a regular expression to check whether HinfI will cut the following sequences:
###Code
test_dnas = {'sequence_a' : 'TTGATGCTATCTGTCTGTCTGTCTGTCTATCTATCTATCTATCTATCTATCTATCTTCCA',
'sequence_b' : 'TTGATTCTATCTGTCTGTCTGTCTGATTCATCTATCTATCTATCTATCTATCTATCTTCCA',
'sequence_c' : 'AAAGATTCAAA',
'sequence_d' : 'AAACTTAGA'}
###Output
_____no_output_____
###Markdown
Your code should produce output of the form:
```
sequence_a not cut by HinfI
sequence_b is cut by HinfI
sequence_c is cut by HinfI
sequence_d not cut by HinfI
```
**Please note that you will be asked for your code and its output in this week's quiz**
###Code
# your Python code
###Output
_____no_output_____
###Markdown
Match and regex objects. The examples above give the impression that the `re.search` function returns either `True` or `False`. But this is not the case: instead it returns either `None` if no match is found or a [match object](https://docs.python.org/3/library/re.html#match-objects) that has a boolean value of `True`. A match object represents the results of a regular expression `search` and has a number of useful methods for getting data out of it. Going back to the *AvaII* example.
###Code
# run this cell to see how match object works
dna = "TTATCGGTCCGC"
avaii_match = re.search(r"GG(A|T)CC", dna)
if avaii_match:
print('AvaII site found.')
print('string that was matched:', avaii_match.group())
print('index in string for start of match: ', avaii_match.start())
print('index in string after end of match: ', avaii_match.end())
print('(start, end) indices for match (end is exclusive): ', avaii_match.span())
else:
print('AvaII site not found.')
# user mini exercise: modify this code to print out the length of the poly(A) match in the string:
dna = "TCTTAGCAGCAAAAAAAAAAAAATTCCGC"
if re.search(r"AAA+", dna):
print("poly(A) found.")
###Output
_____no_output_____
###Markdown
If a regular expression is used multiple times it is more efficient to *compile* it into a regular expression object using the [`re.compile` function](https://docs.python.org/3/library/re.html#re.compile). Remember in general you need to check that the pattern found a match, otherwise the search will return `None` and an exception will occur as there is nothing to interrogate or print. Here we apply a search for the AvaII restriction site to a sequence and a mutated form, but only one returns a match:
###Code
# run this cell to see how to use a compiled re
seqs = ["TTATCGGTCCGC","TTATCGGGCCGC"]
avaii_re = re.compile(r"GG(A|T)CC")
for seq in seqs:
match = avaii_re.search(seq)
if match:
print('AvaII site found at:', match.span())
else:
print("AvaII site not found.")
###Output
_____no_output_____
###Markdown
Finding multiple occurrences of a pattern. The `re.search(pattern, string)` function (https://docs.python.org/3/library/re.html#re.search) will find only the first location where the regular expression matches. For finding multiple occurrences there is the function `re.finditer(pattern, string)` (https://docs.python.org/3/library/re.html#re.finditer). For example, ambiguous bases in a sequence can be found using the expression [^ATGC] where the ^ character inverts the selection (meaning not A, T, G or C). (Please note, outside square brackets [ ] the ^ character is used to mark the position of the pattern as the start of the string).
###Code
# run this cell to see how re.finditer can be used
dna = 'GGTGAGRTAAGAAGGGGYTAAGAGAGGATWAGG'
ambiguous_base = re.compile(r'[^ATGC]')
for match in ambiguous_base.finditer(dna):
base = match.group()
pos = match.start() + 1 # sequence position with 1 for start
print(f"{base} found at position {pos}")
###Output
_____no_output_____
###Markdown
Splitting a sequence using a regular expression. There is a function `re.split(pattern, string)` https://docs.python.org/3/library/re.html#re.split to split a string based on a regular expression. Here the sequence is split at each ambiguous base using the regex object `ambiguous_base` defined above. Notice that the actual pattern is omitted from the output strings.
###Code
print(ambiguous_base.split(dna))
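# Added sketch: with a capturing group in the pattern, re.split also keeps
# the delimiters - here the ambiguous bases - in the output list
print(re.split(r'([^ATGC])', dna))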
###Output
_____no_output_____
###Markdown
Further examples of regular expressions for sequence manipulation are covered in *Chapter 5* of Rocha & Ferreira (2008) *Bioinformatics Algorithms*. User exercise (b) - finding restriction enzyme sites on a cloning vector plasmid. Plasmids are circular bits of DNA. We will use pBR322 as an example. First read the wikipedia page on pBR322 https://en.wikipedia.org/wiki/PBR322 In this exercise we want to find the number of cut sites for a set of restriction enzymes on pBR322 and the position of the first restriction site on the plasmid.

| Restriction enzyme | recognition sequence$ |
| ------------------ | --------------------- |
| HindIII | AAGCTT |
| EcoRV | GATATC |
| EcoRI | GAATTC |
| BisI | GCNGC |
| AvaII | GGWCC |
| XmaI | CCCGGG |

$Please note that [ambiguity codes](https://www.dnabaser.com/articles/IUPAC%20ambiguity%20codes.html) are used. The expected result for the first three enzymes is shown on a schematic representation (image not reproduced here). Further information at https://www.neb.com/~/media/nebus/page%20images/tools%20and%20resources/interactive%20tools/dna%20sequences%20and%20maps/pbr322_map.pdf
###Code
# run this cell to download the DNA sequence of pbr322 and store it as pbr_322
import requests
def supply_pbr322_sequence():
"""returns DNA sequence of pBR322 plasmid from ENA"""
url = 'https://www.ebi.ac.uk/ena/browser/api/fasta/J01749.1?download=true'
sequence = requests.get(url).text
lines = sequence.splitlines()
lines.pop(0) # get rid of header
sequence = ''.join(lines)
return sequence
pbr322_dna = supply_pbr322_sequence()
###Output
_____no_output_____
###Markdown
First write Python to check that pbr322 is 4361 base pairs long
###Code
# write python to check that pbr322_dna has 4361 base pairs as expected
# now write Python to report the number of times each restriction enzyme cuts or
# that it does not cut. You should use regular expressions.
# You are recommend to store the regular expression patterns in a Python dictionary
# with the restriction enzyme name as a key.
###Output
_____no_output_____
###Markdown
**Please note that you will be asked for your code and its output in this week's quiz** Advanced exercise using Biopython `Restriction` classIf you are actually working with restriction enzymes there is no need to reinvent the wheel as Biopython already has an excellent `Restriction` class. You will need to install biopython to use it, in conda this is easy:```conda install biopython```
###Code
# this should install biopython on Azure notebooks
# https://notebooks.azure.com/help/jupyter-notebooks/package-installation
!conda install biopython -y
# import the Restiction class from BioPython checking
try:
from Bio import Restriction
except ModuleNotFoundError:
print('ERROR BioPython not available you will need to install it')
###Output
_____no_output_____
###Markdown
The Restriction class is really easy to use - see http://biopython.org/DIST/docs/cookbook/Restriction.html - we will have a quick look here.
###Code
# run this cell to see how the Restriction class knows about AvaII
my_enzyme = Restriction.AvaII
print(f'{my_enzyme} has a restriction site {my_enzyme.site}')
# run this cell to get pbr322 sequence into biopython
# there is probably a better way provided by ?????
pbr322_dna = supply_pbr322_sequence() # defined above
from Bio.Seq import Seq
from Bio.Alphabet.IUPAC import IUPACAmbiguousDNA
amb = IUPACAmbiguousDNA()
pbr322_seq = Seq(pbr322_dna, amb)
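# Added note: Bio.Alphabet was removed in Biopython 1.78, so on current
# versions the alphabet import above fails and a plain Seq(pbr322_dna)
# is sufficient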
# run this cell to see where there are AvaII recognition sites in pbr322_seq
sites = my_enzyme.search(pbr322_seq)
print(f'Restriction sites for {my_enzyme} : {sites}')
###Output
_____no_output_____
###Markdown
**Advanced user exercise** Repeat exercise (b) above using the Biopython `Restriction` class. Remember the DNA is circular - see http://biopython.org/DIST/docs/cookbook/Restriction.html#1.5
###Code
# write Python repeating (b) using biopython rather than re
###Output
_____no_output_____ |
apt_presale_price.ipynb | ###Markdown
Nationwide presale price trends for new private apartments* October 2015 through April 2018* Presale price trends for all new private apartments sold under a housing sale guarantee* https://www.data.go.kr/dataset/3035522/fileData.do
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import re
from plotnine import *
%ls data
pre_sale = pd.read_csv('data/apt_price_201806.csv', encoding='euc-kr')
pre_sale.shape
pre_sale.head()
pre_sale.tail()
# The presale price is not a numeric type; it needs converting to one.
pre_sale.info()
pre_sale_price = pre_sale['분양가격(㎡)']
# Year and month are categorical data, so convert them to strings
pre_sale['연도'] = pre_sale['연도'].astype(str)
pre_sale['월'] = pre_sale['월'].astype(str)
# Convert the presale price to a numeric type (errors='coerce' turns non-numbers into NaN)
pre_sale['분양가격'] = pd.to_numeric(pre_sale_price, errors='coerce')
# Compute the presale price per pyeong
pre_sale['평당분양가격'] = pre_sale['분양가격'] * 3.3
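# Added note: one pyeong, the traditional Korean floor-area unit, is about
# 3.3058 m2; the notebook rounds it to 3.3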
pre_sale.info()
# The presale price has many missing values
pre_sale.isnull().sum()
pre_sale.describe()
# Look at the 2017 data only
pre_sale_2017 = pre_sale.loc[pre_sale['연도'] == 2017]
pre_sale_2017.shape
# The identical counts confirm each province has the same set of size-category rows
pre_sale['규모구분'].value_counts()
###Output
_____no_output_____
###Markdown
Nationwide average presale price
###Code
# Presale prices clearly rise from 2015 through 2018
pd.options.display.float_format = '{:,.0f}'.format
pre_sale.groupby(pre_sale.연도).describe().T
###Output
_____no_output_____
###Markdown
Nationwide average presale price by size category
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '연도')
###Output
_____no_output_____
###Markdown
Nationwide change in presale prices. Using the rows whose size category (규모구분) is '전체' (all sizes), look at the change by year.
###Code
# Keep only the rows whose size category is '전체' (all sizes)
region_year_all = pre_sale.loc[pre_sale['규모구분'] == '전체']
region_year = region_year_all.pivot_table('평당분양가격', '지역명', '연도').reset_index()
region_year['변동액'] = region_year['2018'] - region_year['2015']
max_delta_price = np.max(region_year['변동액'])*1000
min_delta_price = np.min(region_year['변동액'])*1000
mean_delta_price = np.mean(region_year['변동액'])*1000
print('Presale prices rose steadily from 2015 to 2018; the largest increase was in Jeju, at {:,.0f} KRW per pyeong.'.format(max_delta_price))
print('The smallest increase was in Ulsan, at {:,.0f} KRW per pyeong.'.format(min_delta_price))
print('The nationwide average change is {:,.0f} KRW per pyeong.'.format(mean_delta_price))
region_year
###Output
Presale prices rose steadily from 2015 to 2018; the largest increase was in Jeju, at 5,335,550 KRW per pyeong.
The smallest increase was in Ulsan, at 387,750 KRW per pyeong.
The nationwide average change is 1,667,276 KRW per pyeong.
###Markdown
Graph of year-over-year change
###Code
(ggplot(region_year_all, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
C:\Users\a\Anaconda3\lib\site-packages\plotnine\layer.py:450: UserWarning: geom_bar : Removed 17 rows containing missing values.
self.data = self.geom.handle_na(self.data)
###Markdown
Total per-pyeong presale price by region* The data below gives a sense of how much presale activity took place.* Over the full data, presale activity in Seoul, Gyeonggi, Busan and Jeju looks larger than in other regions, but it should be viewed relative to presale prices.
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '지역명')
###Output
_____no_output_____
###Markdown
By size category
###Code
# In Seoul, units over 85㎡ and up to 102㎡ have the highest presale prices
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Split the chart above by region
(ggplot(pre_sale)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_bar(stat='identity', position='dodge')
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
# Draw box plots
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
pre_sale_seoul = pre_sale.loc[pre_sale['지역명']=='서울']
(ggplot(pre_sale_seoul)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Jeju, the region with the largest 2015-2018 presale price gap
(ggplot(pre_sale.loc[pre_sale['지역명']=='제주'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Ulsan, the region with the smallest 2015-2018 presale price gap
(ggplot(pre_sale.loc[pre_sale['지역명']=='울산'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
C:\Users\a\Anaconda3\lib\site-packages\plotnine\layer.py:363: UserWarning: stat_boxplot : Removed 38 rows containing non-finite values.
data = self.stat.compute_layer(data, params, layout)
###Markdown
Presale price per 3.3㎡, December 2013 to September 2015* The October 2015 - April 2018 data was converted to per-pyeong prices above, whereas this dataset already contains per-pyeong prices.
###Code
df = pd.read_csv('data/apt_aveprice_national_1509.csv', \
encoding='euc-kr', skiprows=1, header=0)
df.shape
# The columns need renaming to be easy to work with in pandas
df
year = df.iloc[0]
month = df.iloc[1]
# Fill in the missing values
year
# Combine rows 0 and 1 to build the new column names
for i, y in enumerate(year):
if i > 2 and i < 15:
year[i] = '2014년 ' + month[i]
elif i >= 15:
year[i] = '2015년 ' + month[i]
elif i == 2 :
year[i] = year[i] + ' ' + month[i]
elif i == 1:
year[i] = '시군구'
print(year)
df.columns = year
df = df.drop(df.index[[0,1]])
df
# Build a new region column by joining the province (구분) and district (시군구)
df['구분'] = df['구분'].fillna('')
df['시군구'] = df['시군구'].fillna('')
df['지역'] = df['구분'] + df['시군구']
df['지역']
melt_columns = df.columns.copy()
melt_columns
df_2013_2015 = pd.melt(df, id_vars=['지역'], value_vars=['2013년 12월', '2014년 1월', '2014년 2월', '2014년 3월',
'2014년 4월', '2014년 5월', '2014년 6월', '2014년 7월', '2014년 8월',
'2014년 9월', '2014년 10월', '2014년 11월', '2014년 12월', '2015년 1월',
'2015년 2월', '2015년 3월', '2015년 4월', '2015년 5월', '2015년 6월',
'2015년 7월', '2015년 8월', '2015년 9월'])
df_2013_2015.head()
df_2013_2015.columns = ['지역', '기간', '분양가']
df_2013_2015.head()
df_2013_2015['연도'] = df_2013_2015['기간'].apply(lambda year_month : year_month.split('년')[0])
df_2013_2015['월'] = df_2013_2015['기간'].apply(lambda year_month : re.sub('월', '', year_month.split('년')[1]).strip())
df_2013_2015.head()
###Output
_____no_output_____
###Markdown
Tidying the region names Gangwon and Busan
###Code
df_2013_2015['지역'].value_counts()
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('6대광역시부산','부산', x))
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('지방강원','강원', x))
df_2013_2015['지역'].value_counts()
df_2013_2015.describe()
df_2013_2015['분양가격'] = df_2013_2015['분양가'].str.replace(',', '').astype(int)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
###Output
_____no_output_____
###Markdown
Now ready to combine the data from 2013 through April 2018
###Code
df_2015_2018 = pre_sale.loc[pre_sale['규모구분'] == '전체']
print(df_2015_2018.shape)
df_2015_2018.head()
df_2013_2015.columns
df_2013_2015_prepare = df_2013_2015[['지역', '연도', '월', '분양가격']]
df_2013_2015_prepare.head()
df_2013_2015_prepare.columns = ['지역명', '연도', '월', '평당분양가격']
df_2015_2018.columns
df_2015_2018_prepare = df_2015_2018[['지역명', '연도', '월', '평당분양가격']]
df_2015_2018_prepare.head()
df_2015_2018_prepare.describe()
df_2013_2018 = pd.concat([df_2013_2015_prepare, df_2015_2018_prepare])
df_2013_2018.shape
df_2013_2018.head()
df_2013_2015_region= df_2013_2015_prepare['지역명'].unique()
df_2013_2015_region
df_2015_2018_region = df_2015_2018_prepare['지역명'].unique()
df_2015_2018_region
exclude_region = [region for region in df_2013_2015_region if not region in df_2015_2018_region]
exclude_region
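# Added note: these regions appear only in the 2013-2015 data - the '전국'
# (nationwide) and '수도권' (metro area) aggregate rows - and are dropped below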
df_2013_2018.shape
df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].head()
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].index, axis=0, inplace=True)
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'] == ''].index, axis=0, inplace=True)
(ggplot(df_2013_2018, aes(x='연도', y='평당분양가격'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
df_2013_2018_jeju = df_2013_2018.loc[df_2013_2018['지역명'] == '제주']
(ggplot(df_2013_2018_jeju)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 18))
)
###Output
C:\Users\a\Anaconda3\lib\site-packages\plotnine\layer.py:363: UserWarning: stat_boxplot : Removed 17 rows containing non-finite values.
data = self.stat.compute_layer(data, params, layout)
###Markdown
Nationwide presale price trends for 'new' private apartments* October 2015 through April 2018* Presale price trends for all new private apartments sold under a housing sale guarantee* https://www.data.go.kr/dataset/3035522/fileData.do
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import re
from plotnine import *
%ls data
pre_sale = pd.read_csv('data/전국_평균_분양가격_2018.6월_.csv', encoding='euc-kr')
pre_sale.shape
pre_sale.head()
pre_sale.tail() # there are NaN missing values too
# The presale price is not a numeric type; it needs converting to one.
pre_sale.info()
pre_sale_price = pre_sale['분양가격(㎡)']
# Year and month are categorical data, so convert them to strings (we won't do arithmetic on them)
pre_sale['연도'] = pre_sale['연도'].astype(str)
pre_sale['월'] = pre_sale['월'].astype(str)
# Convert the presale price to a numeric type; the price per 3.3㎡ is the per-pyeong price
pre_sale['분양가격'] = pd.to_numeric(pre_sale_price, errors='coerce')
# Compute the presale price per pyeong
pre_sale['평당분양가격'] = pre_sale['분양가격'] * 3.3
pre_sale.info() # the ㎡ unit is awkward to type, so we made a plain '분양가격' column with float64 type
# 분양가격에 결측치가 많이 있어요.
pre_sale.isnull().sum() # blanks were not counted as missing, but the numeric conversion turned blanks into NaN, so the count is even higher
pre_sale.describe() # values are in thousands of KRW; the mean is about 10 million KRW per pyeong
# Look at the 2017 data only
pre_sale_2017 = pre_sale.loc[pre_sale['연도'] == 2017]
pre_sale_2017.shape
# The identical counts confirm each province has the same set of size-category rows
pre_sale['규모구분'].value_counts()
###Output
_____no_output_____
###Markdown
Nationwide average presale price
###Code
# Presale prices clearly rise from 2015 through 2018
pd.options.display.float_format = '{:,.0f}'.format
pre_sale.groupby(pre_sale.연도).describe().T
###Output
_____no_output_____
###Markdown
Nationwide average presale price by size category
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '연도') # for a family of four, units over 85㎡ and up to 102㎡ are usually the most popular
###Output
_____no_output_____
###Markdown
Nationwide change in presale prices. Using the rows whose size category (규모구분) is '전체' (all sizes), look at the change by year.
###Code
# Keep only the rows whose size category is '전체' (all sizes)
region_year_all = pre_sale.loc[pre_sale['규모구분'] == '전체']
region_year = region_year_all.pivot_table('평당분양가격', '지역명', '연도').reset_index()
region_year['변동액'] = region_year['2018'] - region_year['2015']
max_delta_price = np.max(region_year['변동액'].astype(int)*1000)
min_delta_price = np.min(region_year['변동액'].astype(int)*1000)
mean_delta_price = np.mean(region_year['변동액'].astype(int)*1000)
print('Presale prices rose steadily from 2015 to 2018; the largest increase was in Jeju, at {:,.0f} KRW per pyeong.'.format(max_delta_price))
print('The smallest increase was in Ulsan, at {:,.0f} KRW per pyeong.'.format(min_delta_price))
print('The nationwide average change is {:,.0f} KRW per pyeong.'.format(mean_delta_price))
region_year
###Output
Presale prices rose steadily from 2015 to 2018; the largest increase was in Jeju, at 5,335,000 KRW per pyeong.
The smallest increase was in Ulsan, at 387,000 KRW per pyeong.
The nationwide average change is 1,666,647 KRW per pyeong.
###Markdown
Graph of year-over-year change
###Code
(ggplot(region_year_all, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
_____no_output_____
###Markdown
Total per-pyeong presale price by region* The data below gives a sense of how much presale activity took place.* Over the full data, presale activity in Seoul, Gyeonggi, Busan and Jeju looks larger than in other regions, but it should be viewed relative to presale prices.
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '지역명')
###Output
_____no_output_____
###Markdown
By size category
###Code
# In Seoul, units over 85㎡ and up to 102㎡ have the highest presale prices
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Split the chart above by region
(ggplot(pre_sale)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_bar(stat='identity', position='dodge')
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
# Draw box plots
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
pre_sale_seoul = pre_sale.loc[pre_sale['지역명']=='서울']
(ggplot(pre_sale_seoul)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Jeju, the region with the largest 2015-2018 presale price gap
(ggplot(pre_sale.loc[pre_sale['지역명']=='제주'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Ulsan, the region with the smallest 2015-2018 presale price gap
(ggplot(pre_sale.loc[pre_sale['지역명']=='울산'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
_____no_output_____
###Markdown
Presale price per 3.3㎡, December 2013 to September 2015* The October 2015 - April 2018 data was converted to per-pyeong prices above, whereas this dataset already contains per-pyeong prices.
###Code
df = pd.read_csv('data/지역별_3.3㎡당_평균_분양가격_천원__15.09월.csv', \
encoding='euc-kr', skiprows=1, header=0)
df.shape
# The columns need renaming to be easy to work with in pandas
# the last 3 columns don't look needed
# combine rows 0 and 1
# and join the province and district columns too
df
year = df.iloc[0]
month = df.iloc[1]
# Fill in the missing values
year
# Combine rows 0 and 1 to build the new column names
for i, y in enumerate(year):
if i > 2 and i < 15:
year[i] = '2014년 ' + month[i]
elif i >= 15:
year[i] = '2015년 ' + month[i]
elif i == 2 :
year[i] = year[i] + ' ' + month[i]
elif i == 1:
year[i] = '시군구'
print(year)
df.columns = year # set the columns to year
df = df.drop(df.index[[0,1]]) # drop index rows 0 and 1, which are no longer used
df
# Build a new region column by joining the province (구분) and district (시군구)
df['구분'] = df['구분'].fillna('')
df['시군구'] = df['시군구'].fillna('')
df['지역'] = df['구분'] + df['시군구']
df['지역']
melt_columns = df.columns.copy() # keep a copy of the column names
melt_columns
df_2013_2015 = pd.melt(df, id_vars=['지역'], value_vars=['2013년 12월', '2014년 1월', '2014년 2월', '2014년 3월',
'2014년 4월', '2014년 5월', '2014년 6월', '2014년 7월', '2014년 8월',
'2014년 9월', '2014년 10월', '2014년 11월', '2014년 12월', '2015년 1월',
'2015년 2월', '2015년 3월', '2015년 4월', '2015년 5월', '2015년 6월',
'2015년 7월', '2015년 8월', '2015년 9월']) # dropping the month-on-month, vs-year-end and year-on-year comparison columns
df_2013_2015.head()
df_2013_2015.columns = ['지역', '기간', '분양가']
df_2013_2015.head()
df_2013_2015['연도'] = df_2013_2015['기간'].apply(lambda year_month : year_month.split('년')[0])
df_2013_2015['월'] = df_2013_2015['기간'].apply(lambda year_month : re.sub('월', '', year_month.split('년')[1]).strip())
df_2013_2015.head()
###Output
_____no_output_____
###Markdown
Tidying the region names Gangwon and Busan
###Code
df_2013_2015['지역'].value_counts()
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('6대광역시부산','부산', x))
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('지방강원','강원', x))
df_2013_2015['지역'].value_counts()
df_2013_2015.describe()
df_2013_2015['분양가격'] = df_2013_2015['분양가'].str.replace(',', '').astype(int) # strip the commas, cast to int, store as 분양가격
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
###Output
_____no_output_____
###Markdown
Now ready to combine the data from 2013 through April 2018
###Code
df_2015_2018 = pre_sale.loc[pre_sale['규모구분'] == '전체']
print(df_2015_2018.shape)
df_2015_2018.head()
df_2013_2015.columns
df_2013_2015_prepare = df_2013_2015[['지역', '연도', '월', '분양가격']]
df_2013_2015_prepare.head()
df_2013_2015_prepare.columns = ['지역명', '연도', '월', '평당분양가격']
df_2015_2018.columns
df_2015_2018_prepare = df_2015_2018[['지역명', '연도', '월', '평당분양가격']]
df_2015_2018_prepare.head()
df_2015_2018_prepare.describe()
df_2013_2018 = pd.concat([df_2013_2015_prepare, df_2015_2018_prepare]) # concat stacks vertically; merge would join horizontally
df_2013_2018.shape
df_2013_2018.head() # take a look at the combined data
df_2013_2015_region= df_2013_2015_prepare['지역명'].unique()
df_2013_2015_region
df_2015_2018_region = df_2015_2018_prepare['지역명'].unique()
df_2015_2018_region
exclude_region = [region for region in df_2013_2015_region if not region in df_2015_2018_region]
exclude_region
df_2013_2018.shape
df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].head()
# drop rows whose region is 전국 (nationwide), 수도권 (metro area) or blank
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].index, axis=0, inplace=True)
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'] == ''].index, axis=0, inplace=True)
(ggplot(df_2013_2018, aes(x='연도', y='평당분양가격'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
df_2013_2018_jeju = df_2013_2018.loc[df_2013_2018['지역명'] == '제주']
(ggplot(df_2013_2018_jeju)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 12))
)
###Output
_____no_output_____
###Markdown
Nationwide presale price trends for new private apartments* October 2015 through April 2018* Presale price trends for all new private apartments sold under a housing sale guarantee* https://www.data.go.kr/dataset/3035522/fileData.do
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import re
from plotnine import *
%ls
pre_sale = pd.read_csv('201806.csv', encoding='euc-kr')
pre_sale.shape
pre_sale.head()
pre_sale.tail()
# The presale price is not a numeric type; it needs converting to one.
pre_sale.info()
pre_sale_price = pre_sale['분양가격(㎡)']
# Year and month are categorical data, so convert them to strings
pre_sale['연도'] = pre_sale['연도'].astype(str)
pre_sale['월'] = pre_sale['월'].astype(str)
# Convert the presale price to a numeric type
pre_sale['분양가격'] = pd.to_numeric(pre_sale_price, errors='coerce')
# Compute the presale price per pyeong
pre_sale['평당분양가격'] = pre_sale['분양가격'] * 3.3
pre_sale.info()
# The presale price has many missing values
pre_sale.isnull().sum()
pre_sale.describe()
# Look at the 2017 data only
pre_sale_2017 = pre_sale.loc[pre_sale['연도'] == 2017]
pre_sale_2017.shape
# The identical counts confirm each province has the same set of size-category rows
pre_sale['규모구분'].value_counts()
###Output
_____no_output_____
###Markdown
Nationwide average presale price
###Code
# Presale prices clearly rise from 2015 through 2018
pd.options.display.float_format = '{:,.0f}'.format
pre_sale.groupby(pre_sale.연도).describe().T
###Output
_____no_output_____
###Markdown
규모별 전국 평균 분양가격
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '연도')
###Output
_____no_output_____
###Markdown
Nationwide change in presale prices. Using the rows whose size category (규모구분) is '전체' (all sizes), look at the change by year.
###Code
# Keep only the rows whose size category is '전체' (all sizes)
region_year_all = pre_sale.loc[pre_sale['규모구분'] == '전체']
region_year = region_year_all.pivot_table('평당분양가격', '지역명', '연도').reset_index()
region_year['변동액'] = region_year['2018'] - region_year['2015'].astype(int)
max_delta_price = np.max(region_year['변동액'])*1000
min_delta_price = np.min(region_year['변동액'])*1000
mean_delta_price = np.mean(region_year['변동액'])*1000
print('Presale prices rose steadily from 2015 to 2018; the largest increase was in Jeju, at {:,.0f} KRW per pyeong.'.format(max_delta_price))
print('The smallest increase was in Ulsan, at {:,.0f} KRW per pyeong.'.format(min_delta_price))
print('The nationwide average change is {:,.0f} KRW per pyeong.'.format(mean_delta_price))
region_year
###Output
Presale prices rose steadily from 2015 to 2018; the largest increase was in Jeju, at 5,335,750 KRW per pyeong.
The smallest increase was in Ulsan, at 388,650 KRW per pyeong.
The nationwide average change is 1,667,735 KRW per pyeong.
###Markdown
Graph of year-over-year change
###Code
(ggplot(region_year_all, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
_____no_output_____
###Markdown
Total per-pyeong presale price by region* The data below gives a sense of how much presale activity took place.* Over the full data, presale activity in Seoul, Gyeonggi, Busan and Jeju looks larger than in other regions, but it should be viewed relative to presale prices.
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '지역명')
###Output
_____no_output_____
###Markdown
By size category
###Code
# In Seoul, units over 85㎡ and up to 102㎡ have the highest presale prices
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Split the chart above by region
(ggplot(pre_sale)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_bar(stat='identity', position='dodge')
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
# Draw box plots
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
pre_sale_seoul = pre_sale.loc[pre_sale['지역명']=='서울']
(ggplot(pre_sale_seoul)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Jeju, the region with the largest 2015-2018 presale price gap
(ggplot(pre_sale.loc[pre_sale['지역명']=='제주'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Ulsan, the region with the smallest 2015-2018 presale price gap
(ggplot(pre_sale.loc[pre_sale['지역명']=='울산'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
_____no_output_____
###Markdown
Presale price per 3.3㎡, December 2013 to September 2015* The October 2015 - April 2018 data was converted to per-pyeong prices above, whereas this dataset already contains per-pyeong prices.
###Code
df = pd.read_csv('201509.csv', \
encoding='euc-kr', skiprows=1, header=0)
df.shape
# The columns need renaming to be easy to work with in pandas
df
year = df.iloc[0]
month = df.iloc[1]
# Fill in the missing values
year
# Combine rows 0 and 1 to build the new column names
for i, y in enumerate(year):
if i > 2 and i < 15:
year[i] = '2014년 ' + month[i]
elif i >= 15:
year[i] = '2015년 ' + month[i]
elif i == 2 :
year[i] = year[i] + ' ' + month[i]
elif i == 1:
year[i] = '시군구'
print(year)
df.columns = year
df = df.drop(df.index[[0,1]])
df
# Build a new region column by joining the province (구분) and district (시군구)
df['구분'] = df['구분'].fillna('')
df['시군구'] = df['시군구'].fillna('')
df['지역'] = df['구분'] + df['시군구']
df['지역']
melt_columns = df.columns.copy()
melt_columns
df_2013_2015 = pd.melt(df, id_vars=['지역'], value_vars=['2013년 12월', '2014년 1월', '2014년 2월', '2014년 3월',
'2014년 4월', '2014년 5월', '2014년 6월', '2014년 7월', '2014년 8월',
'2014년 9월', '2014년 10월', '2014년 11월', '2014년 12월', '2015년 1월',
'2015년 2월', '2015년 3월', '2015년 4월', '2015년 5월', '2015년 6월',
'2015년 7월', '2015년 8월', '2015년 9월'])
df_2013_2015.head()
df_2013_2015.columns = ['지역', '기간', '분양가']
df_2013_2015.head()
df_2013_2015['연도'] = df_2013_2015['기간'].apply(lambda year_month : year_month.split('년')[0])
df_2013_2015['월'] = df_2013_2015['기간'].apply(lambda year_month : re.sub('월', '', year_month.split('년')[1]).strip())
df_2013_2015.head()
###Output
_____no_output_____
###Markdown
Tidying the region names Gangwon and Busan
###Code
df_2013_2015['지역'].value_counts()
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('6대광역시부산','부산', x))
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('지방강원','강원', x))
df_2013_2015['지역'].value_counts()
df_2013_2015.describe()
df_2013_2015['분양가격'] = df_2013_2015['분양가'].str.replace(',', '').astype(int)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
###Output
_____no_output_____
###Markdown
Now ready to combine the data from 2013 through April 2018
###Code
df_2015_2018 = pre_sale.loc[pre_sale['규모구분'] == '전체']
print(df_2015_2018.shape)
df_2015_2018.head()
df_2013_2015.columns
df_2013_2015_prepare = df_2013_2015[['지역', '연도', '월', '분양가격']]
df_2013_2015_prepare.head()
df_2013_2015_prepare.columns = ['지역명', '연도', '월', '평당분양가격']
df_2015_2018.columns
df_2015_2018_prepare = df_2015_2018[['지역명', '연도', '월', '평당분양가격']]
df_2015_2018_prepare.head()
df_2015_2018_prepare.describe()
df_2013_2018 = pd.concat([df_2013_2015_prepare, df_2015_2018_prepare])
df_2013_2018.shape
df_2013_2018.head()
df_2013_2015_region= df_2013_2015_prepare['지역명'].unique()
df_2013_2015_region
df_2015_2018_region = df_2015_2018_prepare['지역명'].unique()
df_2015_2018_region
exclude_region = [region for region in df_2013_2015_region if not region in df_2015_2018_region]
exclude_region
df_2013_2018.shape
###Output
_____no_output_____
###Markdown
Removing the metro-area rows
###Code
df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].head()
###Output
_____no_output_____
###Markdown
Dropping the metro-area and blank region rows
###Code
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].index, axis=0, inplace=True)
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'] == ''].index, axis=0, inplace=True)
(ggplot(df_2013_2018, aes(x='연도', y='평당분양가격'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
df_2013_2018_jeju = df_2013_2018.loc[df_2013_2018['지역명'] == '제주']
(ggplot(df_2013_2018_jeju)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
A = (ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
ggsave(A, device = 'jpeg',path = 'project')
###Output
_____no_output_____
###Markdown
Nationwide presale price trends for new private apartments* October 2015 through April 2018* Presale price trends for all new private apartments sold under a housing sale guarantee* https://www.data.go.kr/dataset/3035522/fileData.do
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import re
from plotnine import *
%ls data/apt_price
pre_sale = pd.read_csv('data/apt_price/전국_평균_분양가격_2018.6월_.csv', encoding='euc-kr')
pre_sale.shape
pre_sale.head()
pre_sale.tail()
# The presale price is not a numeric type; it needs converting to one.
pre_sale.info()
pre_sale_price = pre_sale['분양가격(㎡)']
# Year and month are categorical data, so convert them to strings
pre_sale['연도'] = pre_sale['연도'].astype(str)
pre_sale['월'] = pre_sale['월'].astype(str)
# Convert the presale price to a numeric type
pre_sale['분양가격'] = pd.to_numeric(pre_sale_price, errors='coerce')
# Compute the presale price per pyeong
pre_sale['평당분양가격'] = pre_sale['분양가격'] * 3.3
pre_sale.info()
# The presale price has many missing values
pre_sale.isnull().sum()
pre_sale.describe()
# Look at the 2017 data only
pre_sale_2017 = pre_sale.loc[pre_sale['연도'] == 2017]
pre_sale_2017.shape
# The identical counts confirm each province has the same set of size-category rows
pre_sale['규모구분'].value_counts()
###Output
_____no_output_____
###Markdown
Nationwide average presale price
###Code
# Presale prices clearly rise from 2015 through 2018
pd.options.display.float_format = '{:,.0f}'.format
pre_sale.groupby(pre_sale.연도).describe().T
###Output
_____no_output_____
###Markdown
규모별 전국 평균 분양가격
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '연도')
###Output
_____no_output_____
###Markdown
Nationwide change in presale prices. Using the rows whose size category (규모구분) is '전체' (all sizes), look at the change by year.
###Code
# Keep only the rows whose size category is '전체' (all sizes)
region_year_all = pre_sale.loc[pre_sale['규모구분'] == '전체']
region_year = region_year_all.pivot_table('평당분양가격', '지역명', '연도').reset_index()
region_year['변동액'] = region_year['2018'] - region_year['2015']
max_delta_price = np.max(region_year['변동액']).astype(int)*1000
min_delta_price = np.min(region_year['변동액']).astype(int)*1000
mean_delta_price = np.mean(region_year['변동액']).astype(int)*1000
print('Presale prices rose steadily from 2015 to 2018; the largest increase was in Jeju, at {:,.0f} KRW per pyeong.'.format(max_delta_price))
print('The smallest increase was in Ulsan, at {:,.0f} KRW per pyeong.'.format(min_delta_price))
print('The nationwide average change is {:,.0f} KRW per pyeong.'.format(mean_delta_price))
region_year
###Output
_____no_output_____
###Markdown
Graph of year-over-year change
###Code
(ggplot(region_year_all, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
_____no_output_____
###Markdown
Total per-pyeong presale price by region* The data below gives a sense of how much presale activity took place.* Over the full data, presale activity in Seoul, Gyeonggi, Busan and Jeju looks larger than in other regions, but it should be viewed relative to presale prices.
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '지역명')
###Output
_____no_output_____
###Markdown
By size category
###Code
# In Seoul, units over 85㎡ and up to 102㎡ have the highest presale prices
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Split the chart above by region
(ggplot(pre_sale)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_bar(stat='identity', position='dodge')
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
# Draw box plots
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
pre_sale_seoul = pre_sale.loc[pre_sale['지역명']=='서울']
(ggplot(pre_sale_seoul)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Jeju, the region with the largest 2015-2018 presale price gap
(ggplot(pre_sale.loc[pre_sale['지역명']=='제주'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Ulsan, the region with the smallest 2015-2018 presale price gap
(ggplot(pre_sale.loc[pre_sale['지역명']=='울산'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
_____no_output_____
###Markdown
Presale price per 3.3㎡, December 2013 to September 2015* The October 2015 - April 2018 data was converted to per-pyeong prices above, whereas this dataset already contains per-pyeong prices.
###Code
df = pd.read_csv('data/apt_price/지역별_3.3㎡당_평균_분양가격_천원__15.09월.csv', \
encoding='euc-kr', skiprows=1, header=0)
df.shape
# The columns need renaming to be easy to work with in pandas
df
year = df.iloc[0]
month = df.iloc[1]
# Fill in the missing values
year
# Combine rows 0 and 1 to build the new column names
for i, y in enumerate(year):
if i > 2 and i < 15:
year[i] = '2014년 ' + month[i]
elif i >= 15:
year[i] = '2015년 ' + month[i]
elif i == 2 :
year[i] = year[i] + ' ' + month[i]
elif i == 1:
year[i] = '시군구'
print(year)
df.columns = year
df = df.drop(df.index[[0,1]])
df
# Build a new region column by joining the province (구분) and district (시군구)
df['구분'] = df['구분'].fillna('')
df['시군구'] = df['시군구'].fillna('')
df['지역'] = df['구분'] + df['시군구']
df['지역']
melt_columns = df.columns.copy()
melt_columns
df_2013_2015 = pd.melt(df, id_vars=['지역'], value_vars=['2013년 12월', '2014년 1월', '2014년 2월', '2014년 3월',
'2014년 4월', '2014년 5월', '2014년 6월', '2014년 7월', '2014년 8월',
'2014년 9월', '2014년 10월', '2014년 11월', '2014년 12월', '2015년 1월',
'2015년 2월', '2015년 3월', '2015년 4월', '2015년 5월', '2015년 6월',
'2015년 7월', '2015년 8월', '2015년 9월'])
df_2013_2015.head()
df_2013_2015.columns = ['지역', '기간', '분양가']
df_2013_2015.head()
df_2013_2015['연도'] = df_2013_2015['기간'].apply(lambda year_month : year_month.split('년')[0])
df_2013_2015['월'] = df_2013_2015['기간'].apply(lambda year_month : re.sub('월', '', year_month.split('년')[1]).strip())
df_2013_2015.head()
###Output
_____no_output_____
###Markdown
Tidying the region names Gangwon and Busan
###Code
df_2013_2015['지역'].value_counts()
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('6대광역시부산','부산', x))
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('지방강원','강원', x))
df_2013_2015['지역'].value_counts()
df_2013_2015.describe()
df_2013_2015['분양가격'] = df_2013_2015['분양가'].str.replace(',', '').astype(int)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
###Output
_____no_output_____
###Markdown
Now ready to combine the data from 2013 through April 2018
###Code
df_2015_2018 = pre_sale.loc[pre_sale['규모구분'] == '전체']
print(df_2015_2018.shape)
df_2015_2018.head()
df_2013_2015.columns
df_2013_2015_prepare = df_2013_2015[['지역', '연도', '월', '분양가격']]
df_2013_2015_prepare.head()
df_2013_2015_prepare.columns = ['지역명', '연도', '월', '평당분양가격']
df_2015_2018.columns
df_2015_2018_prepare = df_2015_2018[['지역명', '연도', '월', '평당분양가격']]
df_2015_2018_prepare.head()
df_2015_2018_prepare.describe()
df_2013_2018 = pd.concat([df_2013_2015_prepare, df_2015_2018_prepare])
df_2013_2018.shape
df_2013_2018.head()
df_2013_2015_region= df_2013_2015_prepare['지역명'].unique()
df_2013_2015_region
df_2015_2018_region = df_2015_2018_prepare['지역명'].unique()
df_2015_2018_region
exclude_region = [region for region in df_2013_2015_region if not region in df_2015_2018_region]
exclude_region
df_2013_2018.shape
df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].head()
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].index, axis=0, inplace=True)
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'] == ''].index, axis=0, inplace=True)
(ggplot(df_2013_2018, aes(x='연도', y='평당분양가격'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
df_2013_2018_jeju = df_2013_2018.loc[df_2013_2018['지역명'] == '제주']
(ggplot(df_2013_2018_jeju)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
###Output
_____no_output_____
###Markdown
Nationwide presale price trends for new private apartments* October 2015 through April 2018* Presale price trends for all new private apartments sold under a housing sale guarantee* https://www.data.go.kr/dataset/3035522/fileData.do
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import re
from plotnine import *
%ls data
%pwd
%ls
pre_sale = pd.read_csv('data/price_201806.csv', encoding='euc-kr')
pre_sale.shape
pre_sale.head()
pre_sale.tail()
# The presale price is not a numeric type; it needs converting to one.
pre_sale.info()
pre_sale_price = pre_sale['분양가격(㎡)']
# Year and month are categorical data, so convert them to strings
pre_sale['연도'] = pre_sale['연도'].astype(str)
pre_sale['월'] = pre_sale['월'].astype(str)
# Convert the presale price to a numeric type with to_numeric
pre_sale['분양가격'] = pd.to_numeric(pre_sale_price, errors='coerce')
# Compute the presale price per pyeong: create a 평당분양가격 column
pre_sale['평당분양가격'] = pre_sale['분양가격'] * 3.3 # multiply by 3.3㎡, i.e. one pyeong
pre_sale.info()
# The presale price has many missing values; count them
pre_sale.isnull().sum()
pre_sale.describe() # summary statistics: the mean is about 10 million KRW per pyeong
# Look at the 2017 data only
pre_sale_2017 = pre_sale.loc[pre_sale['연도'] == 2017]
pre_sale_2017.shape
# The identical counts confirm each province has the same set of size-category rows
pre_sale['규모구분'].value_counts()
###Output
_____no_output_____
###Markdown
Nationwide average presale price
###Code
# Presale prices clearly rise from 2015 through 2018
pd.options.display.float_format = '{:,.0f}'.format
pre_sale.groupby(pre_sale.연도).describe().T
###Output
_____no_output_____
###Markdown
Nationwide average presale price by size category
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '연도')
###Output
_____no_output_____
###Markdown
Nationwide change in presale prices. Using the rows whose size category (규모구분) is '전체' (all sizes), look at the change by year.
###Code
# Keep only the rows whose size category is '전체' (all sizes)
region_year_all = pre_sale.loc[pre_sale['규모구분'] == '전체']
region_year = region_year_all.pivot_table('평당분양가격', '지역명', '연도').reset_index()
# Cast the 2015 per-pyeong price to int and subtract it from 2018 to make the 변동액 (change) column
region_year['변동액'] = region_year['2018'] - region_year['2015'].astype(int)
max_delta_price = np.max(region_year['변동액'])*1000
min_delta_price = np.min(region_year['변동액'])*1000
mean_delta_price = np.mean(region_year['변동액'])*1000
print('Presale prices rose steadily from 2015 to 2018; the largest increase was in Jeju, at {:,.0f} KRW per pyeong.'.format(max_delta_price))
print('The smallest increase was in Ulsan, at {:,.0f} KRW per pyeong.'.format(min_delta_price))
print('The nationwide average change is {:,.0f} KRW per pyeong.'.format(mean_delta_price))
region_year
###Output
Presale prices rose steadily from 2015 to 2018; the largest increase was in Jeju, at 5,335,750 KRW per pyeong.
The smallest increase was in Ulsan, at 388,650 KRW per pyeong.
The nationwide average change is 1,667,735 KRW per pyeong.
###Markdown
Graph of year-over-year change
###Code
(ggplot(region_year_all, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
C:\Anaconda3\lib\site-packages\plotnine\layer.py:450: UserWarning: geom_bar : Removed 17 rows containing missing values.
self.data = self.geom.handle_na(self.data)
###Markdown
Total per-pyeong presale price by region* The data below gives a sense of how much presale activity took place.* Over the full data, presale activity in Seoul, Gyeonggi, Busan and Jeju looks larger than in other regions, but it should be viewed relative to presale prices.
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '지역명')
###Output
_____no_output_____
###Markdown
By size category
###Code
# In Seoul, units over 85㎡ and up to 102㎡ have the highest presale prices
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Split the chart above by region
(ggplot(pre_sale)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_bar(stat='identity', position='dodge')
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
# Draw box plots
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
pre_sale_seoul = pre_sale.loc[pre_sale['지역명']=='서울']
(ggplot(pre_sale_seoul)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Jeju, the region with the largest 2015-2018 presale price gap
(ggplot(pre_sale.loc[pre_sale['지역명']=='제주'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Look at Ulsan, which had the smallest price change from 2015 to 2018.
(ggplot(pre_sale.loc[pre_sale['지역명']=='울산'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
C:\Anaconda3\lib\site-packages\plotnine\layer.py:363: UserWarning: stat_boxplot : Removed 38 rows containing non-finite values.
data = self.stat.compute_layer(data, params, layout)
###Markdown
Pre-sale price per 3.3㎡, December 2013 to September 2015* The October 2015 to April 2018 data was converted to per-pyeong prices; this data already comes as a per-pyeong price.
###Code
df = pd.read_csv('data/price_201509.csv', encoding='euc-kr', skiprows=1, header=0)
df.shape
# The columns need renaming so the frame is easier to read in pandas.
df
year = df.iloc[0]   # row 0 of df holds the year labels
month = df.iloc[1]  # row 1 of df holds the month labels
# There are a lot of missing values; the year and month need to be combined.
year
# Combine rows 0 and 1 into new labels to use as the column header (year + month).
for i, y in enumerate(year):
    if i > 2 and i < 15:
        year[i] = '2014년 ' + month[i]  # the 2014 month columns
    elif i >= 15:
        year[i] = '2015년 ' + month[i]  # the 2015 month columns
    elif i == 2:
        year[i] = year[i] + ' ' + month[i]  # '2013년' + '12월'
    elif i == 1:
        year[i] = '시군구'
print(year)
df.columns = year  # use year as the column header
df = df.drop(df.index[[0,1]])
df  # 구분 and 시군구 still look like they need fixing
# Create a new 지역 column by combining the sido (구분) and 시군구 values.
df['구분'] = df['구분'].fillna('')
df['시군구'] = df['시군구'].fillna('')
df['지역'] = df['구분'] + df['시군구']
df['지역']
melt_columns = df.columns.copy()
melt_columns
df_2013_2015 = pd.melt(df, id_vars=['지역'], value_vars=['2013년 12월', '2014년 1월', '2014년 2월', '2014년 3월',
'2014년 4월', '2014년 5월', '2014년 6월', '2014년 7월', '2014년 8월',
'2014년 9월', '2014년 10월', '2014년 11월', '2014년 12월', '2015년 1월',
'2015년 2월', '2015년 3월', '2015년 4월', '2015년 5월', '2015년 6월',
'2015년 7월', '2015년 8월', '2015년 9월'])
df_2013_2015.head()
df_2013_2015.columns = ['지역', '기간', '분양가']
df_2013_2015.head()
df_2013_2015['연도'] = df_2013_2015['기간'].apply(lambda year_month : year_month.split('년')[0])
df_2013_2015['월'] = df_2013_2015['기간'].apply(lambda year_month : re.sub('월', '', year_month.split('년')[1]).strip())
df_2013_2015.head()
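# A quick sanity check of the split logic above on a hypothetical label:
# '2014년 7월'.split('년') -> ['2014', ' 7월'], so index 0 is the year,
# and re.sub('월', '', ' 7월').strip() -> '7' gives the month.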
###Output
_____no_output_____
###Markdown
Cleaning up the Gangwon and Busan region names
###Code
df_2013_2015['지역'].value_counts()
# Use lambda functions to fix the Busan and Gangwon labels.
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('6대광역시부산','부산', x))
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('지방강원','강원', x))
df_2013_2015['지역'].value_counts()
df_2013_2015.describe()
df_2013_2015['분양가격'] = df_2013_2015['분양가'].str.replace(',', '').astype(int)  # strip commas and cast to int
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
###Output
_____no_output_____
###Markdown
Now ready to combine the data from 2013 through April 2018
###Code
# Keep only the 전체 rows of 규모구분 and call the result df_2015_2018.
df_2015_2018 = pre_sale.loc[pre_sale['규모구분'] == '전체']
print(df_2015_2018.shape)
df_2015_2018.head()
df_2013_2015.columns
df_2013_2015_prepare = df_2013_2015[['지역', '연도', '월', '분양가격']]
df_2013_2015_prepare.head()
# We need the per-pyeong price here!
df_2013_2015_prepare.columns = ['지역명', '연도', '월', '평당분양가격']
df_2015_2018.columns
df_2015_2018_prepare = df_2015_2018[['지역명', '연도', '월', '평당분양가격']]
df_2015_2018_prepare.head()
df_2015_2018_prepare.describe()
# Use concat to append df_2015_2018_prepare below df_2013_2015_prepare.
df_2013_2018 = pd.concat([df_2013_2015_prepare, df_2015_2018_prepare])
df_2013_2018.shape
df_2013_2018.head()
df_2013_2015_region= df_2013_2015_prepare['지역명'].unique()
df_2013_2015_region
df_2015_2018_region = df_2015_2018_prepare['지역명'].unique()
df_2015_2018_region
exclude_region = [region for region in df_2013_2015_region if not region in df_2015_2018_region]
exclude_region
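# An equivalent set-based sketch (assumption: the ordering does not matter here):
# sorted(set(df_2013_2015_region) - set(df_2015_2018_region))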
df_2013_2018.shape
df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].head()
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].index, axis=0, inplace=True)
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'] == ''].index, axis=0, inplace=True)
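# Note: str.match anchors at the start of the string, so the pattern above only
# drops rows whose 지역명 begins with 전국 or 수도권. An equivalent boolean-mask
# sketch (hypothetical rewrite, same behavior):
# mask = df_2013_2018['지역명'].str.match('전국|수도권') | (df_2013_2018['지역명'] == '')
# df_2013_2018 = df_2013_2018[~mask]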
(ggplot(df_2013_2018, aes(x='연도', y='평당분양가격'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Box plot of the per-pyeong price trend for Jeju.
df_2013_2018_jeju = df_2013_2018.loc[df_2013_2018['지역명'] == '제주']
(ggplot(df_2013_2018_jeju)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 12))
)
###Output
C:\Anaconda3\lib\site-packages\plotnine\layer.py:363: UserWarning: stat_boxplot : Removed 17 rows containing non-finite values.
data = self.stat.compute_layer(data, params, layout)
###Markdown
Nationwide new private apartment pre-sale price trends* October 2015 to April 2018* Pre-sale price trends for all new private apartments sold under a housing sale guarantee* https://www.data.go.kr/dataset/3035522/fileData.do
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import re
from plotnine import *
%ls data
pre_sale = pd.read_csv('data/201806_apt_price.csv', encoding='euc-kr')
pre_sale.shape
pre_sale.head()
pre_sale.tail()
# 분양가격이 숫자 타입이 아닙니다. 숫자 타입으로 변경해줄 필요가 있겠어요.
pre_sale.info()
pre_sale_price = pre_sale['분양가격(㎡)']
# 연도와 월은 카테고리 형태의 데이터이기 때문에 스트링 형태로 변경
pre_sale['연도'] = pre_sale['연도'].astype(str)
pre_sale['월'] = pre_sale['월'].astype(str)
# 분양가격의 타입을 숫자로 변경해 줍니다.
pre_sale['분양가격'] = pd.to_numeric(pre_sale_price, errors='coerce')
# 평당 분양가격을 구해볼까요.
pre_sale['평당분양가격'] = pre_sale['분양가격'] * 3.3
pre_sale.info()
# 분양가격에 결측치가 많이 있어요.
pre_sale.isnull().sum()
pre_sale.describe()
# Look at the 2017 data only.
pre_sale_2017 = pre_sale.loc[pre_sale['연도'] == '2017']  # 연도 was cast to str above, so compare with the string '2017'
pre_sale_2017.shape
# 같은 값을 갖고 있는 걸로 시도별로 동일하게 데이터가 들어 있는 것을 확인할 수 있습니다.
pre_sale['규모구분'].value_counts()
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price
###Code
# 분양가격만 봤을 때 2015년에서 2018년으로 갈수록 오른 것을 확인할 수 있습니다.
pd.options.display.float_format = '{:,.0f}'.format
pre_sale.groupby(pre_sale.연도).describe().T
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price by size bracket
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '연도')
###Output
_____no_output_____
###Markdown
Nationwide pre-sale price change: we look at the year-by-year change using the rows whose size bracket is 전체 (all sizes).
###Code
# Keep only the rows whose size bracket is 전체 (all sizes).
region_year_all = pre_sale.loc[pre_sale['규모구분'] == '전체']
region_year = region_year_all.pivot_table('평당분양가격', '지역명', '연도').reset_index()
region_year['변동액'] = region_year['2018'] - region_year['2015']  # how the price changed over the three years
max_delta_price = np.max(region_year['변동액'].astype(int)*1000)
min_delta_price = np.min(region_year['변동액'].astype(int)*1000)
mean_delta_price = np.mean(region_year['변동액'].astype(int)*1000)
print('2015년부터 2018년까지 분양가는 계속 상승했으며, 상승액이 가장 큰 지역은 제주이며 상승액은 평당 {:,.0f}원이다.'.format(max_delta_price))
print('상승액이 가장 작은 지역은 울산이며 평당 {:,.0f}원이다.'.format(min_delta_price))
print('전국 평균 변동액은 평당 {:,.0f}원이다.'.format(mean_delta_price))
region_year
###Output
2015년부터 2018년까지 분양가는 계속 상승했으며, 상승액이 가장 큰 지역은 제주이며 상승액은 평당 5,335,000원이다.
상승액이 가장 작은 지역은 울산이며 평당 387,000원이다.
전국 평균 변동액은 평당 1,666,647원이다.
###Markdown
Year-by-year change graphs
###Code
(ggplot(region_year_all, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
C:\Users\wolever\Anaconda3\lib\site-packages\plotnine\layer.py:450: UserWarning: geom_bar : Removed 17 rows containing missing values.
self.data = self.geom.handle_na(self.data)
###Markdown
Total per-pyeong pre-sale price by region* The data below shows roughly how large pre-sale activity was in each region.* Over the full data Seoul, Gyeonggi, Busan and Jeju appear to run larger pre-sale projects than other regions, but this needs to be normalized against price.
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '지역명')
###Output
_____no_output_____
###Markdown
By size bracket
###Code
# 서울의 경우 전용면적 85㎡초과 102㎡이하가 분양가격이 가장 비싸게 나옵니다.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
# 위에 그린 그래프를 지역별로 나눠 봅니다.
(ggplot(pre_sale)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_bar(stat='identity', position='dodge')
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
# 박스플롯을 그려봅니다.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
pre_sale_seoul = pre_sale.loc[pre_sale['지역명']=='서울']
(ggplot(pre_sale_seoul)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# 2015년에서 2018년까지 분양가 차이가 가장 컸던 제주를 봅니다.
(ggplot(pre_sale.loc[pre_sale['지역명']=='제주'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# 2015년에서 2018년까지 분양가 차이가 가장 작았던 울산을 봅니다.
(ggplot(pre_sale.loc[pre_sale['지역명']=='울산'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
C:\Users\wolever\Anaconda3\lib\site-packages\plotnine\layer.py:363: UserWarning: stat_boxplot : Removed 38 rows containing non-finite values.
data = self.stat.compute_layer(data, params, layout)
###Markdown
Pre-sale price per 3.3㎡, December 2013 to September 2015* The October 2015 to April 2018 data was converted to per-pyeong prices; this data already comes as a per-pyeong price.
###Code
df = pd.read_csv('data/201509_apt_price_33.csv', \
encoding='euc-kr', skiprows=1, header=0)
df.shape
# pandas에서 보기 쉽게 컬럼을 변경해 줄 필요가 있다.
df
year = df.iloc[0]
month = df.iloc[1]
# 결측치를 채워준다.
year
# 컬럼을 새로 만들어 주기 위해 0번째와 1번째 행을 합쳐준다.
for i, y in enumerate(year):
if i > 2 and i < 15:
year[i] = '2014년 ' + month[i]
elif i >= 15:
year[i] = '2015년 ' + month[i]
elif i == 2 :
year[i] = year[i] + ' ' + month[i]
elif i == 1:
year[i] = '시군구'
print(year)
df.columns = year
df = df.drop(df.index[[0,1]])
df
# 지역 컬럼을 새로 만들어 시도와 시군구를 합쳐준다.
df['구분'] = df['구분'].fillna('')
df['시군구'] = df['시군구'].fillna('')
df['지역'] = df['구분'] + df['시군구']
df['지역']
melt_columns = df.columns.copy()
melt_columns
df_2013_2015 = pd.melt(df, id_vars=['지역'], value_vars=['2013년 12월', '2014년 1월', '2014년 2월', '2014년 3월',
'2014년 4월', '2014년 5월', '2014년 6월', '2014년 7월', '2014년 8월',
'2014년 9월', '2014년 10월', '2014년 11월', '2014년 12월', '2015년 1월',
'2015년 2월', '2015년 3월', '2015년 4월', '2015년 5월', '2015년 6월',
'2015년 7월', '2015년 8월', '2015년 9월'])
df_2013_2015.head()
df_2013_2015.columns = ['지역', '기간', '분양가']
df_2013_2015.head()
df_2013_2015['연도'] = df_2013_2015['기간'].apply(lambda year_month : year_month.split('년')[0])
df_2013_2015['월'] = df_2013_2015['기간'].apply(lambda year_month : re.sub('월', '', year_month.split('년')[1]).strip())
df_2013_2015.head()
###Output
_____no_output_____
###Markdown
Cleaning up the Gangwon and Busan region names
###Code
df_2013_2015['지역'].value_counts()
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('6대광역시부산','부산', x))
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('지방강원','강원', x))
df_2013_2015['지역'].value_counts()
df_2013_2015.describe()
df_2013_2015['분양가격'] = df_2013_2015['분양가'].str.replace(',', '').astype(int)
# 분양가격: strip the commas and convert to int.
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
###Output
_____no_output_____
###Markdown
Now ready to combine the data from 2013 through April 2018
###Code
df_2015_2018 = pre_sale.loc[pre_sale['규모구분'] == '전체']
print(df_2015_2018.shape)
df_2015_2018.head()
df_2013_2015.columns
df_2013_2015_prepare = df_2013_2015[['지역', '연도', '월', '분양가격']]
df_2013_2015_prepare.head()
df_2013_2015_prepare.columns = ['지역명', '연도', '월', '평당분양가격']
df_2015_2018.columns
df_2015_2018_prepare = df_2015_2018[['지역명', '연도', '월', '평당분양가격']]
df_2015_2018_prepare.head()
df_2015_2018_prepare.describe()
df_2013_2018 = pd.concat([df_2013_2015_prepare, df_2015_2018_prepare])
df_2013_2018.shape
df_2013_2018.head()
df_2013_2015_region= df_2013_2015_prepare['지역명'].unique()
df_2013_2015_region
df_2015_2018_region = df_2015_2018_prepare['지역명'].unique()
df_2015_2018_region
exclude_region = [region for region in df_2013_2015_region if not region in df_2015_2018_region]
# Find the region names that exist only in the 2013-2015 data; those rows get dropped below.
exclude_region
df_2013_2018.shape
df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].head()
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].index, axis=0, inplace=True)
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'] == ''].index, axis=0, inplace=True)
(ggplot(df_2013_2018, aes(x='연도', y='평당분양가격'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
df_2013_2018_jeju = df_2013_2018.loc[df_2013_2018['지역명'] == '제주']
(ggplot(df_2013_2018_jeju)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 12))
)
###Output
C:\Users\wolever\Anaconda3\lib\site-packages\plotnine\layer.py:363: UserWarning: stat_boxplot : Removed 17 rows containing non-finite values.
data = self.stat.compute_layer(data, params, layout)
###Markdown
Nationwide new private apartment pre-sale price trends* October 2015 to April 2018* Pre-sale price trends for all new private apartments sold under a housing sale guarantee* https://www.data.go.kr/dataset/3035522/fileData.do
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import re
from plotnine import *
ls open-data-apt
%pwd
pre_sale = pd.read_csv('county_mean_price_201806.csv', encoding='euc-kr')
pre_sale.shape
pre_sale.head()
pre_sale.tail()
# 분양가격이 숫자 타입이 아닙니다. 숫자 타입으로 변경해줄 필요가 있겠어요.
pre_sale.info()
pre_sale_price = pre_sale['분양가격(㎡)']
# 연도와 월은 카테고리 형태의 데이터이기 때문에 스트링 형태로 변경
pre_sale['연도'] = pre_sale['연도'].astype(str)
pre_sale['월'] = pre_sale['월'].astype(str)
# 분양가격의 타입을 숫자로 변경해 줍니다.
pre_sale['분양가격'] = pd.to_numeric(pre_sale_price, errors='coerce')
# 평당 분양가격을 구해볼까요.
pre_sale['평당분양가격'] = pre_sale['분양가격'] * 3.3
pre_sale.info()
# 분양가격에 결측치가 많이 있어요.
pre_sale.isnull().sum()
pre_sale.describe()
# Look at the 2017 data only.
pre_sale_2017 = pre_sale.loc[pre_sale['연도'] == '2017']  # 연도 was cast to str above, so compare with the string '2017'
pre_sale_2017.shape
# 같은 값을 갖고 있는 걸로 시도별로 동일하게 데이터가 들어 있는 것을 확인할 수 있습니다.
pre_sale['규모구분'].value_counts()
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price
###Code
# 분양가격만 봤을 때 2015년에서 2018년으로 갈수록 오른 것을 확인할 수 있습니다.
pd.options.display.float_format = '{:,.0f}'.format
pre_sale.groupby(pre_sale.연도).describe().T
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price by size bracket
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '연도')
###Output
_____no_output_____
###Markdown
Nationwide pre-sale price change: we look at the year-by-year change using the rows whose size bracket is 전체 (all sizes).
###Code
# Keep only the rows whose size bracket is 전체 (all sizes).
region_year_all = pre_sale.loc[pre_sale['규모구분'] == '전체']
region_year = region_year_all.pivot_table('평당분양가격', '지역명', '연도').reset_index()
region_year['변동액'] = region_year['2018'] - region_year['2015']
max_delta_price = np.max(region_year['변동액']).astype(int)*1000
min_delta_price = np.min(region_year['변동액']).astype(int)*1000
mean_delta_price = np.mean(region_year['변동액'])*1000
print('2015년부터 2018년까지 분양가는 계속 상승했으며, 상승액이 가장 큰 지역은 제주이며 상승액은 평당 {:,.0f}원이다.'.format(max_delta_price))
print('상승액이 가장 작은 지역은 울산이며 평당 {:,.0f}원이다.'.format(min_delta_price))
print('전국 평균 변동액은 평당 {:,.0f}원이다.'.format(mean_delta_price))
region_year
###Output
2015년부터 2018년까지 분양가는 계속 상승했으며, 상승액이 가장 큰 지역은 제주이며 상승액은 평당 5,335,000원이다.
상승액이 가장 작은 지역은 울산이며 평당 387,000원이다.
전국 평균 변동액은 평당 1,667,276원이다.
###Markdown
Year-by-year change graphs
###Code
(ggplot(region_year_all, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
_____no_output_____
###Markdown
Total per-pyeong pre-sale price by region* The data below shows roughly how large pre-sale activity was in each region.* Over the full data Seoul, Gyeonggi, Busan and Jeju appear to run larger pre-sale projects than other regions, but this needs to be normalized against price.
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '지역명')
###Output
_____no_output_____
###Markdown
By size bracket
###Code
# 서울의 경우 전용면적 85㎡초과 102㎡이하가 분양가격이 가장 비싸게 나옵니다.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
# 위에 그린 그래프를 지역별로 나눠 봅니다.
(ggplot(pre_sale)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_bar(stat='identity', position='dodge')
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
# 박스플롯을 그려봅니다.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
pre_sale_seoul = pre_sale.loc[pre_sale['지역명']=='서울']
(ggplot(pre_sale_seoul)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# 2015년에서 2018년까지 분양가 차이가 가장 컸던 제주를 봅니다.
(ggplot(pre_sale.loc[pre_sale['지역명']=='제주'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# 2015년에서 2018년까지 분양가 차이가 가장 작았던 울산을 봅니다.
(ggplot(pre_sale.loc[pre_sale['지역명']=='울산'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
_____no_output_____
###Markdown
Pre-sale price per 3.3㎡, December 2013 to September 2015* The October 2015 to April 2018 data was converted to per-pyeong prices; this data already comes as a per-pyeong price.
###Code
df = pd.read_csv('area_3.3_mean_price_thousand_won_201509.csv', \
encoding='euc-kr', skiprows=1, header=0)
df.shape
# pandas에서 보기 쉽게 컬럼을 변경해 줄 필요가 있다.
df
year = df.iloc[0]
month = df.iloc[1]
# 결측치를 채워준다.
year
# 컬럼을 새로 만들어 주기 위해 0번째와 1번째 행을 합쳐준다.
for i, y in enumerate(year):
if i > 2 and i < 15:
year[i] = '2014년 ' + month[i]
elif i >= 15:
year[i] = '2015년 ' + month[i]
elif i == 2 :
year[i] = year[i] + ' ' + month[i]
elif i == 1:
year[i] = '시군구'
print(year)
df.columns = year
df = df.drop(df.index[[0,1]])
df
# 지역 컬럼을 새로 만들어 시도와 시군구를 합쳐준다.
df['구분'] = df['구분'].fillna('')
df['시군구'] = df['시군구'].fillna('')
df['지역'] = df['구분'] + df['시군구']
df['지역']
melt_columns = df.columns.copy()
melt_columns
df_2013_2015 = pd.melt(df, id_vars=['지역'], value_vars=['2013년 12월', '2014년 1월', '2014년 2월', '2014년 3월',
'2014년 4월', '2014년 5월', '2014년 6월', '2014년 7월', '2014년 8월',
'2014년 9월', '2014년 10월', '2014년 11월', '2014년 12월', '2015년 1월',
'2015년 2월', '2015년 3월', '2015년 4월', '2015년 5월', '2015년 6월',
'2015년 7월', '2015년 8월', '2015년 9월'])
df_2013_2015.head()
df_2013_2015.columns = ['지역', '기간', '분양가']
df_2013_2015.head()
df_2013_2015['연도'] = df_2013_2015['기간'].apply(lambda year_month : year_month.split('년')[0])
df_2013_2015['월'] = df_2013_2015['기간'].apply(lambda year_month : re.sub('월', '', year_month.split('년')[1]).strip())
df_2013_2015.head()
###Output
_____no_output_____
###Markdown
Cleaning up the Gangwon and Busan region names
###Code
df_2013_2015['지역'].value_counts()
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('6대광역시부산','부산', x))
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('지방강원','강원', x))
df_2013_2015['지역'].value_counts()
df_2013_2015.describe()
df_2013_2015['분양가격'] = df_2013_2015['분양가'].str.replace(',', '').astype(int)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
###Output
_____no_output_____
###Markdown
Now ready to combine the data from 2013 through April 2018
###Code
df_2015_2018 = pre_sale.loc[pre_sale['규모구분'] == '전체']
print(df_2015_2018.shape)
df_2015_2018.head()
df_2013_2015.columns
df_2013_2015_prepare = df_2013_2015[['지역', '연도', '월', '분양가격']]
df_2013_2015_prepare.head()
df_2013_2015_prepare.columns = ['지역명', '연도', '월', '평당분양가격']
df_2015_2018.columns
df_2015_2018_prepare = df_2015_2018[['지역명', '연도', '월', '평당분양가격']]
df_2015_2018_prepare.head()
df_2015_2018_prepare.describe()
df_2013_2018 = pd.concat([df_2013_2015_prepare, df_2015_2018_prepare])
df_2013_2018.shape
df_2013_2018.head()
df_2013_2015_region= df_2013_2015_prepare['지역명'].unique()
df_2013_2015_region
df_2015_2018_region = df_2015_2018_prepare['지역명'].unique()
df_2015_2018_region
exclude_region = [region for region in df_2013_2015_region if not region in df_2015_2018_region]
exclude_region
df_2013_2018.shape
df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].head()
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].index, axis=0, inplace=True)
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'] == ''].index, axis=0, inplace=True)
(ggplot(df_2013_2018, aes(x='연도', y='평당분양가격'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
df_2013_2018_jeju = df_2013_2018.loc[df_2013_2018['지역명'] == '제주']
(ggplot(df_2013_2018_jeju)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 12))
)
###Output
_____no_output_____
###Markdown
Nationwide new private apartment pre-sale price trends* October 2015 to April 2018* Pre-sale price trends for all new private apartments sold under a housing sale guarantee* https://www.data.go.kr/dataset/3035522/fileData.do
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import re
from plotnine import *
%ls data
pre_sale = pd.read_csv('data/전국 평균 분양가격(2018.6월).csv', encoding='euc-kr')
pre_sale.shape
pre_sale.head()
pre_sale.tail()
# 분양가격이 숫자 타입이 아닙니다. 숫자 타입으로 변경해줄 필요가 있겠어요.
pre_sale.info()
pre_sale_price = pre_sale['분양가격(㎡)']
# Year and month are categorical data, so cast them to strings
# (no arithmetic will be done on them).
pre_sale['연도'] = pre_sale['연도'].astype(str)
pre_sale['월'] = pre_sale['월'].astype(str)
# Convert the pre-sale price to a number.
# Either astype or to_numeric can do the conversion.
# Create a new 분양가격 column holding the numeric values from 분양가격(㎡).
pre_sale['분양가격'] = pd.to_numeric(pre_sale_price, errors='coerce')
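# A tiny illustration (toy values, not the real column) of the difference:
# pd.Series(['100', 'x']).astype(float) raises a ValueError,
# while pd.to_numeric(pd.Series(['100', 'x']), errors='coerce') gives 100.0, NaN.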
# Compute the per-pyeong price:
# create a new 평당분양가격 column whose values are 분양가격 multiplied by 3.3.
pre_sale['평당분양가격'] = pre_sale['분양가격'] * 3.3
pre_sale.info()
# 분양가격에 결측치가 많이 있어요.
pre_sale.isnull().sum()
pre_sale.describe()
# Look at the 2017 data only.
pre_sale_2017 = pre_sale.loc[pre_sale['연도'] == '2017']  # 연도 was cast to str above, so compare with the string '2017'
pre_sale_2017.shape
# 같은 값을 갖고 있는 걸로 시도별로 동일하게 데이터가 들어 있는 것을 확인할 수 있습니다.
pre_sale['규모구분'].value_counts()
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price
###Code
# 분양가격만 봤을 때 2015년에서 2018년으로 갈수록 오른 것을 확인할 수 있습니다.
pd.options.display.float_format = '{:,.0f}'.format
pre_sale.groupby(pre_sale.연도).describe().T
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price by size bracket
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '연도')
###Output
_____no_output_____
###Markdown
Nationwide pre-sale price change: we look at the year-by-year change using the rows whose size bracket is 전체 (all sizes).
###Code
# Keep only the rows whose size bracket is 전체 (all sizes).
region_year_all = pre_sale.loc[pre_sale['규모구분'] == '전체']
region_year = region_year_all.pivot_table('평당분양가격', '지역명', '연도').reset_index()
region_year['변동액'] = region_year['2018'] - region_year['2015'].astype(int)
max_delta_price = np.max(region_year['변동액'])*1000
min_delta_price = np.min(region_year['변동액'])*1000
mean_delta_price = np.mean(region_year['변동액'])*1000
print('2015년부터 2018년까지 분양가는 계속 상승했으며, 상승액이 가장 큰 지역은 제주이며 상승액은 평당 {:,.0f}원이다.'.format(max_delta_price))
print('상승액이 가장 작은 지역은 울산이며 평당 {:,.0f}원이다.'.format(min_delta_price))
print('전국 평균 변동액은 평당 {:,.0f}원이다.'.format(mean_delta_price))
region_year
###Output
2015년부터 2018년까지 분양가는 계속 상승했으며, 상승액이 가장 큰 지역은 제주이며 상승액은 평당 5,335,750원이다.
상승액이 가장 작은 지역은 울산이며 평당 388,650원이다.
전국 평균 변동액은 평당 1,667,735원이다.
###Markdown
Year-by-year change graphs
###Code
(ggplot(region_year_all, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='Noto Sans CJK KR'))
)
###Output
/anaconda3/lib/python3.6/site-packages/plotnine/layer.py:450: UserWarning: geom_bar : Removed 17 rows containing missing values.
self.data = self.geom.handle_na(self.data)
###Markdown
Total per-pyeong pre-sale price by region* The data below shows roughly how large pre-sale activity was in each region.* Over the full data Seoul, Gyeonggi, Busan and Jeju appear to run larger pre-sale projects than other regions, but this needs to be normalized against price.
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '지역명')
###Output
_____no_output_____
###Markdown
By size bracket
###Code
# 서울의 경우 전용면적 85㎡초과 102㎡이하가 분양가격이 가장 비싸게 나옵니다.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='Noto Sans CJK KR'))
)
# 위에 그린 그래프를 지역별로 나눠 봅니다.
(ggplot(pre_sale)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_bar(stat='identity', position='dodge')
+ facet_wrap('지역명')
+ theme(text=element_text(family='Noto Sans CJK KR'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
# 박스플롯을 그려봅니다.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_boxplot()
+ theme(text=element_text(family='Noto Sans CJK KR'),
figure_size=(12, 6))
)
pre_sale_seoul = pre_sale.loc[pre_sale['지역명']=='서울']
(ggplot(pre_sale_seoul)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='Noto Sans CJK KR'))
)
# 2015년에서 2018년까지 분양가 차이가 가장 컸던 제주를 봅니다.
(ggplot(pre_sale.loc[pre_sale['지역명']=='제주'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='Noto Sans CJK KR'))
)
# 2015년에서 2018년까지 분양가 차이가 가장 작았던 울산을 봅니다.
(ggplot(pre_sale.loc[pre_sale['지역명']=='울산'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='Noto Sans CJK KR'))
)
###Output
/anaconda3/lib/python3.6/site-packages/plotnine/layer.py:363: UserWarning: stat_boxplot : Removed 38 rows containing non-finite values.
data = self.stat.compute_layer(data, params, layout)
###Markdown
Pre-sale price per 3.3㎡, December 2013 to September 2015* The October 2015 to April 2018 data was converted to per-pyeong prices; this data already comes as a per-pyeong price.
###Code
%ls data
df = pd.read_csv('data/지역별 3.3㎡당 평균 분양가격(천원)_15.09월.csv', \
encoding='euc-kr', skiprows=1, header=0)
df.shape
# pandas에서 보기 쉽게 컬럼을 변경해 줄 필요가 있다.
df
year = df.iloc[0]
month = df.iloc[1]
# 결측치를 채워준다.
year
# 컬럼을 새로 만들어 주기 위해 0번째와 1번째 행을 합쳐준다.
for i, y in enumerate(year):
if i > 2 and i < 15:
year[i] = '2014년 ' + month[i]
elif i >= 15:
year[i] = '2015년 ' + month[i]
elif i == 2 :
year[i] = year[i] + ' ' + month[i]
elif i == 1:
year[i] = '시군구'
print(year)
df.columns = year
df = df.drop(df.index[[0,1]])
df
# 지역 컬럼을 새로 만들어 시도와 시군구를 합쳐준다.
df['구분'] = df['구분'].fillna('')
df['시군구'] = df['시군구'].fillna('')
df['지역'] = df['구분'] + df['시군구']
df['지역']
melt_columns = df.columns.copy()
melt_columns
df_2013_2015 = pd.melt(df, id_vars=['지역'], value_vars=['2013년 12월', '2014년 1월', '2014년 2월', '2014년 3월',
'2014년 4월', '2014년 5월', '2014년 6월', '2014년 7월', '2014년 8월',
'2014년 9월', '2014년 10월', '2014년 11월', '2014년 12월', '2015년 1월',
'2015년 2월', '2015년 3월', '2015년 4월', '2015년 5월', '2015년 6월',
'2015년 7월', '2015년 8월', '2015년 9월'])
df_2013_2015.head()
df_2013_2015.columns = ['지역', '기간', '분양가']
df_2013_2015.head()
df_2013_2015['연도'] = df_2013_2015['기간'].apply(lambda year_month : year_month.split('년')[0])
df_2013_2015['월'] = df_2013_2015['기간'].apply(lambda year_month : re.sub('월', '', year_month.split('년')[1]).strip())
df_2013_2015.head()
###Output
_____no_output_____
###Markdown
Cleaning up the Gangwon and Busan region names
###Code
df_2013_2015['지역'].value_counts()
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('6대광역시부산','부산', x))
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('지방강원','강원', x))
df_2013_2015['지역'].value_counts()
df_2013_2015.describe()
df_2013_2015['분양가격'] = df_2013_2015['분양가'].str.replace(',', '').astype(int)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_boxplot()
+ theme(text=element_text(family='Noto Sans CJK KR'),
figure_size=(12, 6))
)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='Noto Sans CJK KR'),
figure_size=(12, 6))
)
###Output
_____no_output_____
###Markdown
Now ready to combine the data from 2013 through April 2018
###Code
df_2015_2018 = pre_sale.loc[pre_sale['규모구분'] == '전체']
print(df_2015_2018.shape)
df_2015_2018.head()
df_2013_2015.columns
df_2013_2015_prepare = df_2013_2015[['지역', '연도', '월', '분양가격']]
df_2013_2015_prepare.head()
df_2013_2015_prepare.columns = ['지역명', '연도', '월', '평당분양가격']
df_2015_2018.columns
df_2015_2018_prepare = df_2015_2018[['지역명', '연도', '월', '평당분양가격']]
df_2015_2018_prepare.head()
df_2015_2018_prepare.describe()
df_2013_2018 = pd.concat([df_2013_2015_prepare, df_2015_2018_prepare])
df_2013_2018.shape
df_2013_2018.head()
df_2013_2015_region= df_2013_2015_prepare['지역명'].unique()
df_2013_2015_region
df_2015_2018_region = df_2015_2018_prepare['지역명'].unique()
df_2015_2018_region
exclude_region = [region for region in df_2013_2015_region if not region in df_2015_2018_region]
exclude_region
df_2013_2018.shape
df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].head()
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].index, axis=0, inplace=True)
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'] == ''].index, axis=0, inplace=True)
(ggplot(df_2013_2018, aes(x='연도', y='평당분양가격'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='Noto Sans CJK KR'))
)
(ggplot(df_2013_2018, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='Noto Sans CJK KR'),
figure_size=(12, 6))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='Noto Sans CJK KR'))
)
df_2013_2018_jeju = df_2013_2018.loc[df_2013_2018['지역명'] == '제주']
(ggplot(df_2013_2018_jeju)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='Noto Sans CJK KR'))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ facet_wrap('지역명')
+ theme(text=element_text(family='Noto Sans CJK KR'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
###Output
/anaconda3/lib/python3.6/site-packages/plotnine/layer.py:363: UserWarning: stat_boxplot : Removed 17 rows containing non-finite values.
data = self.stat.compute_layer(data, params, layout)
###Markdown
Nationwide new private apartment pre-sale price trends* October 2015 to April 2018* Pre-sale price trends for all new private apartments sold under a housing sale guarantee* https://www.data.go.kr/dataset/3035522/fileData.do
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import re
from plotnine import *
%ls data
# If the file fails to load, remove the Korean characters from the file name.
pre_sale = pd.read_csv('data/2018.06.csv', encoding='euc-kr')
pre_sale.shape
pre_sale.head()
pre_sale.tail()
# 분양가격이 숫자 타입이 아닙니다. 숫자 타입으로 변경해줄 필요가 있겠어요.
pre_sale.info()
pre_sale_price = pre_sale['분양가격(㎡)']
# 연도와 월은 카테고리 형태의 데이터이기 때문에 스트링 형태로 변경
pre_sale['연도'] = pre_sale['연도'].astype(str)
pre_sale['월'] = pre_sale['월'].astype(str)
# 분양가격의 타입을 숫자로 변경해 줍니다.
pre_sale['분양가격'] = pd.to_numeric(pre_sale_price, errors='coerce')
# 평당 분양가격을 구해볼까요.
pre_sale['평당분양가격'] = pre_sale['분양가격'] * 3.3
pre_sale.info()
# 분양가격에 결측치가 많이 있어요.
pre_sale.isnull().sum()
pre_sale.describe()
# Look at the 2017 data only.
pre_sale_2017 = pre_sale.loc[pre_sale['연도'] == '2017']  # 연도 was cast to str above, so compare with the string '2017'
pre_sale_2017.shape
# 같은 값을 갖고 있는 걸로 시도별로 동일하게 데이터가 들어 있는 것을 확인할 수 있습니다.
pre_sale['규모구분'].value_counts()
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price
###Code
# 분양가격만 봤을 때 2015년에서 2018년으로 갈수록 오른 것을 확인할 수 있습니다.
pd.options.display.float_format = '{:,.0f}'.format
pre_sale.groupby(pre_sale.연도).describe().T
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price by size bracket
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '연도')
###Output
_____no_output_____
###Markdown
Nationwide pre-sale price change: we look at the year-by-year change using the rows whose size bracket is 전체 (all sizes).
###Code
# Keep only the rows whose size bracket is 전체 (all sizes).
region_year_all = pre_sale.loc[pre_sale['규모구분'] == '전체']
region_year = region_year_all.pivot_table('평당분양가격', '지역명', '연도').reset_index()
region_year['변동액'] = region_year['2018'] - region_year['2015']
max_delta_price = np.max(region_year['변동액'])*1000
min_delta_price = np.min(region_year['변동액'])*1000
mean_delta_price = np.mean(region_year['변동액'])*1000
print('2015년부터 2018년까지 분양가는 계속 상승했으며, 상승액이 가장 큰 지역은 제주이며 상승액은 평당 {:,.0f}원이다.'.format(max_delta_price))
print('상승액이 가장 작은 지역은 울산이며 평당 {:,.0f}원이다.'.format(min_delta_price))
print('전국 평균 변동액은 평당 {:,.0f}원이다.'.format(mean_delta_price))
region_year
# Idea for later: also examine the change depending on subway access.
###Output
2015년부터 2018년까지 분양가는 계속 상승했으며, 상승액이 가장 큰 지역은 제주이며 상승액은 평당 5,335,550원이다.
상승액이 가장 작은 지역은 울산이며 평당 387,750원이다.
전국 평균 변동액은 평당 1,667,276원이다.
###Markdown
Year-by-year change graphs
###Code
(ggplot(region_year_all, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='HYnamM'))
)
###Output
C:\Users\home\Anaconda3\lib\site-packages\plotnine\layer.py:450: UserWarning: geom_bar : Removed 17 rows containing missing values.
self.data = self.geom.handle_na(self.data)
###Markdown
Total per-pyeong pre-sale price by region* The data below shows roughly how large pre-sale activity was in each region.* Over the full data Seoul, Gyeonggi, Busan and Jeju appear to run larger pre-sale projects than other regions, but this needs to be normalized against price.
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '지역명')
###Output
_____no_output_____
###Markdown
By size bracket
###Code
# 서울의 경우 전용면적 85㎡초과 102㎡이하가 분양가격이 가장 비싸게 나옵니다.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='HYnamM'))
)
# 위에 그린 그래프를 지역별로 나눠 봅니다.
(ggplot(pre_sale)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_bar(stat='identity', position='dodge')
+ facet_wrap('지역명')
+ theme(text=element_text(family='HYnamM'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
# 박스플롯을 그려봅니다.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_boxplot()
+ theme(text=element_text(family='HYnamM'),
figure_size=(12, 6))
)
pre_sale_seoul = pre_sale.loc[pre_sale['지역명']=='서울']
(ggplot(pre_sale_seoul)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='HYnamM'))
)
# 2015년에서 2018년까지 분양가 차이가 가장 컸던 제주를 봅니다.
(ggplot(pre_sale.loc[pre_sale['지역명']=='제주'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='HYnamM'))
)
# 2015년에서 2018년까지 분양가 차이가 가장 작았던 울산을 봅니다.
(ggplot(pre_sale.loc[pre_sale['지역명']=='울산'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='HYnamM'))
)
###Output
C:\Users\home\Anaconda3\lib\site-packages\plotnine\layer.py:363: UserWarning: stat_boxplot : Removed 38 rows containing non-finite values.
data = self.stat.compute_layer(data, params, layout)
###Markdown
Pre-sale price per 3.3㎡, December 2013 to September 2015* The October 2015 to April 2018 data was converted to per-pyeong prices; this data already comes as a per-pyeong price.
###Code
df = pd.read_csv('data/2015.09.csv', \
encoding='euc-kr', skiprows=1, header=0)
df.shape
# pandas에서 보기 쉽게 컬럼을 변경해 줄 필요가 있다.
df
year = df.iloc[0]
month = df.iloc[1]
# 결측치를 채워준다.
year
# 컬럼을 새로 만들어 주기 위해 0번째와 1번째 행을 합쳐준다.
for i, y in enumerate(year):
if i > 2 and i < 15:
year[i] = '2014년 ' + month[i]
elif i >= 15:
year[i] = '2015년 ' + month[i]
elif i == 2 :
year[i] = year[i] + ' ' + month[i]
elif i == 1:
year[i] = '시군구'
print(year)
df.columns = year
df = df.drop(df.index[[0,1]])
df
# 지역 컬럼을 새로 만들어 시도와 시군구를 합쳐준다.
df['구분'] = df['구분'].fillna('')
df['시군구'] = df['시군구'].fillna('')
df['지역'] = df['구분'] + df['시군구']
df['지역']
melt_columns = df.columns.copy()
melt_columns
df_2013_2015 = pd.melt(df, id_vars=['지역'], value_vars=['2013년 12월', '2014년 1월', '2014년 2월', '2014년 3월',
'2014년 4월', '2014년 5월', '2014년 6월', '2014년 7월', '2014년 8월',
'2014년 9월', '2014년 10월', '2014년 11월', '2014년 12월', '2015년 1월',
'2015년 2월', '2015년 3월', '2015년 4월', '2015년 5월', '2015년 6월',
'2015년 7월', '2015년 8월', '2015년 9월'])
df_2013_2015.head()
df_2013_2015.columns = ['지역', '기간', '분양가']
df_2013_2015.head()
df_2013_2015['연도'] = df_2013_2015['기간'].apply(lambda year_month : year_month.split('년')[0])
df_2013_2015['월'] = df_2013_2015['기간'].apply(lambda year_month : re.sub('월', '', year_month.split('년')[1]).strip())
df_2013_2015.head()
###Output
_____no_output_____
###Markdown
Cleaning up the Gangwon and Busan region names
###Code
df_2013_2015['지역'].value_counts()
# Use regular expressions to fix the Busan and Gangwon region names.
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('6대광역시부산','부산', x))
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('지방강원','강원', x))
df_2013_2015['지역'].value_counts()
df_2013_2015.describe()
# Remove the commas and cast the type to int.
df_2013_2015['분양가격'] = df_2013_2015['분양가'].str.replace(',', '').astype(int)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_boxplot()
+ theme(text=element_text(family='HYnamM'),
figure_size=(12, 6))
)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='HYnamM'),
figure_size=(12, 6))
)
###Output
_____no_output_____
###Markdown
Now ready to combine the data from 2013 through April 2018
###Code
df_2015_2018 = pre_sale.loc[pre_sale['규모구분'] == '전체']
print(df_2015_2018.shape)
df_2015_2018.head()
df_2013_2015.columns
df_2013_2015_prepare = df_2013_2015[['지역', '연도', '월', '분양가격']]
df_2013_2015_prepare.head()
df_2013_2015_prepare.columns = ['지역명', '연도', '월', '평당분양가격']
df_2015_2018.columns
df_2015_2018_prepare = df_2015_2018[['지역명', '연도', '월', '평당분양가격']]
df_2015_2018_prepare.head()
df_2015_2018_prepare.describe()
df_2013_2018 = pd.concat([df_2013_2015_prepare, df_2015_2018_prepare])
df_2013_2018.shape
df_2013_2018.head()
df_2013_2015_region= df_2013_2015_prepare['지역명'].unique()
df_2013_2015_region
df_2015_2018_region = df_2015_2018_prepare['지역명'].unique()
df_2015_2018_region
exclude_region = [region for region in df_2013_2015_region if not region in df_2015_2018_region]
exclude_region
df_2013_2018.shape
df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].head()
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].index, axis=0, inplace=True)
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'] == ''].index, axis=0, inplace=True)
(ggplot(df_2013_2018, aes(x='연도', y='평당분양가격'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='HYnamM'))
)
(ggplot(df_2013_2018, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='HYnamM'),
figure_size=(12, 6))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='HYnamM'))
)
df_2013_2018_jeju = df_2013_2018.loc[df_2013_2018['지역명'] == '제주']
(ggplot(df_2013_2018_jeju)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='HYnamM'))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ facet_wrap('지역명')
+ theme(text=element_text(family='HYnamM'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 12))
)
###Output
C:\Users\home\Anaconda3\lib\site-packages\plotnine\layer.py:363: UserWarning: stat_boxplot : Removed 17 rows containing non-finite values.
data = self.stat.compute_layer(data, params, layout)
###Markdown
Nationwide new private apartment pre-sale price trends* October 2015 to April 2018* Pre-sale price trends for all new private apartments sold under a housing sale guarantee* https://www.data.go.kr/dataset/3035522/fileData.do
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import re
from plotnine import *
%pwd
%ls data
pre_sale = pd.read_csv('/Users/yunkim/Desktop/dataitgirls/open-data-apt/data/National_average_selling price_~2018.6_.csv', encoding='euc-kr')
pre_sale.shape
pre_sale.head(10)
pre_sale.tail()
# 분양가격이 숫자 타입이 아닙니다. 숫자 타입으로 변경해줄 필요가 있겠어요.
pre_sale.info()
pre_sale_price = pre_sale['분양가격(㎡)']
# 연도와 월은 카테고리 형태의 데이터이기 때문에 스트링 형태로 변경
pre_sale['연도'] = pre_sale['연도'].astype(str)
pre_sale['월'] = pre_sale['월'].astype(str)
# 분양가격의 타입을 숫자로 변경해 줍니다.
pre_sale['분양가격'] = pd.to_numeric(pre_sale_price, errors='coerce')
# 평당 분양가격을 구해볼까요.
pre_sale['평당분양가격'] = pre_sale['분양가격'] * 3.3
pre_sale.info()
# 분양가격에 결측치가 많이 있어요.
pre_sale.isnull().sum()
pre_sale.describe()
pre_sale.hist()
# Look at the 2017 data only.
pre_sale_2017 = pre_sale.loc[pre_sale['연도'] == '2017']  # 연도 was cast to str above, so compare with the string '2017'
pre_sale_2017.shape
# 같은 값을 갖고 있는 걸로 시도별로 동일하게 데이터가 들어 있는 것을 확인할 수 있습니다.
pre_sale['규모구분'].value_counts()
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price
###Code
# 분양가격만 봤을 때 2015년에서 2018년으로 갈수록 오른 것을 확인할 수 있습니다.
pd.options.display.float_format = '{:,.0f}'.format
pre_sale.groupby(pre_sale.연도).describe().T
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price by size bracket
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '연도')
###Output
_____no_output_____
###Markdown
Nationwide pre-sale price change: we look at the year-by-year change using the rows whose size bracket is 전체 (all sizes).
###Code
# Keep only the rows whose size bracket is 전체 (all sizes).
region_year_all = pre_sale.loc[pre_sale['규모구분'] == '전체']
region_year = region_year_all.pivot_table('평당분양가격', '지역명', '연도').reset_index()
region_year['변동액'] = region_year['2018'] - region_year['2015']
max_delta_price = np.max(region_year['변동액'])*1000
min_delta_price = np.min(region_year['변동액'])*1000
mean_delta_price = np.mean(region_year['변동액'])*1000
print('2015년부터 2018년까지 분양가는 계속 상승했으며, 상승액이 가장 큰 지역은 제주이며 상승액은 평당 {:,.0f}원이다.'.format(max_delta_price))
print('상승액이 가장 작은 지역은 울산이며 평당 {:,.0f}원이다.'.format(min_delta_price))
print('전국 평균 변동액은 평당 {:,.0f}원이다.'.format(mean_delta_price))
region_year
###Output
2015년부터 2018년까지 분양가는 계속 상승했으며, 상승액이 가장 큰 지역은 제주이며 상승액은 평당 5,335,550원이다.
상승액이 가장 작은 지역은 울산이며 평당 387,750원이다.
전국 평균 변동액은 평당 1,667,276원이다.
###Markdown
Year-by-year change graphs
###Code
(ggplot(region_year_all, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
/Users/yunkim/anaconda3/lib/python3.6/site-packages/plotnine/layer.py:452: UserWarning: geom_bar : Removed 17 rows containing missing values.
self.data = self.geom.handle_na(self.data)
###Markdown
Total per-pyeong pre-sale price by region* The data below shows roughly how large pre-sale activity was in each region.* Over the full data Seoul, Gyeonggi, Busan and Jeju appear to run larger pre-sale projects than other regions, but this needs to be normalized against price.
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '지역명')
###Output
_____no_output_____
###Markdown
By size bracket
###Code
# 서울의 경우 전용면적 85㎡초과 102㎡이하가 분양가격이 가장 비싸게 나옵니다.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
# 위에 그린 그래프를 지역별로 나눠 봅니다.
(ggplot(pre_sale)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_bar(stat='identity', position='dodge')
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
# 박스플롯을 그려봅니다.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
pre_sale_seoul = pre_sale.loc[pre_sale['지역명']=='서울']
(ggplot(pre_sale_seoul)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# 2015년에서 2018년까지 분양가 차이가 가장 컸던 제주를 봅니다.
(ggplot(pre_sale.loc[pre_sale['지역명']=='제주'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# 2015년에서 2018년까지 분양가 차이가 가장 작았던 울산을 봅니다.
(ggplot(pre_sale.loc[pre_sale['지역명']=='울산'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
/Users/yunkim/anaconda3/lib/python3.6/site-packages/plotnine/layer.py:363: UserWarning: stat_boxplot : Removed 38 rows containing non-finite values.
data = self.stat.compute_layer(data, params, layout)
###Markdown
Pre-sale price per 3.3㎡, December 2013 to September 2015* The October 2015 to April 2018 data was converted to per-pyeong prices; this data already comes as a per-pyeong price.
###Code
%pwd
df = pd.read_csv('/Users/yunkim/Desktop/dataitgirls/open-data-apt/data/average_selling price_per _3.3㎡ by region_1000won_~15.09.csv', \
encoding='euc-kr', skiprows=1, header=0)
df.shape
# pandas에서 보기 쉽게 컬럼을 변경해 줄 필요가 있다.
df
year = df.iloc[0]
month = df.iloc[1]
# 결측치를 채워준다.
year
# 컬럼을 새로 만들어 주기 위해 0번째와 1번째 행을 합쳐준다.
for i, y in enumerate(year):
if i > 2 and i < 15:
year[i] = '2014년 ' + month[i]
elif i >= 15:
year[i] = '2015년 ' + month[i]
elif i == 2 :
year[i] = year[i] + ' ' + month[i]
elif i == 1:
year[i] = '시군구'
print(year)
df.columns = year
df = df.drop(df.index[[0,1]])
df
# 지역 컬럼을 새로 만들어 시도와 시군구를 합쳐준다.
df['구분'] = df['구분'].fillna('')
df['시군구'] = df['시군구'].fillna('')
df['지역'] = df['구분'] + df['시군구']
df['지역']
melt_columns = df.columns.copy()
melt_columns
df_2013_2015 = pd.melt(df, id_vars=['지역'], value_vars=['2013년 12월', '2014년 1월', '2014년 2월', '2014년 3월',
'2014년 4월', '2014년 5월', '2014년 6월', '2014년 7월', '2014년 8월',
'2014년 9월', '2014년 10월', '2014년 11월', '2014년 12월', '2015년 1월',
'2015년 2월', '2015년 3월', '2015년 4월', '2015년 5월', '2015년 6월',
'2015년 7월', '2015년 8월', '2015년 9월'])
df_2013_2015.head()
df_2013_2015.columns = ['지역', '기간', '분양가']
df_2013_2015.head()
df_2013_2015['연도'] = df_2013_2015['기간'].apply(lambda year_month : year_month.split('년')[0])
df_2013_2015['월'] = df_2013_2015['기간'].apply(lambda year_month : re.sub('월', '', year_month.split('년')[1]).strip())
df_2013_2015.head()
###Output
_____no_output_____
###Markdown
Cleaning up the Gangwon and Busan region names
###Code
df_2013_2015['지역'].value_counts()
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('6대광역시부산','부산', x))
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('지방강원','강원', x))
df_2013_2015['지역'].value_counts()
df_2013_2015.describe()
df_2013_2015['분양가격'] = df_2013_2015['분양가'].str.replace(',', '').astype(int)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
###Output
_____no_output_____
###Markdown
Now ready to combine the data from 2013 through April 2018
###Code
df_2015_2018 = pre_sale.loc[pre_sale['규모구분'] == '전체']
print(df_2015_2018.shape)
df_2015_2018.head()
df_2013_2015.columns
df_2013_2015_prepare = df_2013_2015[['지역', '연도', '월', '분양가격']]
df_2013_2015_prepare.head()
df_2013_2015_prepare.columns = ['지역명', '연도', '월', '평당분양가격']
df_2015_2018.columns
df_2015_2018_prepare = df_2015_2018[['지역명', '연도', '월', '평당분양가격']]
df_2015_2018_prepare.head()
df_2015_2018_prepare.describe()
df_2013_2018 = pd.concat([df_2013_2015_prepare, df_2015_2018_prepare])
df_2013_2018.shape
df_2013_2018.head()
df_2013_2015_region= df_2013_2015_prepare['지역명'].unique()
df_2013_2015_region
df_2015_2018_region = df_2015_2018_prepare['지역명'].unique()
df_2015_2018_region
exclude_region = [region for region in df_2013_2015_region if not region in df_2015_2018_region]
exclude_region
df_2013_2018.shape
df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].head()
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].index, axis=0, inplace=True)
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'] == ''].index, axis=0, inplace=True)
(ggplot(df_2013_2018, aes(x='연도', y='평당분양가격'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
df_2013_2018_jeju = df_2013_2018.loc[df_2013_2018['지역명'] == '제주']
(ggplot(df_2013_2018_jeju)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 12))
)
###Output
/Users/yunkim/anaconda3/lib/python3.6/site-packages/plotnine/layer.py:363: UserWarning: stat_boxplot : Removed 17 rows containing non-finite values.
data = self.stat.compute_layer(data, params, layout)
###Markdown
Nationwide new private apartment pre-sale price trends* October 2015 to April 2018* Pre-sale price trends for all new private apartments sold under a housing sale guarantee* https://www.data.go.kr/dataset/3035522/fileData.do
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import re
from plotnine import *
%ls
pre_sale = pd.read_csv('average_apt_201806.csv', encoding='euc-kr')
pre_sale.shape
pre_sale.head(5)
pre_sale.tail()
# 분양가격이 숫자 타입이 아닙니다. 숫자 타입으로 변경해줄 필요가 있겠어요.
pre_sale.info()
pre_sale_price = pre_sale['분양가격(㎡)']
# 연도와 월은 카테고리 형태의 데이터이기 때문에 스트링 형태로 변경
pre_sale['연도'] = pre_sale['연도'].astype(str)
pre_sale['월'] = pre_sale['월'].astype(str)
# Convert the pre-sale price to a number (to_numeric and astype do similar jobs).
pre_sale['분양가격'] = pd.to_numeric(pre_sale_price, errors='coerce')
# 평당 분양가격을 구해볼까요.
pre_sale['평당분양가격'] = pre_sale['분양가격'] * 3.3
pre_sale.info()
# 분양가격에 결측치가 많이 있어요.
pre_sale.isnull().sum()
pre_sale.describe()  # note the unit/scale of 평당분양가격
# Look at the 2017 data only.
pre_sale_2017 = pre_sale.loc[pre_sale['연도'] == '2017']  # 연도 was cast to str above, so compare with the string '2017'
pre_sale_2017.shape
# 같은 값을 갖고 있는 걸로 시도별로 동일하게 데이터가 들어 있는 것을 확인할 수 있습니다.
pre_sale['규모구분'].value_counts()
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price
###Code
# 분양가격만 봤을 때 2015년에서 2018년으로 갈수록 오른 것을 확인할 수 있습니다.
pd.options.display.float_format = '{:,.0f}'.format
pre_sale.groupby(pre_sale.연도).describe().T
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price by size bracket
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '연도')
###Output
_____no_output_____
###Markdown
Nationwide pre-sale price change: we look at the year-by-year change using the rows whose size bracket is 전체 (all sizes).
###Code
# Keep only the rows whose size bracket is 전체 (all sizes).
region_year_all = pre_sale.loc[pre_sale['규모구분'] == '전체']
region_year = region_year_all.pivot_table('평당분양가격', '지역명', '연도').reset_index()
region_year['변동액'] = region_year['2018'] - region_year['2015']
max_delta_price = np.max(region_year['변동액'])*1000
min_delta_price = np.min(region_year['변동액'])*1000
mean_delta_price = np.mean(region_year['변동액'])*1000
print('2015년부터 2018년까지 분양가는 계속 상승했으며, 상승액이 가장 큰 지역은 제주이며 상승액은 평당 {:,.0f}원이다.'.format(max_delta_price))
print('상승액이 가장 작은 지역은 울산이며 평당 {:,.0f}원이다.'.format(min_delta_price))
print('전국 평균 변동액은 평당 {:,.0f}원이다.'.format(mean_delta_price))
region_year
###Output
2015년부터 2018년까지 분양가는 계속 상승했으며, 상승액이 가장 큰 지역은 제주이며 상승액은 평당 5,335,550원이다.
상승액이 가장 작은 지역은 울산이며 평당 387,750원이다.
전국 평균 변동액은 평당 1,667,276원이다.
###Markdown
Year-by-year change graphs
###Code
(ggplot(region_year_all, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumGothic'))
)
###Output
C:\ProgramData\Anaconda3\lib\site-packages\plotnine\layer.py:450: UserWarning: geom_bar : Removed 17 rows containing missing values.
self.data = self.geom.handle_na(self.data)
###Markdown
Total per-pyeong pre-sale price by region* The data below shows roughly how large pre-sale activity was in each region.* Over the full data Seoul, Gyeonggi, Busan and Jeju appear to run larger pre-sale projects than other regions, but this needs to be normalized against price.
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '지역명')
###Output
_____no_output_____
###Markdown
By size bracket
###Code
# 서울의 경우 전용면적 85㎡초과 102㎡이하가 분양가격이 가장 비싸게 나옵니다.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumGothic'))
)
# 위에 그린 그래프를 지역별로 나눠 봅니다.
(ggplot(pre_sale)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_bar(stat='identity', position='dodge')
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
# 박스플롯을 그려봅니다.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumGothic'),
figure_size=(12, 6))
)
pre_sale_seoul = pre_sale.loc[pre_sale['지역명']=='서울']
(ggplot(pre_sale_seoul)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumGothic'))
)
# 2015년에서 2018년까지 분양가 차이가 가장 컸던 제주를 봅니다.
(ggplot(pre_sale.loc[pre_sale['지역명']=='제주'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumGothic'))
)
# 2015년에서 2018년까지 분양가 차이가 가장 작았던 울산을 봅니다.
(ggplot(pre_sale.loc[pre_sale['지역명']=='울산'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumGothic'))
)
###Output
_____no_output_____
###Markdown
Pre-sale price per 3.3㎡, December 2013 to September 2015* The October 2015 to April 2018 data was converted to per-pyeong prices; this data already comes as a per-pyeong price.
###Code
df = pd.read_csv('average_apt_201509.csv', encoding='euc-kr', skiprows=1, header=0)
df.shape
# pandas에서 보기 쉽게 컬럼을 변경해 줄 필요가 있다.
df
year = df.iloc[0]
month = df.iloc[1]
# 결측치를 채워준다.
year
# 컬럼을 새로 만들어 주기 위해 0번째와 1번째 행을 합쳐준다.
for i, y in enumerate(year):
if i > 2 and i < 15:
year[i] = '2014년 ' + month[i]
elif i >= 15:
year[i] = '2015년 ' + month[i]
elif i == 2 :
year[i] = year[i] + ' ' + month[i]
elif i == 1:
year[i] = '시군구'
print(year)
df.columns = year
df = df.drop(df.index[[0,1]])  # rows 0 and 1 (the 구분/시도 header rows) are no longer needed
df
# Create a new 지역 column combining sido and 시군구 (note there are still NaNs above).
df['구분'] = df['구분'].fillna('')
df['시군구'] = df['시군구'].fillna('')
df['지역'] = df['구분'] + df['시군구']
df['지역']
melt_columns = df.columns.copy()
melt_columns
###Output
_____no_output_____
###Markdown
The melt function: create the rows we need (bring what sits in the columns down into rows)
###Code
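# A minimal sketch of pd.melt on a toy frame (hypothetical data) to show how
# wide month columns become rows before the real melt below:
# toy = pd.DataFrame({'지역': ['서울'], '2014년 1월': [18189], '2014년 2월': [18255]})
# pd.melt(toy, id_vars=['지역'], value_vars=['2014년 1월', '2014년 2월'])
# ->   지역   variable  value
# 0  서울  2014년 1월  18189
# 1  서울  2014년 2월  18255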
df_2013_2015 = pd.melt(df, id_vars=['지역'], value_vars=['2013년 12월', '2014년 1월', '2014년 2월', '2014년 3월',
'2014년 4월', '2014년 5월', '2014년 6월', '2014년 7월', '2014년 8월',
'2014년 9월', '2014년 10월', '2014년 11월', '2014년 12월', '2015년 1월',
'2015년 2월', '2015년 3월', '2015년 4월', '2015년 5월', '2015년 6월',
'2015년 7월', '2015년 8월', '2015년 9월'])
df_2013_2015.head()
df_2013_2015.columns = ['지역', '기간', '분양가']
df_2013_2015.head()
df_2013_2015['연도'] = df_2013_2015['기간'].apply(lambda year_month : year_month.split('년')[0])
df_2013_2015['월'] = df_2013_2015['기간'].apply(lambda year_month : re.sub('월', '', year_month.split('년')[1]).strip())
df_2013_2015.head()
###Output
_____no_output_____
###Markdown
Cleaning up the Gangwon and Busan region names
###Code
df_2013_2015['지역'].value_counts()
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('6대광역시부산','부산', x))
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('지방강원','강원', x))
df_2013_2015['지역'].value_counts()
df_2013_2015.describe()
df_2013_2015['분양가격'] = df_2013_2015['분양가'].str.replace(',', '').astype(int)  # strip commas, cast to int, matching the naming used for the 2018 data above
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumGothic'),
figure_size=(12, 6))
)
###Output
_____no_output_____
###Markdown
Now ready to combine the data from 2013 through April 2018
###Code
df_2015_2018 = pre_sale.loc[pre_sale['규모구분'] == '전체']
print(df_2015_2018.shape)
df_2015_2018.head()
df_2013_2015.columns
df_2013_2015_prepare = df_2013_2015[['지역', '연도', '월', '분양가격']]
df_2013_2015_prepare.head()
df_2013_2015_prepare.columns = ['지역명', '연도', '월', '평당분양가격']
df_2015_2018.columns
df_2015_2018_prepare = df_2015_2018[['지역명', '연도', '월', '평당분양가격']]
df_2015_2018_prepare.head()
df_2015_2018_prepare.describe()
df_2013_2018 = pd.concat([df_2013_2015_prepare, df_2015_2018_prepare])  # concat stacks the frames vertically
# (merge joins horizontally, matching on columns)
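# A tiny sketch of the difference (toy frames a and b, hypothetical names):
# pd.concat([a, b])          -> stacks rows, aligning on column names
# pd.concat([a, b], axis=1)  -> places the frames side by side
# a.merge(b, on='key')       -> joins columns on matching key values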
df_2013_2018.shape
df_2013_2018.head()
df_2013_2018.tail()
df_2013_2015_region= df_2013_2015_prepare['지역명'].unique()
df_2013_2015_region
df_2015_2018_region = df_2015_2018_prepare['지역명'].unique()
df_2015_2018_region
exclude_region = [region for region in df_2013_2015_region if not region in df_2015_2018_region]
exclude_region  # region names we must not use (present in the 2013 data but not in the 2018 data)
df_2013_2018.shape
df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].head()
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].index, axis=0, inplace=True)
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'] == ''].index, axis=0, inplace=True)  # 전국/수도권 and empty names dropped
(ggplot(df_2013_2018, aes(x='연도', y='평당분양가격'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumGothic'))
)
(ggplot(df_2013_2018, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumGothic'))
)
df_2013_2018_jeju = df_2013_2018.loc[df_2013_2018['지역명'] == '제주']
(ggplot(df_2013_2018_jeju)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumGothic'))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 12))
)
###Output
_____no_output_____
###Markdown
Nationwide new private apartment pre-sale price trends
* October 2015 to April 2018
* Pre-sale price trends for all new private apartments sold under the housing sale guarantee
* https://www.data.go.kr/dataset/3035522/fileData.do
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import re
from plotnine import *
%pwd
%ls data
pre_sale = pd.read_csv('data/national_average_price.csv', encoding='euc-kr')
pre_sale.shape
pre_sale.head()
pre_sale.tail()
# The pre-sale price column is not numeric; it needs converting to a numeric type.
pre_sale.info()
pre_sale_price = pre_sale['분양가격(㎡)']
# Year and month are categorical, so store them as strings
pre_sale['연도'] = pre_sale['연도'].astype(str)
pre_sale['월'] = pre_sale['월'].astype(str)
# Convert the pre-sale price to a numeric type.
pre_sale['분양가격'] = pd.to_numeric(pre_sale_price, errors='coerce')
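# What errors='coerce' does, shown on toy values: anything that cannot be
# parsed as a number becomes NaN instead of raising.
pd.to_numeric(pd.Series(['2221', '  ', None]), errors='coerce')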
# Compute the price per pyeong (1 pyeong = 3.3 m²).
pre_sale['평당분양가격'] = pre_sale['분양가격'] * 3.3
pre_sale.info()
# The price column has many missing values.
pre_sale.isnull().sum()
pre_sale.describe()
# Look at the 2017 data only.
pre_sale_2017 = pre_sale.loc[pre_sale['연도'] == '2017']  # 연도 was cast to str above, so compare against the string '2017'
pre_sale_2017.shape
# The identical counts confirm that each region carries the same set of rows.
pre_sale['규모구분'].value_counts()
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price
###Code
# Looking at price alone, it clearly rose from 2015 to 2018.
pd.options.display.float_format = '{:,.0f}'.format
pre_sale.groupby(pre_sale.연도).describe().T
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price by unit size
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '연도')
###Output
_____no_output_____
###Markdown
Nationwide pre-sale price change
Using the rows where 규모구분 is 전체 (all sizes), we look at the change in price by year.
###Code
# Keep only the rows where 규모구분 is 전체 (all sizes).
region_year_all = pre_sale.loc[pre_sale['규모구분'] == '전체']
region_year = region_year_all.pivot_table('평당분양가격', '지역명', '연도').reset_index()
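# pivot_table here averages 평당분양가격 per (지역명, 연도) and spreads 연도 into
# columns; a toy equivalent with made-up numbers:
toy = pd.DataFrame({'지역명': ['서울', '서울'], '연도': ['2015', '2018'], '평당분양가격': [1000, 1200]})
toy.pivot_table('평당분양가격', '지역명', '연도')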
region_year['변동액'] = region_year['2018'] - region_year['2015']
max_delta_price = np.max(region_year['변동액'])*1000
min_delta_price = np.min(region_year['변동액'])*1000
mean_delta_price = np.mean(region_year['변동액'])*1000
print('From 2015 to 2018 pre-sale prices rose steadily; the region with the largest increase is 제주, at {:,.0f} won per pyeong.'.format(max_delta_price))
print('The region with the smallest increase is 울산, at {:,.0f} won per pyeong.'.format(min_delta_price))
print('The nationwide average change is {:,.0f} won per pyeong.'.format(mean_delta_price))
region_year
###Output
From 2015 to 2018 pre-sale prices rose steadily; the region with the largest increase is 제주, at 5,335,550 won per pyeong.
The region with the smallest increase is 울산, at 387,750 won per pyeong.
The nationwide average change is 1,667,276 won per pyeong.
###Markdown
Year-over-year change chart
###Code
(ggplot(region_year_all, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
C:\Users\user1\AppData\Roaming\Python\Python36\site-packages\plotnine\layer.py:450: UserWarning: geom_bar : Removed 17 rows containing missing values.
self.data = self.geom.handle_na(self.data)
###Markdown
Total price per pyeong by region
* A rough look at the scale of pre-sale activity in each region.
* Over the full data, 서울, 경기, 부산 and 제주 appear to have larger pre-sale activity than other regions, but this should be normalized against price level.
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '지역명')
###Output
_____no_output_____
###Markdown
By unit size
###Code
# In Seoul, units over 85㎡ and up to 102㎡ come out most expensive.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Facet the chart above by region.
(ggplot(pre_sale)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_bar(stat='identity', position='dodge')
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
# Draw box plots.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
pre_sale_seoul = pre_sale.loc[pre_sale['지역명']=='서울']
(ggplot(pre_sale_seoul)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Jeju (제주), the region with the largest 2015-2018 price change.
(ggplot(pre_sale.loc[pre_sale['지역명']=='제주'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Ulsan (울산), the region with the smallest 2015-2018 price change.
(ggplot(pre_sale.loc[pre_sale['지역명']=='울산'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
C:\Users\user1\AppData\Roaming\Python\Python36\site-packages\plotnine\layer.py:363: UserWarning: stat_boxplot : Removed 38 rows containing non-finite values.
data = self.stat.compute_layer(data, params, layout)
###Markdown
Pre-sale price per 3.3㎡, December 2013 to September 2015
* The October 2015 - April 2018 data above was converted to price per pyeong; this dataset already comes as price per pyeong.
###Code
df = pd.read_csv('data/regional_average_price.csv', \
encoding='euc-kr', skiprows=1, header=0)
df.shape
# The columns need renaming to be readable in pandas.
df
pre_sale.head()
year = df.iloc[0]
month = df.iloc[1]
# Fill in the missing values.
year
# Combine rows 0 and 1 to build the new column names.
for i, y in enumerate(year):
if i > 2 and i < 15:
year[i] = '2014년 ' + month[i]
elif i >= 15:
year[i] = '2015년 ' + month[i]
elif i == 2 :
year[i] = year[i] + ' ' + month[i]
elif i == 1:
year[i] = '시군구'
print(year)
df.columns = year
df = df.drop(df.index[[0,1]])
df
# Build a new 지역 column by joining 구분 (province) and 시군구 (district).
df['구분'] = df['구분'].fillna('')
df['시군구'] = df['시군구'].fillna('')
df['지역'] = df['구분'] + df['시군구']
df['지역']
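# Why fillna('') first: NaN + str stays NaN in pandas, so a missing 구분 or
# 시군구 would wipe out the combined label. Toy check with made-up values:
pd.Series(['서울', None]).fillna('') + pd.Series([None, '강남']).fillna('')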
melt_columns = df.columns.copy()
melt_columns
df_2013_2015 = pd.melt(df, id_vars=['지역'], value_vars=['2013년 12월', '2014년 1월', '2014년 2월', '2014년 3월',
'2014년 4월', '2014년 5월', '2014년 6월', '2014년 7월', '2014년 8월',
'2014년 9월', '2014년 10월', '2014년 11월', '2014년 12월', '2015년 1월',
'2015년 2월', '2015년 3월', '2015년 4월', '2015년 5월', '2015년 6월',
'2015년 7월', '2015년 8월', '2015년 9월'])
df_2013_2015.head()
df_2013_2015.columns = ['지역', '기간', '분양가']
df_2013_2015.head()
df_2013_2015['연도'] = df_2013_2015['기간'].apply(lambda year_month : year_month.split('년')[0])
df_2013_2015['월'] = df_2013_2015['기간'].apply(lambda year_month : re.sub('월', '', year_month.split('년')[1]).strip())
df_2013_2015.head()
###Output
_____no_output_____
###Markdown
Clean up the region names 강원 (Gangwon) and 부산 (Busan)
###Code
df_2013_2015['지역'].value_counts()
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('6대광역시부산','부산', x))
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('지방강원','강원', x))
df_2013_2015['지역'].value_counts()
df_2013_2015.describe()
df_2013_2015['분양가격'] = df_2013_2015['분양가'].str.replace(',', '').astype(int)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
###Output
_____no_output_____
###Markdown
Now the data from 2013 through April 2018 is ready to be combined
###Code
df_2015_2018 = pre_sale.loc[pre_sale['규모구분'] == '전체']
print(df_2015_2018.shape)
df_2015_2018.head()
df_2013_2015.columns
df_2013_2015_prepare = df_2013_2015[['지역', '연도', '월', '분양가격']]
df_2013_2015_prepare.head()
df_2013_2015_prepare.columns = ['지역명', '연도', '월', '평당분양가격']
df_2015_2018.columns
df_2015_2018_prepare = df_2015_2018[['지역명', '연도', '월', '평당분양가격']]
df_2015_2018_prepare.head()
df_2015_2018_prepare.describe()
df_2013_2018 = pd.concat([df_2013_2015_prepare, df_2015_2018_prepare])
df_2013_2018.shape
df_2013_2018.head()
df_2013_2015_region= df_2013_2015_prepare['지역명'].unique()
df_2013_2015_region
df_2015_2018_region = df_2015_2018_prepare['지역명'].unique()
df_2015_2018_region
exclude_region = [region for region in df_2013_2015_region if not region in df_2015_2018_region]
exclude_region
df_2013_2018.shape
df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].head()
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].index, axis=0, inplace=True)
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'] == ''].index, axis=0, inplace=True)
(ggplot(df_2013_2018, aes(x='연도', y='평당분양가격'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
df_2013_2018_jeju = df_2013_2018.loc[df_2013_2018['지역명'] == '제주']
(ggplot(df_2013_2018_jeju)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 12))
)
###Output
C:\Users\user1\AppData\Roaming\Python\Python36\site-packages\plotnine\layer.py:363: UserWarning: stat_boxplot : Removed 17 rows containing non-finite values.
data = self.stat.compute_layer(data, params, layout)
###Markdown
Nationwide new private apartment pre-sale price trends
* October 2015 to April 2018
* Pre-sale price trends for all new private apartments sold under the housing sale guarantee
* https://www.data.go.kr/dataset/3035522/fileData.do
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import re
from plotnine import *
%ls data
pre_sale = pd.read_csv('data/aptprice_201806.csv', encoding='euc-kr')
pre_sale.shape
pre_sale.head()
pre_sale.tail()
# The pre-sale price column is not numeric; it needs converting to a numeric type.
pre_sale.info()
pre_sale_price = pre_sale['분양가격(㎡)']
# Year and month are categorical, so store them as strings
pre_sale['연도'] = pre_sale['연도'].astype(str)
pre_sale['월'] = pre_sale['월'].astype(str)
# Convert the pre-sale price to a numeric type.
pre_sale['분양가격'] = pd.to_numeric(pre_sale_price, errors='coerce')
# Compute the price per pyeong (1 pyeong = 3.3 m²).
pre_sale['평당분양가격'] = pre_sale['분양가격'] * 3.3
pre_sale.info()
# The price column has many missing values.
pre_sale.isnull().sum()
pre_sale.describe()
# Look at the 2017 data only.
pre_sale_2017 = pre_sale.loc[pre_sale['연도'] == '2017']  # 연도 was cast to str above, so compare against the string '2017'
pre_sale_2017.shape
# The identical counts confirm that each region carries the same set of rows.
pre_sale['규모구분'].value_counts()
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price
###Code
# Looking at price alone, it clearly rose from 2015 to 2018.
pd.options.display.float_format = '{:,.0f}'.format
pre_sale.groupby(pre_sale.연도).describe().T
###Output
_____no_output_____
###Markdown
Nationwide average pre-sale price by unit size
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '연도')
###Output
_____no_output_____
###Markdown
Nationwide pre-sale price change
Using the rows where 규모구분 is 전체 (all sizes), we look at the change in price by year.
###Code
# Keep only the rows where 규모구분 is 전체 (all sizes).
region_year_all = pre_sale.loc[pre_sale['규모구분'] == '전체']
region_year = region_year_all.pivot_table('평당분양가격', '지역명', '연도').reset_index()
region_year['변동액'] = region_year['2018'] - region_year['2015'].astype(int)
max_delta_price = np.max(region_year['변동액']).astype(int)*1000
min_delta_price = np.min(region_year['변동액']).astype(int)*1000
mean_delta_price = np.mean(region_year['변동액'])*1000
print('From 2015 to 2018 pre-sale prices rose steadily; the region with the largest increase is 제주, at {:,.0f} won per pyeong.'.format(max_delta_price))
print('The region with the smallest increase is 울산, at {:,.0f} won per pyeong.'.format(min_delta_price))
print('The nationwide average change is {:,.0f} won per pyeong.'.format(mean_delta_price))
region_year
###Output
From 2015 to 2018 pre-sale prices rose steadily; the region with the largest increase is 제주, at 5,335,000 won per pyeong.
The region with the smallest increase is 울산, at 388,000 won per pyeong.
The nationwide average change is 1,667,735 won per pyeong.
###Markdown
Year-over-year change chart
###Code
(ggplot(region_year_all, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
C:\ProgramData\Anaconda3\lib\site-packages\plotnine\layer.py:450: UserWarning: geom_bar : Removed 17 rows containing missing values.
self.data = self.geom.handle_na(self.data)
###Markdown
Total price per pyeong by region
* A rough look at the scale of pre-sale activity in each region.
* Over the full data, 서울, 경기, 부산 and 제주 appear to have larger pre-sale activity than other regions, but this should be normalized against price level.
###Code
pre_sale.pivot_table('평당분양가격', '규모구분', '지역명')
###Output
_____no_output_____
###Markdown
By unit size
###Code
# In Seoul, units over 85㎡ and up to 102㎡ come out most expensive.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Facet the chart above by region.
(ggplot(pre_sale)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_bar(stat='identity', position='dodge')
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
# Draw box plots.
(ggplot(pre_sale, aes(x='지역명', y='평당분양가격', fill='규모구분'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
pre_sale_seoul = pre_sale.loc[pre_sale['지역명']=='서울']
(ggplot(pre_sale_seoul)
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Jeju (제주), the region with the largest 2015-2018 price change.
(ggplot(pre_sale.loc[pre_sale['지역명']=='제주'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
# Ulsan (울산), the region with the smallest 2015-2018 price change.
(ggplot(pre_sale.loc[pre_sale['지역명']=='울산'])
+ aes(x='연도', y='평당분양가격', fill='규모구분')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
###Output
C:\ProgramData\Anaconda3\lib\site-packages\plotnine\layer.py:363: UserWarning: stat_boxplot : Removed 38 rows containing non-finite values.
data = self.stat.compute_layer(data, params, layout)
###Markdown
Pre-sale price per 3.3㎡, December 2013 to September 2015
* The October 2015 - April 2018 data above was converted to price per pyeong; this dataset already comes as price per pyeong.
###Code
df = pd.read_csv('data/aptprice_201509.csv', \
encoding='euc-kr', skiprows=1, header=0)
df.shape
# The columns need renaming to be readable in pandas.
df
year = df.iloc[0]
month = df.iloc[1]
# Fill in the missing values.
year
# Combine rows 0 and 1 to build the new column names.
for i, y in enumerate(year):
if i > 2 and i < 15:
year[i] = '2014년 ' + month[i]
elif i >= 15:
year[i] = '2015년 ' + month[i]
elif i == 2 :
year[i] = year[i] + ' ' + month[i]
elif i == 1:
year[i] = '시군구'
print(year)
df.columns = year
df = df.drop(df.index[[0,1]])
df
# Build a new 지역 column by joining 구분 (province) and 시군구 (district).
df['구분'] = df['구분'].fillna('')
df['시군구'] = df['시군구'].fillna('')
df['지역'] = df['구분'] + df['시군구']
df['지역']
melt_columns = df.columns.copy()
melt_columns
df_2013_2015 = pd.melt(df, id_vars=['지역'], value_vars=['2013년 12월', '2014년 1월', '2014년 2월', '2014년 3월',
'2014년 4월', '2014년 5월', '2014년 6월', '2014년 7월', '2014년 8월',
'2014년 9월', '2014년 10월', '2014년 11월', '2014년 12월', '2015년 1월',
'2015년 2월', '2015년 3월', '2015년 4월', '2015년 5월', '2015년 6월',
'2015년 7월', '2015년 8월', '2015년 9월'])
df_2013_2015.head()
df_2013_2015.columns = ['지역', '기간', '분양가']
df_2013_2015.head()
df_2013_2015['연도'] = df_2013_2015['기간'].apply(lambda year_month : year_month.split('년')[0])
df_2013_2015['월'] = df_2013_2015['기간'].apply(lambda year_month : re.sub('월', '', year_month.split('년')[1]).strip())
df_2013_2015.head()
###Output
_____no_output_____
###Markdown
Clean up the region names 강원 (Gangwon) and 부산 (Busan)
###Code
df_2013_2015['지역'].value_counts()
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('6대광역시부산','부산', x))
df_2013_2015['지역'] = df_2013_2015['지역'].apply(lambda x: re.sub('지방강원','강원', x))
df_2013_2015['지역'].value_counts()
df_2013_2015.describe()
df_2013_2015['분양가격'] = df_2013_2015['분양가'].str.replace(',', '').astype(int)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2015, aes(x='지역', y='분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
###Output
_____no_output_____
###Markdown
Now the data from 2013 through April 2018 is ready to be combined
###Code
df_2015_2018 = pre_sale.loc[pre_sale['규모구분'] == '전체']
print(df_2015_2018.shape)
df_2015_2018.head()
df_2013_2015.columns
df_2013_2015_prepare = df_2013_2015[['지역', '연도', '월', '분양가격']]
df_2013_2015_prepare.head()
df_2013_2015_prepare.columns = ['지역명', '연도', '월', '평당분양가격']
df_2015_2018.columns
df_2015_2018_prepare = df_2015_2018[['지역명', '연도', '월', '평당분양가격']]
df_2015_2018_prepare.head()
df_2015_2018_prepare.describe()
df_2013_2018 = pd.concat([df_2013_2015_prepare, df_2015_2018_prepare])
df_2013_2018.shape
df_2013_2018.head()
df_2013_2015_region= df_2013_2015_prepare['지역명'].unique()
df_2013_2015_region
df_2015_2018_region = df_2015_2018_prepare['지역명'].unique()
df_2015_2018_region
exclude_region = [region for region in df_2013_2015_region if not region in df_2015_2018_region]
exclude_region
df_2013_2018.shape
df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].head()
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'].str.match('전국|수도권')].index, axis=0, inplace=True)
df_2013_2018.drop(df_2013_2018.loc[df_2013_2018['지역명'] == ''].index, axis=0, inplace=True)
(ggplot(df_2013_2018, aes(x='연도', y='평당분양가격'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018, aes(x='지역명', y='평당분양가격', fill='연도'))
+ geom_bar(stat='identity', position='dodge')
+ theme(text=element_text(family='NanumBarunGothic'),
figure_size=(12, 6))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
df_2013_2018_jeju = df_2013_2018.loc[df_2013_2018['지역명'] == '제주']
(ggplot(df_2013_2018_jeju)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ theme(text=element_text(family='NanumBarunGothic'))
)
(ggplot(df_2013_2018)
+ aes(x='연도', y='평당분양가격')
+ geom_boxplot()
+ facet_wrap('지역명')
+ theme(text=element_text(family='NanumBarunGothic'),
axis_text_x=element_text(rotation=70),
figure_size=(12, 6))
)
###Output
_____no_output_____ |
Prototype Notebook/legacy/Geothealler big function 3D-Testing-degree0-Copy1.ipynb | ###Markdown
theano_set_3D_nugget_degree0
###Code
par2 = 1/49*10**2/14/3
par3 = 10**2/14/3
par4 = 10000
w = 1/7
nugget = 0.01
par5 = par3
CG = test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,par3,par4,nugget,w,par5)[2]
G = np.concatenate(test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,par3,par4,nugget,w,par5)[-3:])
G = np.append(G,[0,0,0,0])
CG,np.linalg.solve(CG,G), 10**2/14/3, test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,par3,par4,nugget,w,par5)[3:6]
par2 = 1/49*10**2/14/3
par3 = 10**2/14/3
par4 = 10000
w = 1/7
nugget = 0.01
par5 = par3
CG = test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,par3,par4,nugget,w,par5)[2]
G = np.concatenate(test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,par3,par4,nugget,w,par5)[-3:])
G = np.append(G,[0,0,0])
CG,np.linalg.solve(CG,G)
# Printing SED
par2 = 10**2/14/3
par3 = 10**2/14/3
par4 = 10000
w = 1
nugget = 0.01
par5 = par3
s1 = test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,par3,par4,nugget,w,par5)[-4]
s2 = test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,par3,par4,nugget,w,par5)[-3]
s3 = test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,par3,par4,nugget,w,par5)[-2]
s4 = test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,par3,par4,nugget,w,par5)[-1]
s1,s2,s3,s4
test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,par3,par4,nugget,w,par5)[0];
# Calculating a,b,c
l1 = -0.0448251
l3 = 2.9136523
l4 = 0.130866
a = -0.3433333*l1/l4
b = (-0.3433333*l3+1)/l4
c = (-a*l1-b*l3)/l4
a,b,c,0.3433333*l3
0.7056028/0.0162278, 0.0162278/0.7056028, 0.1176058/0.0027041, 3.6179315/0.0199456, 0.0199456/3.6179315
181.3899556794481/43.491660811360525, 43.491660811360525/10**2/14/3
0.7056028/0.1176058 ,0.0162278/0.0027041, 3.6179315/0.1, 180/6
test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,par3,par4,nugget,w,par5)[5]
a = 0.11760583830047379
b = -0.0027041052391104997
c = 0.1
CG[0,-1] = a
CG[-1,0] = a
CG[-1,2] = b
CG[2,-1] = b
CG[-1,-1] = c
CG
CG,np.linalg.solve(CG,G)
test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,par3,par4,nugget,w,par5)[5]
CG[-1,0]
np.linalg.solve(CG,G)
test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
10000,par3,par4,nugget,w,par5)[8]
c_sol_ult = np.array([
-0.154971249149853,
0,
3.3634073049185047,
1.6440930894388599, -2.208027365601707
])
c_sol_17 =np.array([ -0.07519608514102089913411219868066837079823017120361328125,
0,
3.33264951481644633446421721600927412509918212890625,
1.3778510792932487927231477442546747624874114990234375,
-2.295940519242440469582788864499889314174652099609375,
])
test.a.get_value()
((10/8)**2)/14/3
par2 = 10**2/14/3
par3 = -10**2/14/3
par4 = 10**2/14/3
w = 1
nugget = +0.0
par5 = 0.05
print (par3)
test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,par3,par4,nugget,w,par5)[2],test.geoMigueller(
dips,dips_angles,azimuths,polarity, rest, ref,
par2, par3,par4,nugget,w,par5)[1], c_sol_ult
np.linalg.solve(CG,G)
test.geoMigueller(test.dips,dips_angles,azimuths,polarity, rest, ref,
a,0,c,-0.333,8*a,f)[-1]
test.c_o.set_value(14)
test.nugget_effect_grad.set_value(-0.3)
test.potential_field = test.interpolate(test.dips,dips_angles,
azimuths,polarity, rest, ref)[0].reshape(10,10,10)
test.interpolate(dips,dips_angles,azimuths,polarity, rest, ref)[1][:,0]
test.plot_potential_field_2D(direction = "y")
c_sol=np.array([ -0.07519608514102089913411219868066837079823017120361328125,
0,
3.33264951481644633446421721600927412509918212890625,
1.3778510792932487927231477442546747624874114990234375,
-2.295940519242440469582788864499889314174652099609375,
])
# Calculation of gradients
G_x = np.sin(np.deg2rad(dips_angles)) * np.sin(np.deg2rad(azimuths)) * polarity
G_y = np.sin(np.deg2rad(dips_angles)) * np.cos(np.deg2rad(azimuths)) * polarity
G_z = np.cos(np.deg2rad(dips_angles)) * polarity
G_x, G_y, G_z
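# Sanity check (added assertion, assuming polarity is +/-1): each
# (G_x, G_y, G_z) built from dip/azimuth this way is a unit vector, since
# sin^2(dip)*(sin^2(az)+cos^2(az)) + cos^2(dip) = 1.
import numpy as np
assert np.allclose(np.sqrt(G_x**2 + G_y**2 + G_z**2), 1.0)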
_,h1 = np.argmin((abs(test.grid - ref[0])).sum(1)), test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref)[0][np.argmin((abs(test.grid - ref[0])).sum(1))]
_, h2 =np.argmin((abs(test.grid - ref[1])).sum(1)), test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref)[0][np.argmin((abs(test.grid - ref[1])).sum(1))]
# Gradients check
G_x, G_y, G_z = test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref)[-3:]
G_x, G_y, G_z;
# Plotting function
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.cm as cmx
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
h = np.array([h1,h2])
cm = plt.get_cmap("jet")
cNorm = matplotlib.colors.Normalize(vmin=h.min(), vmax=h.max())
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
sol = test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref)[0].reshape(200,200,200,
order = "C")[:,:,:]
#sol = np.swapaxes(sol,0,1)
from skimage import measure
isolines = np.linspace(h1,h2,2)
#vertices = measure.marching_cubes(sol, isolines[0], spacing = (0.2,0.2,0.2),
# gradient_direction = "descent")[0]
for i in isolines[0:10]:
vertices = measure.marching_cubes(sol, i, spacing = (0.05,0.05,0.05),
gradient_direction = "ascent")[0]
ax.scatter(vertices[::40,0],vertices[::40,1],vertices[::40,2],color=scalarMap.to_rgba(i),
alpha = 0.2) #color=scalarMap.to_rgba(vertices[::10,2])
ax.scatter(layers[0][:,0],layers[0][:,1],layers[0][:,2], s = 50, c = "r" )
ax.scatter(layers[1][:,0],layers[1][:,1],layers[1][:,2], s = 50, c = "g" )
ax.quiver3D(dips[:,0],dips[:,1],dips[:,2], G_x,G_y,G_z, pivot = "tail", linewidths = 2)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.set_xlim(0,10)
ax.set_ylim(0,10)
ax.set_zlim(0,10)
#ax.scatter(simplices[:,0],simplices[:,1],simplices[:,2])
c_sol = np.array(([-7.2386541205560206435620784759521484375E-14],
[-1.5265566588595902430824935436248779296875E-14],
[-1.154631945610162802040576934814453125E-14],
[6.21724893790087662637233734130859375E-15],
[-5.9952043329758453182876110076904296875E-15],
[7.99360577730112709105014801025390625E-15],
[2.220446049250313080847263336181640625E-15],
[-3.641531520770513452589511871337890625E-14],
[8.0380146982861333526670932769775390625E-14],
[0.8816416857576581111999303175252862274646759033203125],
[9.355249580684368737593104015104472637176513671875],
[-0.1793850547262900996248191631821100600063800811767578125],
[0.047149729032205163481439313954979297704994678497314453125],
[-8.994519501910499315044944523833692073822021484375],
[ 0.4451793036427798000431721447966992855072021484375],
[-1.7549816402777651536126768405665643513202667236328125],
[0.0920938443689063301889063950511626899242401123046875],
[0.36837537747562587586713789278292097151279449462890625])).squeeze()
c_sol.squeeze()
# Geomodeller solutions
# this is correct
c_sol_17 =np.array([ -0.07519608514102089913411219868066837079823017120361328125,
0,
3.33264951481644633446421721600927412509918212890625,
1.3778510792932487927231477442546747624874114990234375,
-2.295940519242440469582788864499889314174652099609375,
])
c_sol_100= np.array([-0.0137274193697543359,
0,
3.0568482261959,
1.2783812756016,
-2.051133867308])
# this is correct
c_sol_10 = np.array([-0.151502417422,
0,
3.3353696310127,
1.6023015420914,
-2.111778593772])
c_sol_10_90 = np.array([0.419745323675709047783,
-1.06527926020528126070109E-10,
3.25838975306877864923,
1.22202703670627732535,
-2.1228757261714990001]
)
c_sol_10_2dips = np.array([ -0.451454922983293982508001,
-1.716482167839337824588,
-3.978534821682584707878E-10,
9.4238070915992040076531E-10,
2.7910108234647372782433,
2.0918189335108881010683,
2.7639520139409876620106,
0.2536147925783167056401
,]
)
c_sol_10_2dips = np.array( [
-0.49345757792304362210344947925477754,
-1.761009665135806256941464198462199,
0,
0,
2.788719784344781960072623405721969902,
2.152601573628609710198134052916429936,
2.693816367628854013815953294397331774,
0.371681174428028004985691268302616663
]
)
test.set_extent(0,1000,0,1000,0,1000)
test.a.set_value(10)
test.geoMigueller(test.dips,dips_angles,azimuths,polarity, rest, ref,
a,0,c,-0.333,8*a,f)[1]
test.a.get_value()
import pymc as pm
a = pm.Uniform('a', lower=-2, upper=1, value = 0.1 )
b = pm.Uniform('b', lower=-5, upper=1,)
c = pm.Uniform('c', lower=-100, upper=10, )
d = pm.Uniform('d', lower=-10, upper=10, value = -0.3333)
e = pm.Uniform('e', lower=-1.1, upper=10, value = 0.8 )
f = pm.Uniform('f', lower=-1.1, upper=1.1, value = 0.26666 )
@pm.deterministic
def this(value = 0, a = a ,b = b,c = c,d = d,e= e,f =f):
sol = test.geoMigueller(test.dips,dips_angles,azimuths,polarity, rest, ref,
0.17,-17**2/14/3,c,0,1,f)[1]
#error = abs(sol-c_sol)
#print (c_sol_10_2dips, sol)
return sol
like= pm.Normal("likelihood", this, 1./np.square(1e-40),
value = c_sol_17, observed = True, size = len(c_sol_17)
)
model = pm.Model([a,b,c,d,e,f, like])
M = pm.MAP(model)
M.fit()
print(
"\n a",a.value,
"\n b grad-> c_o GI",b.value,
"\n c -> does not exist", c.value,
"\n d -> nugeet", d.value,
"\n e", e.value,
"\n f",f.value)
this.value, c_sol_17, c.value/a.value
1.42/0.29
100/14/3
print(
"\n a",a.value,
"\n b",b.value,
"\n c", c.value,
"\n d", d.value,
"\n e", e.value,
"\n f",f.value)
this.value, c_sol_10_2dips, e.value/a.value
17**2/(14)
par2 = 0.00033333 * 17**2
test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,1000,400000,-6,par2,1)[1], c_sol_17
par2 = 17**2/(14)
w = 0.15
print (par2)
test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,par2,1,-1.01,w*par2,1)[2],test.geoMigueller(
dips,dips_angles,azimuths,polarity, rest, ref,
par2,par2,1,1.01,w*par2,1)[1], c_sol_17
par2 = 17**2/(14*3)
w = -11
print (par2)
test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,1000,400000,-0.01,w*par2,1)[2], test.geoMigueller(
dips,dips_angles,azimuths,polarity, rest, ref,
par2,1000,400000,-0.34,w*par2,1)[1], c_sol_17
for i in range(0,5):
print (i, round((-0.3333-0.01*i),3),
test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,
par2,0,1,-0.3333*i,-8*par2,1)[1],)
par = 0.1047
test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,par,-0.333333,1,8*par,1,1)[1]
np.square(17)/0.1047, 0.1047/np.square(17)
np.square(10)/0.03, 0.03/np.square(10)
test.a.set_value(100)
par = np.square(100)*0.00033
g_sol=test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,par*0.01,0,1000000.,-1./3
,8*par*0.01,1)[1]
print(g_sol)
print(c_sol)
print(g_sol-c_sol, sum(g_sol-c_sol))
np.square(100)*0.0003,8/14, 14/8, 0.058824 * 14
test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,par,-1/3,1,8*par,1,1)[1]
par = np.square(100)*0.000345
test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,par,-0.01/3,0.01,8*par,1,1)[1]
0.855/0.33, 17*17/42
a.value, b.value, c.value,d.value, e.value, f.value, this.value, c_sol, test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,a,b,-3*b,d,1,1)[1]
-3*b.value
a.value, b.value, c.value,d.value, e.value, f.value, this.value, c_sol, test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,1,1,1,1,1,1)[1]
a.value, b.value, c.value,d.value, e.value, f.value, this.value, c_sol, test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,1,1,1,1,1,1)[1]
a.value, b.value, c.value,d.value, e.value, f.value, this.value, c_sol, test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,1,1,1,1,1,1)[1]
a.value, b.value, c.value,d.value, e.value, f.value, this.value, c_sol, test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,1,1,1,1,1,1)[1]
test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,0,0,-0.33,0,1,1)[1]
###Output
_____no_output_____
###Markdown
Test with all variables
###Code
a.value, b.value, c.value,d.value,e.value,f.value, this.value, c_sol, test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,a,b,1,1,1,1)[1]
a.value, b.value, c.value,d.value,e.value,f.value, this.value, c_sol, test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,1,1,1,1,1,1)[1]
importlib.reload(GeoMig)
test = GeoMig.GeoMigSim_pro2(c_o = np.float32(-0.1),range = 17)
test.create_regular_grid_3D(0,10,0,10,0,10,20,20,20)
test.theano_set_3D_nugget_degree0()
import matplotlib.pyplot as plt
%matplotlib inline
G_x, G_y, G_z = test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,1,1,1,1,1,1)[-3:]
sol = test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,a,b,-3*b,d,1,-1)[0].reshape(20,20,20)
def plot_this_crap(direction):
fig = plt.figure()
ax = fig.add_subplot(111)
if direction == "x":
plt.arrow(dip_pos_1[1],dip_pos_1[2], dip_pos_1_v[1]-dip_pos_1[1],
dip_pos_1_v[2]-dip_pos_1[2], head_width = 0.2)
plt.arrow(dip_pos_2[1],dip_pos_2[2],dip_pos_2_v[1]-dip_pos_2[1],
dip_pos_2_v[2]-dip_pos_2[2], head_width = 0.2)
plt.plot(layer_1[:,1],layer_1[:,2], "o")
plt.plot(layer_2[:,1],layer_2[:,2], "o")
plt.plot(layer_1[:,1],layer_1[:,2], )
plt.plot(layer_2[:,1],layer_2[:,2], )
plt.contour( sol[25,:,:] ,30,extent = (0,10,0,10) )
if direction == "y":
plt.quiver(dips[:,0],dips[:,2], G_x,G_z, pivot = "tail")
for layer in layers:
plt.plot(layer[:,0], layer[:,2], "o")
# plt.plot(layer_1[:,0],layer_1[:,2], "o")
# plt.plot(layer_2[:,0],layer_2[:,2], "o")
# plt.plot(layer_1[:,0],layer_1[:,2], )
# plt.plot(layer_2[:,0],layer_2[:,2], )
plt.contour( sol[:,10,:].T ,30,extent = (0,10,0,10) )
if direction == "z":
plt.arrow(dip_pos_1[0],dip_pos_1[1], dip_pos_1_v[0]-dip_pos_1[0],
dip_pos_1_v[1]-dip_pos_1[1], head_width = 0.2)
plt.arrow(dip_pos_2[0],dip_pos_2[1],dip_pos_2_v[0]-dip_pos_2[0],
dip_pos_2_v[1]-dip_pos_2[1], head_width = 0.2)
plt.plot(layer_1[:,0],layer_1[:,1], "o")
plt.plot(layer_2[:,0],layer_2[:,1], "o")
plt.plot(layer_1[:,0],layer_1[:,1], )
plt.plot(layer_2[:,0],layer_2[:,1], )
plt.contour( sol[:,:,25] ,30,extent = (0,10,0,10) )
#plt.colorbar()
#plt.xlim(0,10)
#plt.ylim(0,10)
plt.colorbar()
plt.title("GeoBulleter v 0.1")
layers
plot_this_crap("y")
a.value, b.value
test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,1,1,1,1,1,1)[1]
c_sol
h,j,k =sol[5,10,35], sol[25,5,5], sol[30,15,-25]
layer_1 = np.array([[1,5,7],[5,5,7],[6,5,7], [9,5,7]], dtype = "float32")
layer_2 = np.array([[1,5,1],[5,5,1],[9,5,1]], dtype = "float32")
print(sol[5,25,35], sol[25,25,35], sol[30,25,35], sol[45,25,35])
print(sol[5,25,5], sol[25,25,5], sol[45,25,5])
list(layer_1[0]*5)
interfaces_aux = test.geoMigueller(dips,dips_angles,azimuths,polarity,
rest, ref)[0]
h = sol[10,20,30]# interfaces_aux[np.argmin(abs((test.grid - ref[0]).sum(1)))]
k = sol[30,15,25]# interfaces_aux[np.argmin(abs((test.grid - dips[0]).sum(1)))]
j = sol[45,25,5]#interfaces_aux[np.argmin(abs((test.grid - dips[-1]).sum(1)))]
h,k,j
dips[-1], ref[0]
sol[30,15,25], sol[30,15,25]
sol = test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref)[0].reshape(50,50,50, order = "C")
sol = np.swapaxes(sol,0,1)
plt.contour(sol[:,25,:].transpose())
"""Export model to VTK
Export the geology blocks to VTK for visualisation of the entire 3-D model in an
external VTK viewer, e.g. Paraview.
..Note:: Requires pyevtk, available for free on: https://github.com/firedrakeproject/firedrake/tree/master/python/evtk
**Optional keywords**:
- *vtk_filename* = string : filename of VTK file (default: output_name)
- *data* = np.array : data array to export to VKT (default: entire block model)
"""
vtk_filename = "noddyFunct2"
extent_x = 10
extent_y = 10
extent_z = 10
delx = 0.2
dely = 0.2
delz = 0.2
from pyevtk.hl import gridToVTK
# Coordinates
x = np.arange(0, extent_x + 0.1*delx, delx, dtype='float64')
y = np.arange(0, extent_y + 0.1*dely, dely, dtype='float64')
z = np.arange(0, extent_z + 0.1*delz, delz, dtype='float64')
# self.block = np.swapaxes(self.block, 0, 2)
gridToVTK(vtk_filename, x, y, z, cellData = {"geology" : sol})
len(x)
surf_eq.min()
np.min(z)
layers[0][:,0]
G_x = np.sin(np.deg2rad(dips_angles)) * np.sin(np.deg2rad(azimuths)) * polarity
G_y = np.sin(np.deg2rad(dips_angles)) * np.cos(np.deg2rad(azimuths)) * polarity
G_z = np.cos(np.deg2rad(dips_angles)) * polarity
a
import plotly.graph_objs as go  # import needed for go.Layout/go.Figure below
data = [trace1, trace2]  # trace1/trace2 are assumed to come from an earlier (not shown) cell
layout = go.Layout(
xaxis=dict(
range=[2, 5]
),
yaxis=dict(
range=[2, 5]
)
)
fig = go.Figure(data=data, layout=layout)
import lxml
lxml??
# Random Box
#layers = [np.random.uniform(0,10,(10,2)) for i in range(100)]
#dips = np.random.uniform(0,10, (60,2))
#dips_angles = np.random.normal(90,10, 60)
#rest = (np.vstack((i[1:] for i in layers)))
#ref = np.vstack((np.tile(i[0],(np.shape(i)[0]-1,1)) for i in layers))
#rest;
from mpl_toolkits.mplot3d import axes3d  # provides get_test_data used below
from matplotlib import cm
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
cset = ax.contour(X, Y, Z, cmap=cm.coolwarm)
ax.clabel(cset, fontsize=9, inline=1)
print(X)
plt.show()
import matplotlib.pyplot as plt
% matplotlib inline
plt.contour( sol.reshape(100,100) ,30,extent = (0,10,0,10) )
import matplotlib.pyplot as plt
% matplotlib inline
dip_pos_1_v = np.array([np.cos(np.deg2rad(dip_angle_1))*1,
np.sin(np.deg2rad(dip_angle_1))]) + dip_pos_1
dip_pos_2_v = np.array([np.cos(np.deg2rad(dip_angle_2))*1,
np.sin(np.deg2rad(dip_angle_2))]) + dip_pos_2
plt.arrow(dip_pos_1[0],dip_pos_1[1], dip_pos_1_v[0]-dip_pos_1[0],
dip_pos_1_v[1]-dip_pos_1[1], head_width = 0.2)
plt.arrow(dip_pos_2[0],dip_pos_2[1],dip_pos_2_v[0]-dip_pos_2[0],
dip_pos_2_v[1]-dip_pos_2[1], head_width = 0.2)
plt.plot(layer_1[:,0],layer_1[:,1], "o")
plt.plot(layer_2[:,0],layer_2[:,1], "o")
plt.plot(layer_1[:,0],layer_1[:,1], )
plt.plot(layer_2[:,0],layer_2[:,1], )
plt.contour( sol.reshape(100,100) ,30,extent = (0,10,0,10) )
#plt.colorbar()
#plt.xlim(0,10)
#plt.ylim(0,10)
plt.title("GeoBulleter v 0.1")
print (dip_pos_1_v, dip_pos_2_v, layer_1)
###Output
[ 2. 5.] [ 6.34 3.94] [[ 1. 7.]
[ 5. 7.]
[ 6. 7.]
[ 9. 8.]]
###Markdown
CPU
###Code
%%timeit
sol = test.geoMigueller(dips,dips_angles,rest, ref)[0]
test.geoMigueller.profile.summary()
sys.path.append("/home/bl3/anaconda3/lib/python3.5/site-packages/PyEVTK-1.0.0-py3.5.egg_FILES/pyevtk")
nx = 50
ny = 50
nz = 50
xmin = 1
ymin = 1
zmin = 1
grid = sol
var_name = "Geology"
#from evtk.hl import gridToVTK
import pyevtk
from pyevtk.hl import gridToVTK
# define coordinates
x = np.zeros(nx + 1)
y = np.zeros(ny + 1)
z = np.zeros(nz + 1)
x[1:] = np.cumsum(delx)
y[1:] = np.cumsum(dely)
z[1:] = np.cumsum(delz)
# plot in coordinates
x += xmin
y += ymin
z += zmin
print (len(x), x)
gridToVTK("GeoMigueller", x, y, z,
cellData = {var_name: grid})
###Output
51 [ 1. 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2
1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2 1.2]
###Markdown
GPU
###Code
%%timeit
sol = test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref);
test.geoMigueller.profile.summary()
importlib.reload(GeoMig)
test = GeoMig.GeoMigSim_pro2()
from theano import function, config, shared, sandbox
import theano.tensor as T
import numpy
import time
vlen = 10 * 30 * 768 # 10 x #cores x # threads per core
iters = 1000
rng = numpy.random.RandomState(22)
x = shared(numpy.asarray(rng.rand(vlen), config.floatX))
f = function([], T.exp(x))
print(f.maker.fgraph.toposort())
t0 = time.time()
for i in range(iters):
r = f()
t1 = time.time()
print("Looping %d times took %f seconds" % (iters, t1 - t0))
print("Result is %s" % (r,))
if numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]):
print('Used the cpu')
else:
print('Used the gpu')
from theano import function, config, shared, sandbox
import theano.tensor as T
import numpy
import time
vlen = 10 * 30 * 768 # 10 x #cores x # threads per core
iters = 1000
rng = numpy.random.RandomState(22)
x = shared(numpy.asarray(rng.rand(vlen), config.floatX))
f = function([], T.exp(x))
print(f.maker.fgraph.toposort())
t0 = time.time()
for i in range(iters):
r = f()
t1 = time.time()
print("Looping %d times took %f seconds" % (iters, t1 - t0))
print("Result is %s" % (r,))
if numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]):
print('Used the cpu')
else:
print('Used the gpu')
from theano import function, config, shared, sandbox
import theano.tensor as T
import numpy
import time
vlen = 10 * 30 * 768 # 10 x #cores x # threads per core
iters = 1000
rng = numpy.random.RandomState(22)
x = shared(numpy.asarray(rng.rand(vlen), config.floatX))
f = function([], T.exp(x))
print(f.maker.fgraph.toposort())
t0 = time.time()
for i in range(iters):
r = f()
t1 = time.time()
print("Looping %d times took %f seconds" % (iters, t1 - t0))
print("Result is %s" % (r,))
if numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]):
print('Used the cpu')
else:
print('Used the gpu')
np.set_printoptions(precision=2)
test.geoMigueller(dips,dips_angles,rest, ref)[1]
T.fill_diagonal?
import matplotlib.pyplot as plt
% matplotlib inline
dip_pos_1_v = np.array([np.cos(np.deg2rad(dip_angle_1))*1,
np.sin(np.deg2rad(dip_angle_1))]) + dip_pos_1
dip_pos_2_v = np.array([np.cos(np.deg2rad(dip_angle_2))*1,
np.sin(np.deg2rad(dip_angle_2))]) + dip_pos_2
plt.arrow(dip_pos_1[0],dip_pos_1[1], dip_pos_1_v[0]-dip_pos_1[0],
dip_pos_1_v[1]-dip_pos_1[1], head_width = 0.2)
plt.arrow(dip_pos_2[0],dip_pos_2[1],dip_pos_2_v[0]-dip_pos_2[0],
dip_pos_2_v[1]-dip_pos_2[1], head_width = 0.2)
plt.plot(layer_1[:,0],layer_1[:,1], "o")
plt.plot(layer_2[:,0],layer_2[:,1], "o")
plt.plot(layer_1[:,0],layer_1[:,1], )
plt.plot(layer_2[:,0],layer_2[:,1], )
plt.contour( sol.reshape(50,50) ,30,extent = (0,10,0,10) )
#plt.colorbar()
#plt.xlim(0,10)
#plt.ylim(0,10)
plt.title("GeoBulleter v 0.1")
print (dip_pos_1_v, dip_pos_2_v, layer_1)
n = 10
#a = T.horizontal_stack(T.vertical_stack(T.ones(n),T.zeros(n)), T.vertical_stack(T.zeros(n), T.ones(n)))
a = T.zeros(n)
print (a.eval())
#U_G = T.horizontal_stack(([T.ones(n),T.zeros(n)],[T.zeros(n),T.ones(n)]))
T.stack?
x_min = 0
x_max = 10
y_min = 0
y_max = 10
z_min = 0
z_max = 10
nx = 2
ny = 2
nz = 2
g = np.meshgrid(
np.linspace(x_min, x_max, nx, dtype="float32"),
np.linspace(y_min, y_max, ny, dtype="float32"),
np.linspace(z_min, z_max, nz, dtype="float32"), indexing="ij"
)
np.vstack(map(np.ravel, g)).T.astype("float32")
map(np.ravel, g)
np.ravel(g, order = "F")
g
np.transpose?
from scipy.optimize import basinhopping
c_sol, test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,1,1,1,1,1,1)[1]
def func2d(x):
return abs((test.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref,x[0],x[1],x[2],x[3],1,1)[1] - c_sol)).sum()
minimizer_kwargs = {"method": "BFGS"}
x0 = [0.1, 0.1,0.1,0.1]
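# (Added note) basinhopping is a global optimizer: random perturbations of x0
# alternate with local BFGS minimizations, keeping the best minimum found.
# Here it searches the kriging hyperparameters that best reproduce c_sol.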
ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
niter=200)
ret
ret
ret
###Output
_____no_output_____ |
SBM_16Layer_ClusterDataset.ipynb | ###Markdown
###Code
!pip install -q condacolab
import condacolab
condacolab.install_anaconda()
###Output
✨🍰✨ Everything looks OK!
###Markdown
Load and create environment
###Code
from google.colab import drive
drive.mount('/content/drive')
%pwd
# Execution time 4m 18s
%cp -av /content/drive/MyDrive/'Colab Notebooks'/'Colab Notebooks'/benchmarking-gnns /content
%cd benchmarking-gnns
%cd /content/benchmarking-gnns/
!conda env create -f /content/benchmarking-gnns/environment_gpu.yml
!conda activate benchmark_gnn
###Output
_____no_output_____
###Markdown
Main Driver Notebook for Training Graph NNs on SBM Datasets
MODELS
- GatedGCN
- GCN
- GAT
- GraphSage
- MLP
- GIN
- MoNet
- RingGNN
- 3WLGNN
DATASET
- SBM_CLUSTER
- SBM_PATTERN
TASK
- Node Classification
###Code
#!pip install tensorboardX
!pip install dgl-cu101
#!pip install tensorboardX
!pip install pytorch
#!pip install dgl
!pip install tensorboardX
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
class DotDict(dict):
def __init__(self, **kwds):
self.update(kwds)
self.__dict__ = self
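# Quick usage sketch (added illustration): entries are reachable both as
# dictionary keys and as attributes.
cfg = DotDict(lr=5e-5, batch_size=5)
assert cfg.lr == cfg['lr'] == 5e-5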
#import dgl
# """
# AUTORELOAD IPYTHON EXTENSION FOR RELOADING IMPORTED MODULES
# """
def in_ipynb():
try:
cfg = get_ipython().config
return True
except NameError:
return False
notebook_mode = in_ipynb()
print(notebook_mode)
if notebook_mode == True:
%load_ext autoreload
%autoreload 2
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.SBMs_node_classification.load_net import gnn_model # import GNNs
from data.data import LoadData # import dataset
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
if torch.cuda.is_available() and use_gpu:
print('cuda available with GPU:',torch.cuda.get_device_name(0))
device = torch.device("cuda")
else:
print('cuda not available')
device = torch.device("cpu")
return device
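# Example call (a sketch; in terminal mode main() wires this up from the
# config file): device = gpu_setup(use_gpu=True, gpu_id=0)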
# select GPU or CPU
use_gpu = True; gpu_id = 0; device = None # default GPU
#use_gpu = False; gpu_id = -1; device = None # CPU
# """
# USER CONTROLS
# """
if notebook_mode == True:
#MODEL_NAME = '3WLGNN'
#MODEL_NAME = 'RingGNN'
MODEL_NAME = 'GatedGCN'
#MODEL_NAME = 'GCN'
#MODEL_NAME = 'GAT'
#MODEL_NAME = 'GraphSage'
#MODEL_NAME = 'MLP'
#MODEL_NAME = 'GIN'
#MODEL_NAME = 'MoNet'
DATASET_NAME = 'SBM_CLUSTER'
#DATASET_NAME = 'SBM_PATTERN'
out_dir = 'out/SBMs_node_classification/'
root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
dataset = LoadData(DATASET_NAME)
trainset, valset, testset = dataset.train, dataset.val, dataset.test
#MODEL_NAME = 'RingGNN'
MODEL_NAME = 'GatedGCN'
#MODEL_NAME = 'GCN'
#MODEL_NAME = 'GAT'
#MODEL_NAME = 'GraphSage'
#MODEL_NAME = 'MLP'
#MODEL_NAME = 'DiffPool'
#MODEL_NAME = 'GIN'
#MODEL_NAME = 'MoNet'
# """
# PARAMETERS
# """
if notebook_mode == True:
n_heads = -1
edge_feat = False
pseudo_dim_MoNet = -1
kernel = -1
gnn_per_block = -1
embedding_dim = -1
pool_ratio = -1
n_mlp_GIN = -1
gated = False
self_loop = False
#self_loop = True
max_time = 12
pos_enc = True
#pos_enc = False
pos_enc_dim = 10
if MODEL_NAME == 'GatedGCN':
seed=41; epochs=1000; batch_size=5; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0
L=16; hidden_dim=70; out_dim=hidden_dim; dropout=0.0; readout='mean'
if MODEL_NAME == 'GCN':
seed=41; epochs=1000; batch_size=5; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0
L=4; hidden_dim=146; out_dim=hidden_dim; dropout=0.0; readout='mean'
if MODEL_NAME == 'GAT':
seed=41; epochs=1000; batch_size=50; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0
L=4; n_heads=8; hidden_dim=19; out_dim=n_heads*hidden_dim; dropout=0.0; readout='mean'
print('True hidden dim:',out_dim)
if MODEL_NAME == 'GraphSage':
seed=41; epochs=1000; batch_size=50; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0
L=4; hidden_dim=108; out_dim=hidden_dim; dropout=0.0; readout='mean'
if MODEL_NAME == 'MLP':
seed=41; epochs=1000; batch_size=50; init_lr=5e-4; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0
gated=False; # MEAN
L=4; hidden_dim=150; out_dim=hidden_dim; dropout=0.0; readout='mean'
gated=True; # GATED
L=4; hidden_dim=135; out_dim=hidden_dim; dropout=0.0; readout='mean'
if MODEL_NAME == 'DiffPool':
seed=41; epochs=1000; batch_size=50; init_lr=5e-4; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0
L=4; hidden_dim=32; out_dim=hidden_dim; dropout=0.0; readout='mean'
n_heads=8; gnn_per_block=3; embedding_dim=32; batch_size=128; pool_ratio=0.15
if MODEL_NAME == 'GIN':
seed=41; epochs=1000; batch_size=50; init_lr=5e-4; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0
L=4; hidden_dim=110; out_dim=hidden_dim; dropout=0.0; readout='mean'
n_mlp_GIN = 2; learn_eps_GIN=True; neighbor_aggr_GIN='sum'
if MODEL_NAME == 'MoNet':
seed=41; epochs=1000; batch_size=50; init_lr=5e-4; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0
L=4; hidden_dim=90; out_dim=hidden_dim; dropout=0.0; readout='mean'
pseudo_dim_MoNet=2; kernel=3;
if MODEL_NAME == 'RingGNN':
seed=41; epochs=1000; batch_size=1; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0
#L=4; hidden_dim=145; out_dim=hidden_dim; dropout=0.0; readout='mean'
L=4; hidden_dim=25; out_dim=hidden_dim; dropout=0.0
if MODEL_NAME == '3WLGNN':
seed=41; epochs=1000; batch_size=1; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0
L=3; hidden_dim=82; out_dim=hidden_dim; dropout=0.0
# generic new_params
net_params = {}
net_params['device'] = device
net_params['in_dim'] = torch.unique(trainset[0][0].ndata['feat'],dim=0).size(0) # node_dim (feat is an integer)
net_params['hidden_dim'] = hidden_dim
net_params['out_dim'] = out_dim
num_classes = torch.unique(trainset[0][1],dim=0).size(0)
net_params['n_classes'] = num_classes
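# (Added note) in_dim counts the distinct node-feature values and n_classes
# counts the distinct node labels; for SBM_CLUSTER this is typically 7 input
# values (6 cluster ids plus an "unknown" 0) and 6 target classes.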
net_params['L'] = L # min L should be 2
net_params['readout'] = "mean"
net_params['layer_norm'] = True
net_params['batch_norm'] = True
net_params['in_feat_dropout'] = 0.0
net_params['dropout'] = 0.0
net_params['residual'] = True
net_params['edge_feat'] = edge_feat
net_params['self_loop'] = self_loop
# for MLPNet
net_params['gated'] = gated
# for GAT
net_params['n_heads'] = n_heads
# for graphsage
net_params['sage_aggregator'] = 'meanpool'
# specific for GIN
net_params['n_mlp_GIN'] = n_mlp_GIN
net_params['learn_eps_GIN'] = True
net_params['neighbor_aggr_GIN'] = 'sum'
# specific for MoNet
net_params['pseudo_dim_MoNet'] = pseudo_dim_MoNet
net_params['kernel'] = kernel
# specific for RingGNN
net_params['radius'] = 2
num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
# specific for 3WLGNN
net_params['depth_of_mlp'] = 2
# specific for pos_enc_dim
net_params['pos_enc'] = pos_enc
net_params['pos_enc_dim'] = pos_enc_dim
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
model = gnn_model(MODEL_NAME, net_params)
total_param = 0
print("MODEL DETAILS:\n")
print(model)
for param in model.parameters():
# print(param.data.size())
total_param += np.prod(list(param.data.size()))
print('MODEL/Total parameters:', MODEL_NAME, total_param)
return total_param
if notebook_mode == True:
view_model_param(MODEL_NAME, net_params)
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
start0 = time.time()
per_epoch_time = []
DATASET_NAME = dataset.name
if MODEL_NAME in ['GCN', 'GAT']:
if net_params['self_loop']:
print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
dataset._add_self_loops()
if MODEL_NAME in ['GatedGCN']:
if net_params['pos_enc']:
print("[!] Adding graph positional encoding.")
dataset._add_positional_encodings(net_params['pos_enc_dim'])
print('Time PE:',time.time()-start0)
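# (Added note) In this repo the positional encodings are Laplacian PE: the
# first pos_enc_dim non-trivial eigenvectors of the normalized graph
# Laplacian are attached to each node, which is why this step is timed.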
trainset, valset, testset = dataset.train, dataset.val, dataset.test
root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
device = net_params['device']
# Write network and optimization hyper-parameters in folder config/
with open(write_config_file + '.txt', 'w') as f:
f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n"""\
.format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
writer = SummaryWriter(log_dir=log_dir)
# setting seeds
random.seed(params['seed'])
np.random.seed(params['seed'])
torch.manual_seed(params['seed'])
if device.type == 'cuda':
torch.cuda.manual_seed(params['seed'])
print("Training Graphs: ", len(trainset))
print("Validation Graphs: ", len(valset))
print("Test Graphs: ", len(testset))
print("Number of Classes: ", net_params['n_classes'])
model = gnn_model(MODEL_NAME, net_params)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
factor=params['lr_reduce_factor'],
patience=params['lr_schedule_patience'],
verbose=True)
epoch_train_losses, epoch_val_losses = [], []
epoch_train_accs, epoch_val_accs = [], []
if MODEL_NAME in ['RingGNN', '3WLGNN']:
# import train functions specific for WL-GNNs
from train.train_SBMs_node_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
train_loader = DataLoader(trainset, shuffle=True, collate_fn=dataset.collate_dense_gnn)
val_loader = DataLoader(valset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
test_loader = DataLoader(testset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
else:
# import train functions for all other GCNs
from train.train_SBMs_node_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network # import train functions
train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
# At any point you can hit Ctrl + C to break out of training early.
try:
with tqdm(range(params['epochs'])) as t:
for epoch in t:
t.set_description('Epoch %d' % epoch)
start = time.time()
if MODEL_NAME in ['RingGNN', '3WLGNN']: # since different batch training function for dense GNNs
epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])
else: # for all other models common train function
epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)
_, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
epoch_train_losses.append(epoch_train_loss)
epoch_val_losses.append(epoch_val_loss)
epoch_train_accs.append(epoch_train_acc)
epoch_val_accs.append(epoch_val_acc)
writer.add_scalar('train/_loss', epoch_train_loss, epoch)
writer.add_scalar('val/_loss', epoch_val_loss, epoch)
writer.add_scalar('train/_acc', epoch_train_acc, epoch)
writer.add_scalar('val/_acc', epoch_val_acc, epoch)
writer.add_scalar('test/_acc', epoch_test_acc, epoch)
writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
train_loss=epoch_train_loss, val_loss=epoch_val_loss,
train_acc=epoch_train_acc, val_acc=epoch_val_acc,
test_acc=epoch_test_acc)
per_epoch_time.append(time.time()-start)
# Saving checkpoint
ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
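# Keep only recent checkpoints: the loop below parses the epoch number out of
# each saved "epoch_<n>.pkl" filename and deletes files older than epoch-1.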
files = glob.glob(ckpt_dir + '/*.pkl')
for file in files:
epoch_nb = file.split('_')[-1]
epoch_nb = int(epoch_nb.split('.')[0])
if epoch_nb < epoch-1:
os.remove(file)
scheduler.step(epoch_val_loss)
if optimizer.param_groups[0]['lr'] < params['min_lr']:
print("\n!! LR SMALLER OR EQUAL TO MIN LR THRESHOLD.")
break
# Stop training after params['max_time'] hours
if time.time()-start0 > params['max_time']*3600:
print('-' * 89)
print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
break
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early because of KeyboardInterrupt')
_, test_acc = evaluate_network(model, device, test_loader, epoch)
_, train_acc = evaluate_network(model, device, train_loader, epoch)
print("Test Accuracy: {:.4f}".format(test_acc))
print("Train Accuracy: {:.4f}".format(train_acc))
print("Convergence Time (Epochs): {:.4f}".format(epoch))
print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-start0))
print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
writer.close()
"""
Write the results in out_dir/results folder
"""
with open(write_file_name + '.txt', 'w') as f:
f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
.format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
test_acc, train_acc, epoch, (time.time()-start0)/3600, np.mean(per_epoch_time)))
!pip install nbconvert
import nbconvert
def main(notebook_mode=False,config=None):
"""
USER CONTROLS
"""
# terminal mode
if notebook_mode==False:
parser = argparse.ArgumentParser()
parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
parser.add_argument('--gpu_id', help="Please give a value for gpu id")
parser.add_argument('--model', help="Please give a value for model name")
parser.add_argument('--dataset', help="Please give a value for dataset name")
parser.add_argument('--out_dir', help="Please give a value for out_dir")
parser.add_argument('--seed', help="Please give a value for seed")
parser.add_argument('--epochs', help="Please give a value for epochs")
parser.add_argument('--batch_size', help="Please give a value for batch_size")
parser.add_argument('--init_lr', help="Please give a value for init_lr")
parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
parser.add_argument('--min_lr', help="Please give a value for min_lr")
parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
parser.add_argument('--L', help="Please give a value for L")
parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
parser.add_argument('--out_dim', help="Please give a value for out_dim")
parser.add_argument('--residual', help="Please give a value for residual")
parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
parser.add_argument('--readout', help="Please give a value for readout")
parser.add_argument('--kernel', help="Please give a value for kernel")
parser.add_argument('--n_heads', help="Please give a value for n_heads")
parser.add_argument('--gated', help="Please give a value for gated")
parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
parser.add_argument('--dropout', help="Please give a value for dropout")
parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
parser.add_argument('--data_mode', help="Please give a value for data_mode")
parser.add_argument('--num_pool', help="Please give a value for num_pool")
parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
parser.add_argument('--linkpred', help="Please give a value for linkpred")
parser.add_argument('--cat', help="Please give a value for cat")
parser.add_argument('--self_loop', help="Please give a value for self_loop")
parser.add_argument('--max_time', help="Please give a value for max_time")
parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
parser.add_argument('--pos_enc', help="Please give a value for pos_enc")
args = parser.parse_args()
with open(args.config) as f:
config = json.load(f)
# device
if args.gpu_id is not None:
config['gpu']['id'] = int(args.gpu_id)
config['gpu']['use'] = True
device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
# model, dataset, out_dir
if args.model is not None:
MODEL_NAME = args.model
else:
MODEL_NAME = config['model']
if args.dataset is not None:
DATASET_NAME = args.dataset
else:
DATASET_NAME = config['dataset']
dataset = LoadData(DATASET_NAME)
if args.out_dir is not None:
out_dir = args.out_dir
else:
out_dir = config['out_dir']
# parameters
params = config['params']
if args.seed is not None:
params['seed'] = int(args.seed)
if args.epochs is not None:
params['epochs'] = int(args.epochs)
if args.batch_size is not None:
params['batch_size'] = int(args.batch_size)
if args.init_lr is not None:
params['init_lr'] = float(args.init_lr)
if args.lr_reduce_factor is not None:
params['lr_reduce_factor'] = float(args.lr_reduce_factor)
if args.lr_schedule_patience is not None:
params['lr_schedule_patience'] = int(args.lr_schedule_patience)
if args.min_lr is not None:
params['min_lr'] = float(args.min_lr)
if args.weight_decay is not None:
params['weight_decay'] = float(args.weight_decay)
if args.print_epoch_interval is not None:
params['print_epoch_interval'] = int(args.print_epoch_interval)
if args.max_time is not None:
params['max_time'] = float(args.max_time)
# network parameters
net_params = config['net_params']
net_params['device'] = device
net_params['gpu_id'] = config['gpu']['id']
net_params['batch_size'] = params['batch_size']
if args.L is not None:
net_params['L'] = int(args.L)
if args.hidden_dim is not None:
net_params['hidden_dim'] = int(args.hidden_dim)
if args.out_dim is not None:
net_params['out_dim'] = int(args.out_dim)
if args.residual is not None:
net_params['residual'] = True if args.residual=='True' else False
if args.edge_feat is not None:
net_params['edge_feat'] = True if args.edge_feat=='True' else False
if args.readout is not None:
net_params['readout'] = args.readout
if args.kernel is not None:
net_params['kernel'] = int(args.kernel)
if args.n_heads is not None:
net_params['n_heads'] = int(args.n_heads)
if args.gated is not None:
net_params['gated'] = True if args.gated=='True' else False
if args.in_feat_dropout is not None:
net_params['in_feat_dropout'] = float(args.in_feat_dropout)
if args.dropout is not None:
net_params['dropout'] = float(args.dropout)
if args.layer_norm is not None:
net_params['layer_norm'] = True if args.layer_norm=='True' else False
if args.batch_norm is not None:
net_params['batch_norm'] = True if args.batch_norm=='True' else False
if args.sage_aggregator is not None:
net_params['sage_aggregator'] = args.sage_aggregator
if args.data_mode is not None:
net_params['data_mode'] = args.data_mode
if args.num_pool is not None:
net_params['num_pool'] = int(args.num_pool)
if args.gnn_per_block is not None:
net_params['gnn_per_block'] = int(args.gnn_per_block)
if args.embedding_dim is not None:
net_params['embedding_dim'] = int(args.embedding_dim)
if args.pool_ratio is not None:
net_params['pool_ratio'] = float(args.pool_ratio)
if args.linkpred is not None:
net_params['linkpred'] = True if args.linkpred=='True' else False
if args.cat is not None:
net_params['cat'] = True if args.cat=='True' else False
if args.self_loop is not None:
net_params['self_loop'] = True if args.self_loop=='True' else False
if args.pos_enc is not None:
net_params['pos_enc'] = True if args.pos_enc=='True' else False
if args.pos_enc_dim is not None:
net_params['pos_enc_dim'] = int(args.pos_enc_dim)
# notebook mode
if notebook_mode:
# parameters
params = config['params']
# dataset
DATASET_NAME = config['dataset']
dataset = LoadData(DATASET_NAME)
# device
device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
out_dir = config['out_dir']
# GNN model
MODEL_NAME = config['model']
# network parameters
net_params = config['net_params']
net_params['device'] = device
net_params['gpu_id'] = config['gpu']['id']
net_params['batch_size'] = params['batch_size']
# SBM
net_params['in_dim'] = torch.unique(dataset.train[0][0].ndata['feat'],dim=0).size(0) # node_dim (feat is an integer)
net_params['n_classes'] = torch.unique(dataset.train[0][1],dim=0).size(0)
if MODEL_NAME == 'RingGNN':
num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
if not os.path.exists(out_dir + 'results'):
os.makedirs(out_dir + 'results')
if not os.path.exists(out_dir + 'configs'):
os.makedirs(out_dir + 'configs')
net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
if notebook_mode==True:
config = {}
# gpu config
gpu = {}
gpu['use'] = use_gpu
gpu['id'] = gpu_id
config['gpu'] = gpu
# GNN model, dataset, out_dir
config['model'] = MODEL_NAME
config['dataset'] = DATASET_NAME
config['out_dir'] = out_dir
# parameters
params = {}
params['seed'] = seed
params['epochs'] = epochs
params['batch_size'] = batch_size
params['init_lr'] = init_lr
params['lr_reduce_factor'] = lr_reduce_factor
params['lr_schedule_patience'] = lr_schedule_patience
params['min_lr'] = min_lr
params['weight_decay'] = weight_decay
params['print_epoch_interval'] = 5
params['max_time'] = max_time
config['params'] = params
# network parameters
config['net_params'] = net_params
# convert to .py format
from utils.cleaner_main import *
cleaner_main('main_SBMs_node_classification')
main(True,config)
else:
main()
###Output
_____no_output_____ |
Missing Value Imputation - Categorical Variable.ipynb | ###Markdown
Missing Value Imputation - Categorical Variable
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df=pd.read_csv("d:\\train.csv")
df.head()
# Keep only the categorical (object-dtype) columns
cat_vars = df.select_dtypes(include=["object"])
cat_vars.head()
# Percentage of missing values per categorical column
per = cat_vars.isnull().mean() * 100
per
# Columns with more than 20% missing values will be dropped
drop_val = per[per > 20].keys()
drop_val
cat_vars.drop(columns=drop_val, axis=1, inplace=True)
cat_vars
# Remaining columns that still contain missing values
isnull_per = cat_vars.isnull().mean() * 100
miss_vars = isnull_per[isnull_per > 0].keys()
miss_vars
cat_vars['MasVnrType'].mode()
cat_vars['MasVnrType'].value_counts()
# Note: without inplace=True (or reassignment), fillna returns a new Series
# and leaves cat_vars itself unchanged
cat_vars["MasVnrType"].fillna(cat_vars["MasVnrType"].mode()[0])
cat_vars["MasVnrType"].isnull().sum()
# Impute each remaining column with its mode
for var in miss_vars:
    cat_vars[var].fillna(cat_vars[var].mode()[0], inplace=True)
    print(var, "=", cat_vars[var].mode()[0])
cat_vars.isnull().sum()
# Write the imputed categorical columns back into the full dataframe
df.update(cat_vars)
df.drop(columns=drop_val, inplace=True)
df.select_dtypes(include="object").isnull().any(axis=1)
###Output
_____no_output_____ |
Spark_and_Python_For_Big_Data_with_PySpark/04-Spark_for_Machine_Learning/4-K-means_Clustering/Clustering_Code_Along.ipynb | ###Markdown
Clustering Code AlongWe'll be working with a real data set about seeds, from the UCI repository: https://archive.ics.uci.edu/ml/datasets/seeds. The examined group comprised kernels belonging to three different varieties of wheat: Kama, Rosa and Canadian, 70 elements each, randomly selected for the experiment. High quality visualization of the internal kernel structure was detected using a soft X-ray technique. It is non-destructive and considerably cheaper than other more sophisticated imaging techniques like scanning microscopy or laser technology. The images were recorded on 13x18 cm X-ray KODAK plates. Studies were conducted using combine harvested wheat grain originating from experimental fields, explored at the Institute of Agrophysics of the Polish Academy of Sciences in Lublin. The data set can be used for the tasks of classification and cluster analysis.Attribute Information:To construct the data, seven geometric parameters of wheat kernels were measured: 1. area A, 2. perimeter P, 3. compactness C = 4*pi*A/P^2, 4. length of kernel, 5. width of kernel, 6. asymmetry coefficient, 7. length of kernel groove. All of these parameters were real-valued continuous.Let's see if we can cluster them into 3 groups with K-means!
###Code
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('cluster').getOrCreate()
dataset = spark.read.csv('seeds_dataset.csv', header=True, inferSchema=True)
dataset.printSchema()
dataset.head(1)
###Output
_____no_output_____
###Markdown
Format the Data
###Code
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import VectorAssembler
dataset.columns
assembler = VectorAssembler(inputCols=dataset.columns, outputCol='features')
final_data = assembler.transform(dataset)
final_data.printSchema()
final_data.select(['features']).show()
###Output
+--------------------+
| features|
+--------------------+
|[15.26,14.84,0.87...|
|[14.88,14.57,0.88...|
|[14.29,14.09,0.90...|
|[13.84,13.94,0.89...|
|[16.14,14.99,0.90...|
|[14.38,14.21,0.89...|
|[14.69,14.49,0.87...|
|[14.11,14.1,0.891...|
|[16.63,15.46,0.87...|
|[16.44,15.25,0.88...|
|[15.26,14.85,0.86...|
|[14.03,14.16,0.87...|
|[13.89,14.02,0.88...|
|[13.78,14.06,0.87...|
|[13.74,14.05,0.87...|
|[14.59,14.28,0.89...|
|[13.99,13.83,0.91...|
|[15.69,14.75,0.90...|
|[14.7,14.21,0.915...|
|[12.72,13.57,0.86...|
+--------------------+
only showing top 20 rows
###Markdown
Scale the DataIt is a good idea to scale our data to deal with the curse of dimensionality: https://en.wikipedia.org/wiki/Curse_of_dimensionality
###Code
from pyspark.ml.feature import StandardScaler
scaler = StandardScaler(inputCol='features', outputCol='scaledFeatures')
# Compute summary statistics by fitting the StandardScaler
scaler_model = scaler.fit(final_data)
final_data = scaler_model.transform(final_data)
final_data.select(['features', 'scaledFeatures']).show()
###Output
+--------------------+--------------------+
| features| scaledFeatures|
+--------------------+--------------------+
|[15.26,14.84,0.87...|[5.24452795332028...|
|[14.88,14.57,0.88...|[5.11393027165175...|
|[14.29,14.09,0.90...|[4.91116018695588...|
|[13.84,13.94,0.89...|[4.75650503761158...|
|[16.14,14.99,0.90...|[5.54696468981581...|
|[14.38,14.21,0.89...|[4.94209121682475...|
|[14.69,14.49,0.87...|[5.04863143081749...|
|[14.11,14.1,0.891...|[4.84929812721816...|
|[16.63,15.46,0.87...|[5.71536696354628...|
|[16.44,15.25,0.88...|[5.65006812271202...|
|[15.26,14.85,0.86...|[5.24452795332028...|
|[14.03,14.16,0.87...|[4.82180387844584...|
|[13.89,14.02,0.88...|[4.77368894309428...|
|[13.78,14.06,0.87...|[4.73588435103234...|
|[13.74,14.05,0.87...|[4.72213722664617...|
|[14.59,14.28,0.89...|[5.01426361985209...|
|[13.99,13.83,0.91...|[4.80805675405968...|
|[15.69,14.75,0.90...|[5.39230954047151...|
|[14.7,14.21,0.915...|[5.05206821191403...|
|[12.72,13.57,0.86...|[4.37158555479908...|
+--------------------+--------------------+
only showing top 20 rows
###Markdown
Train the Model and Evaluate
###Code
# Trains a k-means model.
kmeans = KMeans(featuresCol='scaledFeatures', k=3)
model = kmeans.fit(final_data)
# Evaluate clustering by computing Within Set Sum of Squared Errors.
print('WSSSE')
print(model.computeCost(final_data))
print("Clusters' centers: ")
for center in model.clusterCenters():
print(center)
features_and_predictions = model.transform(final_data).select(['features', 'prediction'])
features_and_predictions.show()
###Output
+--------------------+----------+
| features|prediction|
+--------------------+----------+
|[15.26,14.84,0.87...| 2|
|[14.88,14.57,0.88...| 2|
|[14.29,14.09,0.90...| 2|
|[13.84,13.94,0.89...| 2|
|[16.14,14.99,0.90...| 2|
|[14.38,14.21,0.89...| 2|
|[14.69,14.49,0.87...| 2|
|[14.11,14.1,0.891...| 2|
|[16.63,15.46,0.87...| 0|
|[16.44,15.25,0.88...| 2|
|[15.26,14.85,0.86...| 2|
|[14.03,14.16,0.87...| 2|
|[13.89,14.02,0.88...| 2|
|[13.78,14.06,0.87...| 2|
|[13.74,14.05,0.87...| 2|
|[14.59,14.28,0.89...| 2|
|[13.99,13.83,0.91...| 2|
|[15.69,14.75,0.90...| 2|
|[14.7,14.21,0.915...| 2|
|[12.72,13.57,0.86...| 1|
+--------------------+----------+
only showing top 20 rows
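###Markdown
Here `k=3` was fixed because the dataset is known to contain three wheat varieties. When the true number of clusters is unknown, a common heuristic is to sweep `k` and look for an "elbow" in the WSSSE curve. Below is a minimal, illustrative sketch of that sweep, reusing the `final_data` prepared above:
###Code
# Fit a model for each candidate k and report its WSSSE
for k in range(2, 9):
    kmeans_k = KMeans(featuresCol='scaledFeatures', k=k, seed=1)
    model_k = kmeans_k.fit(final_data)
    print(k, model_k.computeCost(final_data))
###Output
_____no_output_____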
|
lab-auto-scale/stress-total-loss.ipynb | ###Markdown
Auto Scaling LabThis notebook walks you through how to configure and execute auto scaling on a SageMaker endpoint.
###Code
import threading
import numpy as np
import time
import math
from multiprocessing.pool import ThreadPool
from sagemaker.tensorflow.model import TensorFlowPredictor
from sagemaker.estimator import Estimator
###Output
_____no_output_____
###Markdown
Deploy or attach to your endpointThe lab has a dependency on the prior lab involving bringing your own TensorFlow script. To get started, we first attach to the existing endpoint from the prior lab. If the endpoint has already been deleted, we re-deploy it based on the name used earlier for the training job.To locate your specific training job, go back to your notebook from the earlier lab and look at the cell output from the `fit` method. It will show you the specific training job name. **Enter that as `TRAINING_JOB_NAME` in the next cell before proceeding**. This ensures we use the same model you trained earlier.
###Code
SERVE_INSTANCE_TYPE = 'ml.c5.xlarge'
TRAINING_JOB_NAME = '<your training job name goes here>'
ENDPOINT_NAME = TRAINING_JOB_NAME
NOT_RUNNING = True
import sagemaker
from sagemaker import get_execution_role
import boto3
sess = sagemaker.Session()
role = get_execution_role()
bucket = sess.default_bucket()
if (NOT_RUNNING):
from sagemaker.tensorflow.serving import Model
model = Model(model_data=f's3://{bucket}/{TRAINING_JOB_NAME}/output/model.tar.gz',
role=f'{role}')
loss_predictor = model.deploy(initial_instance_count=1,
instance_type=SERVE_INSTANCE_TYPE)
else:
loss_predictor = TensorFlowPredictor(endpoint_name=ENDPOINT_NAME)
###Output
_____no_output_____
###Markdown
Now that the endpoint is available, prepare a single payload that we will use in the simple stress test. The actual values do not matter, as we are just trying to simulate load.
###Code
X = [ 1.05332958, -0.53354753, -0.69436208, -2.21762908, -3.20396808, 1.03539088,
1.20417872, -1.03589941, -0.35095956, -0.01160373, -0.1615418, -0.20454251,
-0.72053914]
print(str(X))
###Output
_____no_output_____
###Markdown
Define a simple function for making a prediction. Track the elapsed time and return that as seconds.
###Code
def predict(payload):
    elapsed_time = time.time()
    results = loss_predictor.predict(payload)
    elapsed_time = time.time() - elapsed_time
    # Extract the predicted value (not used for timing, shown for reference)
    prediction = results['predictions'][0][0]
    return elapsed_time
###Output
_____no_output_____
###Markdown
Make sure a single prediction is working against the endpoint before proceeding to generate load for auto scaling.
###Code
predict(X)
###Output
_____no_output_____
###Markdown
Configure auto scaling on your endpointFollow these steps to configure auto scaling.1. In a new browser tab, navigate to the `Endpoints` section of the SageMaker console. 2. Navigate to the details page for the endpoint. 3. Under the `Endpoint runtime settings`, select the one and only variant that was created for this endpoint (it is named `All traffic` by default).4. Click on `Configure auto scaling` in the upper right of `Endpoint runtime settings`.5. Under `Variant automatic scaling`, set the maximum number of instances to `2`.6. Under `Built in scaling policy`, set the target to track to `2000` for the `SageMakerVariantInvocationsPerInstance` metric. 7. Click `Save` at the bottom of the page.8. You will be returned to the endpoint detail page and should see a message at the top of the page in a green bar saying `Automatic scaling was configured for variant AllTraffic`.You have now set a threshold that will be used by SageMaker to determine when to add more instances. If it detects more invocations per instance per minute than the threshold, more instances will be added in an attempt to distribute the load and reduce that metric to the target. We have purposely set the threshold to a low number so that we can more easily trigger scaling. In practice, you will need to perform testing and analysis to determine an appropriate trigger and the right number of instances for your workload.See the detailed documentation on SageMaker auto scaling [here](https://docs.aws.amazon.com/sagemaker/latest/dg/endpoint-auto-scaling.html). Execute stress tests to force auto scalingNow that the endpoint has auto scaling configured, let's drive some inference traffic against the endpoint. We use multiple client threads to drive a sufficient volume of requests to trigger SageMaker auto scaling. The requests are mapped across a set of threads, and the resulting elapsed times are summed and returned.
###Code
def run_test(max_threads, max_requests):
    pool = ThreadPool(max_threads)
    bunch_of_x = []
    for i in range(max_requests):
        bunch_of_x.append(X)
    result = pool.map(predict, bunch_of_x)
    pool.close()
    pool.join()
    # Total elapsed time across all requests
    elapsedtime = np.sum(result)
    return elapsedtime
###Output
_____no_output_____
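###Markdown
The console steps above can also be scripted. Below is a minimal, illustrative sketch using the Application Auto Scaling API via `boto3`, mirroring the console configuration (a maximum of two instances and a target of 2000 invocations per instance). The variant name `AllTraffic` is the SageMaker default assumed here, and the policy name is arbitrary.
###Code
import boto3
autoscaling = boto3.client('application-autoscaling')
resource_id = f'endpoint/{ENDPOINT_NAME}/variant/AllTraffic'
# Register the variant's instance count as a scalable target (1 to 2 instances)
autoscaling.register_scalable_target(
    ServiceNamespace='sagemaker',
    ResourceId=resource_id,
    ScalableDimension='sagemaker:variant:DesiredInstanceCount',
    MinCapacity=1,
    MaxCapacity=2)
# Target-tracking policy on invocations per instance, matching the console setup
autoscaling.put_scaling_policy(
    PolicyName='InvocationsPerInstanceTargetTracking',
    ServiceNamespace='sagemaker',
    ResourceId=resource_id,
    ScalableDimension='sagemaker:variant:DesiredInstanceCount',
    PolicyType='TargetTrackingScaling',
    TargetTrackingScalingPolicyConfiguration={
        'TargetValue': 2000.0,
        'PredefinedMetricSpecification': {
            'PredefinedMetricType': 'SageMakerVariantInvocationsPerInstance'}})
###Output
_____no_output_____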
###Markdown
Drive a few short testsWe run a few tests to allow us to start seeing invocation metrics in CloudWatch. This will help you visualize how traffic ramps up on your single instance endpoint, and is eventually distributed across a cluster of instances.
###Code
%%time
print('Starting test 0')
run_test(5, 10)
%%time
print('Starting test 1')
run_test(10, 250)
%%time
print('Starting test 2')
run_test(10, 1000)
###Output
_____no_output_____
###Markdown
Observe auto scalingTo trigger auto scaling, kick off one more round of tests. While that is running, read the instructions in the subsequent cell. It explains how to confirm that auto scaling worked.
###Code
%%time
print('Starting test 3')
run_test(10, 600000)
###Output
_____no_output_____
###Markdown
In the endpoint details console, you should still see the `Desired instance count` as `1`, since the scaling threshold has not been reached. This next test will continuously send traffic to the endpoint for about 15 minutes. During this time, we'll see the invocations per instance rise. Invocations per instance will track exactly the same as the total invocations until auto scaling happens, since we only have a single instance to start with. Note that in practice, you would want to start with at least two instances. This ensures you have higher availability by leveraging multiple availability zones.Auto scaling should trigger once the threshold is met. In our case, the threshold for the alarm in CloudWatch is InvocationsPerInstance > 2,000 for 3 datapoints within 3 minutes. This ensures an alarm is not triggered for a short spike in traffic.Once auto scaling is triggered, SageMaker will take several minutes to add new instances (in our case, just one). While the auto scaling is happening, the endpoint details console will show you that the new desired instance count has increased to two. There will also be a blue bar at the top of the console indicating that the endpoint is being updated. Eventually that banner turns green and indicates that the `Endpoint was successfully updated.`Once the expanded set of instances is running, click on `Invocation metrics` from the endpoint details console. This takes you to CloudWatch to show graphs of those metrics. Select two metrics: `Invocations` and `InvocationsPerInstance`. Next, click on the `Graphed metrics` tab, and update the `Statistics` to be `Sum`, and the `Period` to be `1 second`. At the top of the chart, set the time period to 30 minutes (using the `custom` drop down).For the time periods before the second instance was automatically added, the invocations will be exactly the same as the invocations per instance.Once the auto scaling has happened, you will now see the total number of invocations continue at the same pace as before, yet the *invocations per instance* will be cut in half, as SageMaker automatically distributes the load across the cluster. Scaling back in (optional)For extra credit, you can observe SageMaker scaling in the infrastructure (reducing the number of instances). This will take about 15 minutes after the previous traffic generator is complete. At that point, you should see a scale-in event. SageMaker detects that the invocations per instance are below the threshold, and automatically reduces the number of instances to avoid being over-provisioned. Cool down parameters are available to control how aggressively SageMaker adds or removes instances.To ensure the CloudWatch scale-in alarm is triggered, there needs to be at least some traffic to have sufficient data points for the alarm. Here we generate a small load.
###Code
%%time
print('Adding a few invocations every 30s for 15 mins')
for i in range(30):
run_test(10, 100)
time.sleep(30)
###Output
_____no_output_____
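###Markdown
The invocation metrics can also be pulled programmatically rather than graphed in the CloudWatch console. Below is a rough sketch that queries the standard SageMaker endpoint metrics; adjust the time window so it covers your test runs.
###Code
import datetime
cloudwatch = boto3.client('cloudwatch')
now = datetime.datetime.utcnow()
# Sum of invocations per instance over the last 30 minutes, in 1-minute buckets
response = cloudwatch.get_metric_statistics(
    Namespace='AWS/SageMaker',
    MetricName='InvocationsPerInstance',
    Dimensions=[
        {'Name': 'EndpointName', 'Value': ENDPOINT_NAME},
        {'Name': 'VariantName', 'Value': 'AllTraffic'}],
    StartTime=now - datetime.timedelta(minutes=30),
    EndTime=now,
    Period=60,
    Statistics=['Sum'])
for point in sorted(response['Datapoints'], key=lambda p: p['Timestamp']):
    print(point['Timestamp'], point['Sum'])
###Output
_____no_output_____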
###Markdown
Delete the endpointDelete the endpoint, which will take down all of the instances.
###Code
sagemaker.Session().delete_endpoint(loss_predictor.endpoint)
###Output
_____no_output_____ |
site/en-snapshot/guide/basic_training_loops.ipynb | ###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Basic training loops View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook In the previous guides, you have learned about [tensors](./tensor.ipynb), [variables](./variable.ipynb), [gradient tape](autodiff.ipynb), and [modules](./intro_to_modules.ipynb). In this guide, you will fit these all together to train models.TensorFlow also includes the [tf.Keras API](keras/overview.ipynb), a high-level neural network API that provides useful abstractions to reduce boilerplate. However, in this guide, you will use basic classes. Setup
###Code
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Solving machine learning problemsSolving a machine learning problem usually consists of the following steps: - Obtain training data. - Define the model. - Define a loss function. - Run through the training data, calculating loss from the ideal value - Calculate gradients for that loss and use an *optimizer* to adjust the variables to fit the data. - Evaluate your results.For illustration purposes, in this guide you'll develop a simple linear model, $f(x) = x * W + b$, which has two variables: $W$ (weights) and $b$ (bias).This is the most basic of machine learning problems: Given $x$ and $y$, try to find the slope and offset of a line via [simple linear regression](https://en.wikipedia.org/wiki/Linear_regressionSimple_and_multiple_linear_regression). DataSupervised learning uses *inputs* (usually denoted as *x*) and *outputs* (denoted *y*, often called *labels*). The goal is to learn from paired inputs and outputs so that you can predict the value of an output from an input.Each input of your data, in TensorFlow, is almost always represented by a tensor, and is often a vector. In supervised training, the output (or value you'd like to predict) is also a tensor.Here is some data synthesized by adding Gaussian (Normal) noise to points along a line.
###Code
# The actual line
TRUE_W = 3.0
TRUE_B = 2.0
NUM_EXAMPLES = 1000
# A vector of random x values
x = tf.random.normal(shape=[NUM_EXAMPLES])
# Generate some noise
noise = tf.random.normal(shape=[NUM_EXAMPLES])
# Calculate y
y = x * TRUE_W + TRUE_B + noise
# Plot all the data
import matplotlib.pyplot as plt
plt.scatter(x, y, c="b")
plt.show()
###Output
_____no_output_____
###Markdown
Tensors are usually gathered together in *batches*, or groups of inputs and outputs stacked together. Batching can confer some training benefits and works well with accelerators and vectorized computation. Given how small this dataset is, you can treat the entire dataset as a single batch. Define the modelUse `tf.Variable` to represent all weights in a model. A `tf.Variable` stores a value and provides this in tensor form as needed. See the [variable guide](./variable.ipynb) for more details.Use `tf.Module` to encapsulate the variables and the computation. You could use any Python object, but this way it can be easily saved.Here, you define both *w* and *b* as variables.
###Code
class MyModel(tf.Module):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Initialize the weights to `5.0` and the bias to `0.0`
# In practice, these should be randomly initialized
self.w = tf.Variable(5.0)
self.b = tf.Variable(0.0)
def __call__(self, x):
return self.w * x + self.b
model = MyModel()
# List the variables tf.modules's built-in variable aggregation.
print("Variables:", model.variables)
# Verify the model works
assert model(3.0).numpy() == 15.0
###Output
_____no_output_____
###Markdown
The initial variables are set here in a fixed way, but Keras comes with any of a number of [initializers](https://www.tensorflow.org/api_docs/python/tf/keras/initializers) you could use, with or without the rest of Keras. Define a loss functionA loss function measures how well the output of a model for a given input matches the target output. The goal is to minimize this difference during training. Define the standard L2 loss, also known as the "mean squared" error:
###Code
# This computes a single loss value for an entire batch
def loss(target_y, predicted_y):
return tf.reduce_mean(tf.square(target_y - predicted_y))
###Output
_____no_output_____
###Markdown
Before training the model, you can visualize the loss value by plotting the model's predictions in red and the training data in blue:
###Code
plt.scatter(x, y, c="b")
plt.scatter(x, model(x), c="r")
plt.show()
print("Current loss: %1.6f" % loss(model(x), y).numpy())
###Output
_____no_output_____
###Markdown
Define a training loopThe training loop consists of repeatedly doing four tasks in order:* Sending a batch of inputs through the model to generate outputs* Calculating the loss by comparing the outputs to the target output (or label)* Using gradient tape to find the gradients* Optimizing the variables with those gradientsFor this example, you can train the model using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent).There are many variants of the gradient descent scheme that are captured in `tf.keras.optimizers`. But in the spirit of building from first principles, here you will implement the basic math yourself with the help of `tf.GradientTape` for automatic differentiation and `tf.assign_sub` for decrementing a value (which combines `tf.assign` and `tf.sub`):
###Code
# Given a callable model, inputs, outputs, and a learning rate...
def train(model, x, y, learning_rate):
with tf.GradientTape() as t:
# Trainable variables are automatically tracked by GradientTape
current_loss = loss(y, model(x))
# Use GradientTape to calculate the gradients with respect to W and b
dw, db = t.gradient(current_loss, [model.w, model.b])
# Subtract the gradient scaled by the learning rate
model.w.assign_sub(learning_rate * dw)
model.b.assign_sub(learning_rate * db)
###Output
_____no_output_____
###Markdown
For a look at training, you can send the same batch of *x* and *y* through the training loop, and see how `W` and `b` evolve.
###Code
model = MyModel()
# Collect the history of W-values and b-values to plot later
Ws, bs = [], []
epochs = range(10)
# Define a training loop
def training_loop(model, x, y):
for epoch in epochs:
# Update the model with the single giant batch
train(model, x, y, learning_rate=0.1)
# Track this before I update
Ws.append(model.w.numpy())
bs.append(model.b.numpy())
current_loss = loss(y, model(x))
print("Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f" %
(epoch, Ws[-1], bs[-1], current_loss))
print("Starting: W=%1.2f b=%1.2f, loss=%2.5f" %
(model.w, model.b, loss(y, model(x))))
# Do the training
training_loop(model, x, y)
# Plot it
plt.plot(epochs, Ws, "r",
epochs, bs, "b")
plt.plot([TRUE_W] * len(epochs), "r--",
[TRUE_B] * len(epochs), "b--")
plt.legend(["W", "b", "True W", "True b"])
plt.show()
# Visualize how the trained model performs
plt.scatter(x, y, c="b")
plt.scatter(x, model(x), c="r")
plt.show()
print("Current loss: %1.6f" % loss(model(x), y).numpy())
###Output
_____no_output_____
###Markdown
The same solution, but with KerasIt's useful to contrast the code above with the equivalent in Keras.Defining the model looks exactly the same if you subclass `tf.keras.Model`. Remember that Keras models inherit ultimately from module.
###Code
class MyModelKeras(tf.keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Initialize the weights to `5.0` and the bias to `0.0`
# In practice, these should be randomly initialized
self.w = tf.Variable(5.0)
self.b = tf.Variable(0.0)
def __call__(self, x, **kwargs):
return self.w * x + self.b
keras_model = MyModelKeras()
# Reuse the training loop with a Keras model
training_loop(keras_model, x, y)
# You can also save a checkpoint using Keras's built-in support
keras_model.save_weights("my_checkpoint")
###Output
_____no_output_____
###Markdown
Rather than write new training loops each time you create a model, you can use the built-in features of Keras as a shortcut. This can be useful when you do not want to write or debug Python training loops.If you do, you will need to use `model.compile()` to set the parameters, and `model.fit()` to train. It can be less code to use Keras implementations of L2 loss and gradient descent, again as a shortcut. Keras losses and optimizers can be used outside of these convenience functions, too, and the previous example could have used them.
###Code
keras_model = MyModelKeras()
# compile sets the training parameters
keras_model.compile(
# By default, fit() uses tf.function(). You can
# turn that off for debugging, but it is on now.
run_eagerly=False,
# Using a built-in optimizer, configuring as an object
optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
# Keras comes with built-in MSE error
# However, you could use the loss function
# defined above
loss=tf.keras.losses.mean_squared_error,
)
###Output
_____no_output_____
###Markdown
Keras `fit` expects batched data or a complete dataset as a NumPy array. NumPy arrays are chopped into batches and default to a batch size of 32.In this case, to match the behavior of the hand-written loop, you should pass `x` in as a single batch of size 1000.
###Code
print(x.shape[0])
keras_model.fit(x, y, epochs=10, batch_size=1000)
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Basic training loops View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook In the previous guides, you have learned about [tensors](./tensor.ipynb), [variables](./variable.ipynb), [gradient tape](autodiff.ipynb), and [modules](./intro_to_modules.ipynb). In this guide, you will fit these all together to train models.TensorFlow also includes the [tf.Keras API](https://www.tensorflow.org/guide/keras/overview), a high-level neural network API that provides useful abstractions to reduce boilerplate. However, in this guide, you will use basic classes. Setup
###Code
import tensorflow as tf
import matplotlib.pyplot as plt
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
###Output
_____no_output_____
###Markdown
Solving machine learning problemsSolving a machine learning problem usually consists of the following steps: - Obtain training data. - Define the model. - Define a loss function. - Run through the training data, calculating loss from the ideal value - Calculate gradients for that loss and use an *optimizer* to adjust the variables to fit the data. - Evaluate your results.For illustration purposes, in this guide you'll develop a simple linear model, $f(x) = x * W + b$, which has two variables: $W$ (weights) and $b$ (bias).This is the most basic of machine learning problems: Given $x$ and $y$, try to find the slope and offset of a line via [simple linear regression](https://en.wikipedia.org/wiki/Linear_regressionSimple_and_multiple_linear_regression). DataSupervised learning uses *inputs* (usually denoted as *x*) and *outputs* (denoted *y*, often called *labels*). The goal is to learn from paired inputs and outputs so that you can predict the value of an output from an input.Each input of your data, in TensorFlow, is almost always represented by a tensor, and is often a vector. In supervised training, the output (or value you'd like to predict) is also a tensor.Here is some data synthesized by adding Gaussian (Normal) noise to points along a line.
###Code
# The actual line
TRUE_W = 3.0
TRUE_B = 2.0
NUM_EXAMPLES = 201
# A vector of random x values
x = tf.linspace(-2,2, NUM_EXAMPLES)
x = tf.cast(x, tf.float32)
def f(x):
return x * TRUE_W + TRUE_B
# Generate some noise
noise = tf.random.normal(shape=[NUM_EXAMPLES])
# Calculate y
y = f(x) + noise
# Plot all the data
plt.plot(x, y, '.')
plt.show()
###Output
_____no_output_____
###Markdown
Tensors are usually gathered together in *batches*, or groups of inputs and outputs stacked together. Batching can confer some training benefits and works well with accelerators and vectorized computation. Given how small this dataset is, you can treat the entire dataset as a single batch. Define the modelUse `tf.Variable` to represent all weights in a model. A `tf.Variable` stores a value and provides this in tensor form as needed. See the [variable guide](./variable.ipynb) for more details.Use `tf.Module` to encapsulate the variables and the computation. You could use any Python object, but this way it can be easily saved.Here, you define both *w* and *b* as variables.
###Code
class MyModel(tf.Module):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Initialize the weights to `5.0` and the bias to `0.0`
# In practice, these should be randomly initialized
self.w = tf.Variable(5.0)
self.b = tf.Variable(0.0)
def __call__(self, x):
return self.w * x + self.b
model = MyModel()
# List the variables tf.modules's built-in variable aggregation.
print("Variables:", model.variables)
# Verify the model works
assert model(3.0).numpy() == 15.0
###Output
_____no_output_____
###Markdown
The initial variables are set here in a fixed way, but Keras comes with any of a number of [initializers](https://www.tensorflow.org/api_docs/python/tf/keras/initializers) you could use, with or without the rest of Keras. Define a loss functionA loss function measures how well the output of a model for a given input matches the target output. The goal is to minimize this difference during training. Define the standard L2 loss, also known as the "mean squared" error:
###Code
# This computes a single loss value for an entire batch
def loss(target_y, predicted_y):
return tf.reduce_mean(tf.square(target_y - predicted_y))
###Output
_____no_output_____
###Markdown
Before training the model, you can visualize the loss value by plotting the model's predictions in red and the training data in blue:
###Code
plt.plot(x, y, '.', label="Data")
plt.plot(x, f(x), label="Ground truth")
plt.plot(x, model(x), label="Predictions")
plt.legend()
plt.show()
print("Current loss: %1.6f" % loss(y, model(x)).numpy())
###Output
_____no_output_____
###Markdown
Define a training loopThe training loop consists of repeatedly doing four tasks in order:* Sending a batch of inputs through the model to generate outputs* Calculating the loss by comparing the outputs to the target output (or label)* Using gradient tape to find the gradients* Optimizing the variables with those gradientsFor this example, you can train the model using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent).There are many variants of the gradient descent scheme that are captured in `tf.keras.optimizers`. But in the spirit of building from first principles, here you will implement the basic math yourself with the help of `tf.GradientTape` for automatic differentiation and `tf.assign_sub` for decrementing a value (which combines `tf.assign` and `tf.sub`):
###Code
# Given a callable model, inputs, outputs, and a learning rate...
def train(model, x, y, learning_rate):
with tf.GradientTape() as t:
# Trainable variables are automatically tracked by GradientTape
current_loss = loss(y, model(x))
# Use GradientTape to calculate the gradients with respect to W and b
dw, db = t.gradient(current_loss, [model.w, model.b])
# Subtract the gradient scaled by the learning rate
model.w.assign_sub(learning_rate * dw)
model.b.assign_sub(learning_rate * db)
###Output
_____no_output_____
###Markdown
For a look at training, you can send the same batch of *x* and *y* through the training loop, and see how `W` and `b` evolve.
###Code
model = MyModel()
# Collect the history of W-values and b-values to plot later
weights = []
biases = []
epochs = range(10)
# Define a training loop
def report(model, loss):
return f"W = {model.w.numpy():1.2f}, b = {model.b.numpy():1.2f}, loss={current_loss:2.5f}"
def training_loop(model, x, y):
for epoch in epochs:
# Update the model with the single giant batch
train(model, x, y, learning_rate=0.1)
# Track this before I update
weights.append(model.w.numpy())
biases.append(model.b.numpy())
current_loss = loss(y, model(x))
print(f"Epoch {epoch:2d}:")
print(" ", report(model, current_loss))
###Output
_____no_output_____
###Markdown
Do the training
###Code
current_loss = loss(y, model(x))
print(f"Starting:")
print(" ", report(model, current_loss))
training_loop(model, x, y)
###Output
_____no_output_____
###Markdown
Plot the evolution of the weights over time:
###Code
plt.plot(epochs, weights, label='Weights', color=colors[0])
plt.plot(epochs, [TRUE_W] * len(epochs), '--',
label = "True weight", color=colors[0])
plt.plot(epochs, biases, label='bias', color=colors[1])
plt.plot(epochs, [TRUE_B] * len(epochs), "--",
label="True bias", color=colors[1])
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Visualize how the trained model performs
###Code
plt.plot(x, y, '.', label="Data")
plt.plot(x, f(x), label="Ground truth")
plt.plot(x, model(x), label="Predictions")
plt.legend()
plt.show()
print("Current loss: %1.6f" % loss(model(x), y).numpy())
###Output
_____no_output_____
###Markdown
The same solution, but with KerasIt's useful to contrast the code above with the equivalent in Keras.Defining the model looks exactly the same if you subclass `tf.keras.Model`. Remember that Keras models inherit ultimately from module.
###Code
class MyModelKeras(tf.keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Initialize the weights to `5.0` and the bias to `0.0`
# In practice, these should be randomly initialized
self.w = tf.Variable(5.0)
self.b = tf.Variable(0.0)
def call(self, x):
return self.w * x + self.b
keras_model = MyModelKeras()
# Reuse the training loop with a Keras model
training_loop(keras_model, x, y)
# You can also save a checkpoint using Keras's built-in support
keras_model.save_weights("my_checkpoint")
###Output
_____no_output_____
###Markdown
Rather than write new training loops each time you create a model, you can use the built-in features of Keras as a shortcut. This can be useful when you do not want to write or debug Python training loops.If you do, you will need to use `model.compile()` to set the parameters, and `model.fit()` to train. It can be less code to use Keras implementations of L2 loss and gradient descent, again as a shortcut. Keras losses and optimizers can be used outside of these convenience functions, too, and the previous example could have used them.
###Code
keras_model = MyModelKeras()
# compile sets the training parameters
keras_model.compile(
# By default, fit() uses tf.function(). You can
# turn that off for debugging, but it is on now.
run_eagerly=False,
# Using a built-in optimizer, configuring as an object
optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
# Keras comes with built-in MSE error
# However, you could use the loss function
# defined above
loss=tf.keras.losses.mean_squared_error,
)
###Output
_____no_output_____
###Markdown
Keras `fit` expects batched data or a complete dataset as a NumPy array. NumPy arrays are chopped into batches and default to a batch size of 32.In this case, to match the behavior of the hand-written loop, you should pass `x` in as a single batch. Setting `batch_size` to 1000, well above the 201 examples here, keeps the entire dataset in one batch.
###Code
print(x.shape[0])
keras_model.fit(x, y, epochs=10, batch_size=1000)
###Output
_____no_output_____ |
database/08_pymongo_zigbang(git).ipynb | ###Markdown
Storing zigbang listing data- pip install geohash2
###Code
import warnings
warnings.filterwarnings('ignore')
import zigbang as zb
import pymongo
import pandas as pd
# connect to the MongoDB server
server = pymongo.MongoClient('mongodb://user:passwd@ip:27017/')
db = server.zigbang
addrs = {
"seongnam": "성남동",
"dangsan": "당산동",
"hapjung": "합정동",
"mongwon": "망원동",
"sujin": "수진동",
"yeungdeungpo":"영등포동"
}
# collect the data, then insert it into MongoDB
for collection_name, addr in addrs.items():
collection = db[collection_name]
datas = zb.oneroom(addr)
ids = collection.insert(datas)
print(collection_name, addr, len(ids))
# query Sujin-dong listings with monthly rent of at most 50 and a deposit between 5,000 and 10,000
QUERY = {"rent": {"$lte": 50}, "deposit": {"$lte": 10000, "$gte": 5000}}
results = db["sujin"].find(QUERY)
df = pd.DataFrame(results).tail()
columns = ["title", "service_type", "sales_type", "deposit", "rent", "size_m2", "floor", "building_floor", "address1", "manage_cost", "is_new"]
df[columns]
# 컬렉션 삭제
for addr in addrs:
print(addr)
server.zigbang.drop_collection(addr)
# drop the database
server.drop_database("zigbang")
###Output
_____no_output_____ |
excs/exc03_introduction_to_keras_and_tf.ipynb | ###Markdown
Exercise: Chapter 3, Keras and TensorFlow Implementing a low-level linear classification network Implement a linear classification network with two layers using only the pure TensorFlow API.
###Code
import tensorflow as tf
import numpy as np
###Output
_____no_output_____
###Markdown
Generating the dataset - `np.random.multivariate_normal()` - generates data following a multivariate normal distribution - the mean and covariance must be specified- Negative dataset - number of samples: 1,000 - mean: `[0, 3]` - covariance: `[[1, 0.5],[0.5, 1]]`- Positive dataset - number of samples: 1,000 - mean: `[3, 0]` - covariance: `[[1, 0.5],[0.5, 1]]`
###Code
num_samples_per_class = 1000
# negative dataset
negative_samples = np.random.multivariate_normal(
mean=[0, 3], cov=[[1, 0.5],[0.5, 1]], size=num_samples_per_class)
# positive dataset
positive_samples = np.random.multivariate_normal(
mean=[3, 0], cov=[[1, 0.5],[0.5, 1]], size=num_samples_per_class)
###Output
_____no_output_____
###Markdown
The two `(1000, 2)`-shaped positive and negative datasets are stacked into a single `(2000, 2)`-shaped dataset, while the dtype is set to `np.float32` at the same time. Without specifying the dtype it defaults to `np.float64`, which requires more memory and execution time.
###Code
inputs = np.vstack((negative_samples, positive_samples)).astype(np.float32)
###Output
_____no_output_____
###Markdown
The targets of the negative samples are set to 0 and those of the positive samples to 1.
###Code
targets = np.vstack((np.zeros((num_samples_per_class, 1), dtype="float32"),
np.ones((num_samples_per_class, 1), dtype="float32")))
###Output
_____no_output_____
###Markdown
Distinguishing the positive and negative samples by color looks as follows.- `inputs[:, 0]`: x coordinates- `inputs[:, 1]`: y coordinates- `c=targets[:, 0]`: color assigned according to 0 or 1
###Code
import matplotlib.pyplot as plt
plt.scatter(inputs[:, 0], inputs[:, 1], c=targets[:, 0])
plt.show()
###Output
_____no_output_____
###Markdown
Creating the weight variable tensors
###Code
inter_layers_dim1 = 5
input_dim1 = 2 # number of features of each input sample
output_dim1 = inter_layers_dim1 # number of features of each output sample
# weights: randomly initialized
W1 = tf.Variable(initial_value=tf.random.uniform(shape=(input_dim1, output_dim1)))
# bias: initialized to zeros
b1 = tf.Variable(initial_value=tf.zeros(shape=(output_dim1,)))
input_dim2 = inter_layers_dim1 # number of features of each input sample
output_dim2 = 1 # a single output value
# weights: randomly initialized
W2 = tf.Variable(initial_value=tf.random.uniform(shape=(input_dim2, output_dim2)))
# bias: initialized to zeros
b2 = tf.Variable(initial_value=tf.zeros(shape=(output_dim2,)))
###Output
_____no_output_____
###Markdown
Declaring the prediction model (functions)The functions below compute the outputs of the individual layers used by the model.
###Code
def layer1(inputs, activation=None):
outputs = tf.matmul(inputs, W1) + b1
if activation != None:
return activation(outputs)
else:
return outputs
def layer2(inputs, activation=None):
outputs = tf.matmul(inputs, W2) + b2
if activation != None:
return activation(outputs)
else:
return outputs
def model(inputs):
layer1_outputs = layer1(inputs, tf.nn.relu)
layer2_outputs = layer2(layer1_outputs)
return layer2_outputs
###Output
_____no_output_____
###Markdown
Loss function: mean squared error (MSE)- `tf.reduce_mean()`: computes the mean of the entries contained in a tensor. The result is identical to NumPy's `np.mean()`, but it operates on TensorFlow tensors.
###Code
def square_loss(targets, predictions):
per_sample_losses = tf.square(targets - predictions)
return tf.reduce_mean(per_sample_losses)
###Output
_____no_output_____
###Markdown
Training stepFor a single batch, the predictions are computed and then the weights and biases are updated using the gradients of the loss function.
###Code
learning_rate = 0.1
def training_step(inputs, targets):
with tf.GradientTape() as tape:
predictions = model(inputs)
        loss = square_loss(targets, predictions)
grad_loss_wrt_W1, grad_loss_wrt_b1, grad_loss_wrt_W2, grad_loss_wrt_b2 = tape.gradient(loss, [W1, b1, W2, b2])
W1.assign_sub(grad_loss_wrt_W1 * learning_rate)
b1.assign_sub(grad_loss_wrt_b1 * learning_rate)
W2.assign_sub(grad_loss_wrt_W2 * learning_rate)
b2.assign_sub(grad_loss_wrt_b2 * learning_rate)
return loss
###Output
_____no_output_____
###Markdown
Batch trainingRepeat the batch training step a total of 100 times.
###Code
for step in range(100):
loss = training_step(inputs, targets)
if step % 10 == 0:
print(f"Loss at step {step}: {loss:.4f}")
###Output
Loss at step 0: 14.9883
Loss at step 10: 0.2718
Loss at step 20: 0.2498
Loss at step 30: 0.2459
Loss at step 40: 0.2336
Loss at step 50: 0.1279
Loss at step 60: 0.0581
Loss at step 70: 0.0441
Loss at step 80: 0.0397
Loss at step 90: 0.0373
###Markdown
It looks like the model could be trained a bit more. After roughly 600 additional training steps, the loss value starts to plateau.
###Code
for step in range(1000):
loss = training_step(inputs, targets)
if step % 100 == 0:
print(f"Loss at step {step}: {loss:.4f}")
###Output
Loss at step 0: 0.0354
Loss at step 100: 0.0245
Loss at step 200: 0.0205
Loss at step 300: 0.0197
Loss at step 400: 0.0195
Loss at step 500: 0.0195
Loss at step 600: 0.0194
Loss at step 700: 0.0194
Loss at step 800: 0.0194
Loss at step 900: 0.0194
###Markdown
Prediction
###Code
predictions = model(inputs)
###Output
_____no_output_____
###Markdown
The prediction results can be checked as follows.A sample is judged positive when its predicted value is greater than 0.5.
###Code
plt.scatter(inputs[:, 0], inputs[:, 1], c=predictions[:, 0] > 0.5)
plt.show()
###Output
_____no_output_____ |
samples/getting-started/azure-quantum/provider-specific/Getting-started-with-Honeywell-and-OpenQASM-2.0-on-Azure-Quantum.ipynb | ###Markdown
Getting started with Honeywell and OpenQASM 2.0 on Azure QuantumThis notebook shows how to send a basic quantum circuit expressed using the [OpenQASM 2.0 spec](https://github.com/Qiskit/openqasm/tree/OpenQASM2.x) to a Honeywell target via the Azure Quantum service. First, install `azure-quantum` and optionally `matplotlib` for plotting:
###Code
# To install the Azure Quantum client package, uncomment and run the line below:
#
# !pip install azure-quantum==0.19.2109.165653 --quiet
#
# We also recommend installing matplotlib, if you don't have it installed already:
# !pip install matplotlib --quiet
###Output
_____no_output_____
###Markdown
Connecting to the Azure Quantum serviceTo connect to the Azure Quantum service, find the resource ID and location of your Workspace from the Azure portal here: https://portal.azure.com. Navigate to your Azure Quantum workspace and copy the values from the header.
###Code
from azure.quantum import Workspace
from azure.quantum.target import Honeywell
# Enter your workspace details here
# Find your resource ID and location via portal.azure.com
workspace = Workspace(
resource_id="",
location=""
)
###Output
_____no_output_____
###Markdown
Use `workspace.get_targets` to see what targets are currently available for the Honeywell provider, including wait times. Running this method will trigger authentication to your Microsoft account, if you're not already logged in.
###Code
workspace.get_targets(provider_id="honeywell")
###Output
_____no_output_____
###Markdown
Submit a quantum circuit to the Honeywell API validatorNote: The [Honeywell API validator](https://docs.microsoft.com/azure/quantum/provider-honeywellapi-validator) target will always return 0 on measurement.Create a quantum circuit using the [OpenQASM 2.0 spec](https://github.com/Qiskit/openqasm/tree/OpenQASM2.x) representation. For example, the following example creates a Teleportation circuit:
###Code
# Create raw OpenQASM circuit.
circuit = """OPENQASM 2.0;
include "qelib1.inc";
qreg q[3];
creg c0[1];
creg c1[3];
h q[0];
cx q[0], q[1];
x q[2];
h q[2];
cx q[2], q[0];
h q[2];
measure q[0] -> c1[0];
c0[0] = c1[0];
if (c0==1) x q[1];
c0[0] = 0;
measure q[2] -> c1[1];
c0[0] = c1[1];
if (c0==1) z q[1];
c0[0] = 0;
h q[1];
measure q[1] -> c1[2];
"""
###Output
_____no_output_____
###Markdown
To see if this circuit is valid, we can submit it to the Honeywell API validator target. Submitting returns a `Job` object. For more information, see [Azure Quantum Job](https://review.docs.microsoft.com/en-us/azure/quantum/optimization-job-reference).
###Code
target = workspace.get_targets(name="honeywell.hqs-lt-s1-apival")
job = target.submit(circuit)
###Output
_____no_output_____
###Markdown
Wait until the job is complete and then fetch the results.
###Code
results = job.get_results()
results
###Output
.......
###Markdown
Run on Honeywell Simulator It looks like the program was indeed valid! Now we can run the circuit and simulate the result with the Honeywell simulator target:
###Code
target = Honeywell(workspace=workspace, name="honeywell.hqs-lt-s1-sim")
job = target.submit(circuit)
###Output
_____no_output_____
###Markdown
Wait until the job is complete and then fetch the results.
###Code
results = job.get_results()
results
###Output
...................
{'c0': ['0'], 'c1': ['111']}
|
300_task_activity/100_face_deviations_unfam/02_setup_timing.ipynb | ###Markdown
Please get the raw, predicted, and residual features from the `120_features` folder.This will save the demographics and trait measures. The demographic and trait measures will be run in separate analyses.
###Code
import os
import pandas as pd
import numpy as np
import readline
import rpy2
import rpy2.robjects as robjects
r = robjects.r
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
from rpy2.robjects import pandas2ri
pandas2ri.activate()
from sklearn.preprocessing import scale
###Output
_____no_output_____
###Markdown
Run
###Code
def load_dat(timing):
# Read in data
dfa = pd.read_csv("measures/z_mean_vid_vals.csv")
dfb = pd.read_csv("measures/z_mean_rel_vid_vals.csv")
df = pd.concat([dfa.ix[:,1:-2],dfb.ix[:,1:-1]], axis=1)
df = df.ix[:,df.columns != "mean_fds"]
# Get the video names
# We want to reorder the dataframe above based on the timing info
feat_vnames = dfa.ix[:,-1]
inds = [ (x == feat_vnames).nonzero()[0][0] for x in timing.video ]
# Extract
df_cols = df.columns
df_dat = df.ix[inds,:]
# Make matrix
df_dat = df_dat.as_matrix()
# Center the columns
df_dat = scale(df_dat, with_mean=True, with_std=False)
return (df_cols, df_dat)
def face_activity(runs, onsets):
uruns = np.unique(runs)
nruns = uruns.shape[0]
afni_facemat = []
for ri in range(nruns):
run_inds = runs == uruns[ri]
n = np.sum(run_inds)
ovec = onsets[run_inds].astype('float32').round(4)
row = [ '%.5f' % ovec[i] for i in range(n) ]
row = " ".join(row)
afni_facemat.append(row)
return np.array(afni_facemat)
def question_activity(runs, onsets, q_regressor):
uruns = np.unique(runs)
afni_qmat = []
nruns = uruns.shape[0]
for ri in range(nruns):
run_inds = runs == uruns[ri]
n = np.sum(run_inds)
qvec = q_regressor[run_inds]
ovec = onsets[run_inds].astype('float32').round(4)
row = np.array([ '%.5f' % ovec[i] for i,touse in enumerate(qvec) if touse == 1 ])
if len(row) == 0:
row = '*'
else:
row = " ".join(row)
afni_qmat.append(row)
return np.array(afni_qmat)
def motion_covars(subj):
funcdir = "/data1/famface01/analysis/preprocessed/%s/func" % subj
df_paths = pd.read_table("%s/df_paths.txt" % funcdir, sep=" ")
inds = df_paths.inindex[df_paths.name == 'unfam_vids']
motion_fpaths = [ "%s/mc/func_run%02i_dfile.1D" % (funcdir, ind) for ind in inds ]
from sklearn.preprocessing import scale
motion_mats = []
for fpath in motion_fpaths:
x = np.loadtxt(fpath)
x = scale(x, with_std=False, with_mean=True)
motion_mats.append(x)
motion_mat = np.vstack(motion_mats)
return motion_mat
def am_activity(runs, onsets, df_mat):
uruns = np.unique(runs)
nruns = uruns.shape[0]
afni_mats = []
for ci in range(df_mat.shape[1]):
afni_mat = []
for ri in range(nruns):
run_inds = runs == uruns[ri]
n = np.sum(run_inds)
ovecs= onsets[run_inds].astype('float32').round(4)
dvecs= df_mat[run_inds,ci]
row = [ '%.5f*%f' % (ovecs[i],dvecs[i]) for i in range(n) ]
row = " ".join(row)
afni_mat.append(row)
afni_mats.append(np.array(afni_mat))
return afni_mats
# Skip the first subject...for now
for si in range(6):
subj = "sub%02i" % (si+1)
print(subj)
# Load the R data
infile = "/data1/famface01/analysis/encoding/ShapeAnalysis/data/roi_n_more_%s.rda" % subj
r.load(infile)
# Variables
onsets = np.array(r.dat.rx2('basics').rx2('timing').rx2('onset'))
questions = np.array(r['as.character'](r.dat.rx2('basics').rx2('timing').rx2('question')))
runs = np.array(r.dat.rx2('basics').rx2('timing').rx2('run'))
uruns = np.unique(runs)
timing = pandas2ri.ri2py(r.dat.rx2('basics').rx2('timing'))
# Get data
dat_cols, dat = load_dat(timing)
###
# ACTIVITY
###
# face
afni_facemat = face_activity(runs, onsets)
# questions
q_regressor = (questions != 'none') * 1
afni_qmat = question_activity(runs, onsets, q_regressor)
# motion
motion_mat = motion_covars(subj)
# pose/shape/etc
shape_dat = am_activity(runs, onsets, dat)
###
# SAVE
###
base = "/data1/famface01/command/misc/face_representations"
outbase = "%s/300_task_activity/100_face_deviations_unfam/timings" % base
outdir = "%s/%s" % (outbase, subj)
    print(outdir)
if not os.path.exists(outdir):
os.mkdir(outdir)
# Faces
ofname = '%s/stim_faces.txt' % outdir
np.savetxt(ofname, afni_facemat, fmt='%s')
# Measures
for i,amat in enumerate(shape_dat):
cname = dat_cols[i]
ofname = '%s/stimam_%s.txt' % (outdir, cname)
np.savetxt(ofname, amat, fmt='%s')
# Questions
ofname = '%s/stim_questions.txt' % outdir
np.savetxt(ofname, afni_qmat, fmt='%s')
# MOTION
ofname = '%s/motion.1D' % outdir
np.savetxt(ofname, motion_mat, fmt='%f')
load_dat(timing)
###Output
_____no_output_____ |
general_assembly/01_welcome_to_data_science/solution-code-1.ipynb | ###Markdown
Check to see if you're ready to go on Thursday!1. Run each block of code2. Check for errors3. When you think you're error free, flag down a teaching team member to confirm!
###Code
###This is what an error looks like
print(a)
###Output
_____no_output_____
###Markdown
Objectives Get comfortable with IPython Notebook* How to start IPython Notebook* How to read data into pandas* How to do simple manipulations on pandas dataframes Start a notebook:For each class, we'll be using a set of common data science libraries and tools, like the IPython notebook. You can start an IPython notebook by running```jupyter notebook $NAME_OF_FILE``` Try it yourself!Read and run the block of code below by: 1. Clicking on it and pressing the play button above or2. Using a short cut- (help --> keyboard shortcuts)
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
mpl.rcParams['figure.figsize'] = (15, 6)
pd.set_option('display.width', 4000)
pd.set_option('display.max_columns', 100)
###Output
_____no_output_____
###Markdown
First: Read in the data
Review Simple Commands
Practice downloading and reading in sample data
###Code
# Download and read the data (this may take more than 1 minute)
orig_data = pd.read_csv('../../assets/dataset/311-service-requests.csv', parse_dates=['Created Date'], low_memory=False)
plt.scatter(orig_data['Longitude'], orig_data['Latitude'], marker='.', color="purple")
###Output
_____no_output_____
###Markdown
Try this Example: Graph the number of noise complaints each hour in New York
###Code
complaints = orig_data[['Created Date', 'Complaint Type']]
noise_complaints = complaints[complaints['Complaint Type'] == 'Noise - Street/Sidewalk']
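# note: the how= keyword is deprecated in newer pandas (see the warning below);
# the modern spelling is .resample('H').apply(len)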
noise_complaints.set_index('Created Date').sort_index().resample('H', how=len).plot()
###Output
C:\Users\Ayham\Anaconda3\lib\site-packages\ipykernel\__main__.py:3: FutureWarning: how in .resample() is deprecated
the new syntax is .resample(...)..apply(<func>)
app.launch_new_instance()
###Markdown
Second: Using IPython
Review Python Basics
Test your skills by answering the following questions:

Question 1. Divide 10 by 20 and set the result to a variable named "A"
###Code
A = 10/20
print(A)
#### If you did not get a float (decimals) alter your equation to get the desired result (0.5)
A = 10./20
print(A)
###Output
0.5
###Markdown
Question 2. Create a function called division that will divide any two numbers and print the result (with decimals). Call your function. Confirm that the results are as expected.
###Code
def division(numerator, denominator):
result = float(numerator) / denominator
print(result)
division(20, 10)
division(10, 20)
###Output
2.0
0.5
###Markdown
Question 3. Using .split() split my string into separate words
###Code
my_string = "the cow jumped over the moon"
words = my_string.split()
# returns ['the', 'cow', 'jumped', 'over', 'the', 'moon']
print(words)
my_string.split('o')
my_string.split()
###Output
_____no_output_____
###Markdown
Question 4. How many words are in my_string?
###Code
word_count = len(words)
#returns the number of words- 6
print(word_count)
len(my_string)
###Output
_____no_output_____
###Markdown
Question 5. Use a list comprehension to find the length of each word
List comprehension: a way to apply a function which loops through a list
###Code
length_of_each_word = [len(word) for word in words]
print(length_of_each_word)
[word[0] for word in words]
###Output
_____no_output_____
###Markdown
Question 6. Put the words back together in a variable called sentence using .join()
###Code
# put them back together with join
sentence = " ".join(words)
print(sentence)
###Output
the cow jumped over the moon
###Markdown
Bonus: Add a "||" between each word
###Code
# the " " puts the space in between the words. or you could put anything else in
alternate_sentence = "||".join(words)
print(alternate_sentence)
###Output
the||cow||jumped||over||the||moon
|
gui/my-webapp.ipynb | ###Markdown
Example webapp
Example of a webapp, shown here for execution with Voila. **To understand how it works, see the tutorial** [Interactive applications - Webapp section](https://it.softpython.org/gui/gui-sol.htmlWebapp)
Note: Jupyter text cells like this one are also displayed by Voila.
###Code
import ipywidgets as widgets
from ipywidgets import Button, HBox, VBox, Tab, IntSlider, Label, HTML, AppLayout, Layout
# !!!! IMPORTANT !!!!
# The 'pyplot' you see below, which is imported under the name 'plt',
# comes from the bqplot library; it is NOT the same pyplot as matplotlib's !!
# The bqplot authors adopted the same name and conventions to let you
# easily reuse matplotlib examples you already know
from bqplot import pyplot as plt
x = [2,4,6]
fig = plt.figure() # generates the figure
lines = plt.plot(x, [15,3,20])
plt.title('Grafico in bqplot')
slider1 = IntSlider()
bottone_vai_pag2 = Button(description="VAI PAGINA 2")
slider2 = IntSlider()
hbox2 = HBox([Button(description='clicca qui'), Button(description='cliccami!')])
tab1 = HBox(children=[fig, VBox([slider1, bottone_vai_pag2])])
tab2 = VBox(children=[slider2,
hbox2])
# for now, the first 'page' is the Tab widget
pagina1 = widgets.Tab(children=[tab1, tab2], layout=Layout(min_height='350px'))
pagina1.set_title(0, 'TAB COL PLOT')
pagina1.set_title(1, 'ALTRA TAB')
bottone_vai_pag1 = Button(description="VAI PAGINA 1")
pagina2 = HBox([
Label("Questa è la seconda pagina"),
bottone_vai_pag1
],
layout=Layout(min_height='350px') )
# HTML is the language web pages are written in; here we use it
# to create the title as an example, but knowing it is not essential
# If you want to learn more, try following this tutorial: http://coderdojotrento.it/web1
titolo = HTML('<h1 style="color:orange">Webapp Incredibile</h1> <br/>')
# shared text at the bottom of the page
credits = Label("Credits: Interfacce Incredibili SRL")
# the structure of our webapp is a VBox stack of elements.
my_app = VBox( children=[titolo, # assume the title is always visible across the whole site
pagina1, # for now, the first 'page' is the Tab widget
credits]) # assume the credits are always visible across the whole site
# this function lets us change the central part of the webapp by passing in a new widget
def cambia_pagina(nuova_pagina):
# the round parentheses in this context create a tuple,
# i.e. an immutable sequence of elements:
my_app.children = (my_app.children[0], # the previous title widget
nuova_pagina, # the widget representing the new page
my_app.children[2]) # the previous credits widget
def bottone_vai_pag2_cliccato(b):
cambia_pagina(pagina2)
bottone_vai_pag2.on_click(bottone_vai_pag2_cliccato)
def bottone_vai_pag1_cliccato(b):
cambia_pagina(pagina1)
bottone_vai_pag1.on_click(bottone_vai_pag1_cliccato)
display(my_app)
###Output
_____no_output_____ |
day3/Hubble.ipynb | ###Markdown
Expansion velocity of the universeIn 1929, Edwin Hubble published a [paper](http://www.pnas.org/content/pnas/15/3/168.full.pdf) in which he compared the radial velocity of objects with their distance. The former can be done pretty precisely with spectroscopy, the latter is much more uncertain. His original data are [here](table1.txt).He saw that the velocity increases with distance and speculated that this could be the sign of a cosmological expansion. Let's find out what he did.Load the data into an array with `numpy.genfromtxt`, make use of its arguments `names` and `dtype` to read in the column names from the header and choosing the data type on its own as needed. You should get 6 columns * `CAT`, `NUMBER`: These two combined give you the name of the galaxy. * `R`: distance in Mpc * `V`: radial velocity in km/s * `RA`, `DEC`: equatorial coordinates of the galaxy Make a scatter plot of V vs R. Don't forget labels and units...
###Code
# load file into variable `data`
# plot data with plt.scatter
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Use `np.linalg.lstsq` to fit a linear regression function and determine the slope $H_0$ of the line $V=H_0 R$. For that, reshape $R$ as a $N\times1$ matrix (the design matrix) and solve for 1 unknown parameter. Add the best-fit line to the plot. Why is there scatter with respect to the best-fit curve? Is it fair to only fit for the slope and not also for the intercept? How would $H_0$ change if you include an intercept in the fit? Correcting for motion of the sun$V$ as given in the table is a combination of any assumed cosmic expansion and the motion of the sun with respect to that cosmic frame. So, we need to generalize the model to $V=H_0 R + V_s$, where the solar velocity is given by $V_s = X \cos(RA)\cos(DEC) + Y\sin(RA)\cos(DEC)+Z\sin(DEC)$. We'll use `astropy` to read in the RA/DEC coordinate strings and properly convert them to degrees (and then radians):
###Code
import astropy.coordinates as coord
import astropy.units as u
pos = coord.SkyCoord(ra=data['RA'].astype('U8'), dec=data['DEC'].astype('U9'), unit=(u.hourangle,u.deg),frame='fk5')
ra_ = pos.ra.to(u.deg).value * np.pi/180
dec_ = pos.dec.to(u.deg).value * np.pi/180
###Output
_____no_output_____
###Markdown
Expansion velocity of the universeIn 1929, Edwin Hubble published a [paper](http://www.pnas.org/content/pnas/15/3/168.full.pdf) in which he compared the radial velocity of objects with their distance. The former can be done pretty precisely with spectroscopy, the latter is much more uncertain. His original data are [here](table1.txt).He saw that the velocity increases with distance and speculated that this could be the sign of a cosmological expansion. Let's find out what he did.First, the usual python imports:
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Exercise 1:
Load the data into an array with `numpy.genfromtxt`; make use of its arguments `names` and `dtype` to read in the column names from the header and to choose the data type on its own as needed. You should get 6 columns:
* `CAT`, `NUMBER`: These two combined give you the name of the galaxy.
* `R`: distance in Mpc
* `V`: radial velocity in km/s
* `RA`, `DEC`: equatorial coordinates of the galaxy

Make a scatter plot of V vs R. Don't forget labels and units...

Exercise 2:
Use your knowledge of linear fitting to determine the slope $H_0$ of the line $V=H_0 R$. This is a linear model **with no intercept**. For that, reshape $R$ as a $N\times1$ design matrix and solve for 1 unknown parameter. Then, update the plot by adding the best-fit line. Can you guess the cause for the scatter with respect to the best-fit curve? Is it fair or even appropriate to fit only for the slope and not also for the intercept? How would $H_0$ change if you include an intercept in the fit?

Correcting for motion of the sun
$V$ as given in the table is a combination of any assumed cosmic expansion and the motion of the sun with respect to that cosmic frame. So, we need to generalize the model to $V=H_0 R + V_s$, where the solar velocity is given by $V_s = X \cos(RA)\cos(DEC) + Y\sin(RA)\cos(DEC)+Z\sin(DEC)$. We'll use `astropy` to read in the RA/DEC coordinate strings and properly convert them to degrees (and then radians):
###Code
import astropy.coordinates as coord
import astropy.units as u
pos = coord.SkyCoord(ra=data['RA'].astype('U8'), dec=data['DEC'].astype('U9'), unit=(u.hourangle,u.deg),frame='fk5')
ra_ = pos.ra.to(u.deg).value * np.pi/180
dec_ = pos.dec.to(u.deg).value * np.pi/180
###Output
_____no_output_____
###Markdown
Expansion velocity of the universeIn 1929, Edwin Hubble published a [paper](http://www.pnas.org/content/pnas/15/3/168.full.pdf) in which he compared the radial velocity of objects with their distance. The former can be done pretty precisely with spectroscopy, the latter is much more uncertain. His original data are [here](table1.txt).He saw that the velocity increases with distance and speculated that this could be the sign of a cosmological expansion. Let's find out what he did.First, the usual python imports:
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Exercise 1:Load the data into an array with `numpy.genfromtxt`, make use of its arguments `names` and `dtype` to read in the column names from the header and choosing the data type on its own as needed. You should get 6 columns * `CAT`, `NUMBER`: These two combined give you the name of the galaxy. * `R`: distance in Mpc * `V`: radial velocity in km/s * `RA`, `DEC`: equatorial coordinates of the galaxy Make a scatter plot of V vs R. Don't forget labels and units...
###Code
# load file into variable `data` ...
data = np.genfromtxt('table1.txt', dtype = None, names = True, encoding ='utf8')
data
# make scatter plot
plt.scatter(data['R'], data['V'])
###Output
_____no_output_____
###Markdown
Exercise 2:
Use your knowledge of linear fitting to determine the slope $H_0$ of the line $V=H_0 R$. This is a linear model **with no intercept**. For that, reshape $R$ as a $N\times1$ design matrix and solve for 1 unknown parameter. Then, update the plot by adding the best-fit line.
###Code
N = len(data)
R = data['R'].reshape(N, 1)
R
X = data['R'].reshape((N,1))
params, _, _, _ = np.linalg.lstsq(X, data['V'])
print(params)
H0 = params[0]
R = np.linspace(0,2.5,100)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(data['R'], data['V'])
ax.plot(R, H0*R, 'k--')
###Output
[423.93732323]
###Markdown
Can you guess the cause for the scatter with respect to the best-fit curve? Is it fair or even appropriate to fit only for the slope and not also for the intercept? How would $H_0$ change if you include an intercept in the fit?
###Code
X = np.ones((N, 2))
X[:,1] = data['R']
params, _, _, _ = np.linalg.lstsq(X, data['V'])
print(params)
inter, H0 = params
R = np.linspace(0,2.5,100)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(data['R'], data['V'])
ax.plot(R, H0*R + inter, 'k--')
ax.set_xlim(xmin=0, xmax=2.5)
ax.set_xlabel('Distance [Mpc]')
ax.set_ylabel('Velocity [km/s]')
###Output
(array([-40.7836491 , 454.15844092]), array([1193442.36627214]), 2, array([6.98607217, 2.17085022]))
###Markdown
Correcting for motion of the sun$V$ as given in the table is a combination of any assumed cosmic expansion and the motion of the sun with respect to that cosmic frame. So, we need to generalize the model to $V=H_0 R + V_s$, where the solar velocity is given by $V_s = X \cos(RA)\cos(DEC) + Y\sin(RA)\cos(DEC)+Z\sin(DEC)$. We'll use `astropy` to read in the RA/DEC coordinate strings and properly convert them to degrees (and then radians):
###Code
import astropy.coordinates as coord
import astropy.units as u
import numpy as np
pos = coord.SkyCoord(ra=data['RA'].astype('U8'), dec=data['DEC'].astype('U9'), unit=(u.hourangle,u.deg),frame='fk5')
ra_ = pos.ra.to(u.deg).value * np.pi/180
dec_ = pos.dec.to(u.deg).value * np.pi/180
###Output
_____no_output_____
###Markdown
Exercise 3:Construct a new $N\times4$ design matrix for the four unknown parameters $H_0$, $X$, $Y$, $Z$ to account for the solar motion. The resulting $H_0$ is Hubble's own version of the "Hubble constant". What do you get?
###Code
Ah = np.column_stack((data['R'], np.cos(ra_)*np.cos(dec_), np.sin(ra_)*np.cos(dec_), np.sin(dec_)))
params_h, _, _, _ = np.linalg.lstsq(Ah, data['V'])
print(params_h)
H0 = params_h[0]
###Output
[ 465.17797833 -67.84096674 236.14706994 -199.58892695]
###Markdown
Make a scatter plot of $V-V_S$ vs $R$. How is it different from the previous one without the correction for solar velocity? Add the best-fit linear regression line.
###Code
# solar-motion model: V_s = X cos(RA)cos(DEC) + Y sin(RA)cos(DEC) + Z sin(DEC)
VS = params_h[1]*np.cos(ra_)*np.cos(dec_) + params_h[2]*np.sin(ra_)*np.cos(dec_) + params_h[3]*np.sin(dec_)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(data['R'], data['V'] - VS)
###Output
_____no_output_____
###Markdown
Exercise 4:Using `astropy.units`, can you estimate the age of the universe from $H_0$? Does it make sense?
###Code
H0q = H0 * u.km / u.s / u.Mpc
(1./H0q).to(u.Gyr)
###Output
_____no_output_____
###Markdown
Expansion velocity of the universeIn 1929, Edwin Hubble published a [paper](http://www.pnas.org/content/pnas/15/3/168.full.pdf) in which he compared the radial velocity of objects with their distance. The former can be done pretty precisely with spectroscopy, the latter is much more uncertain. His original data are [here](table1.txt).He saw that the velocity increases with distance and speculated that this could be the sign of a cosmological expansion. Let's find out what he did.First, the usual python imports:
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Exercise 1:Load the data into an array with `numpy.genfromtxt`, make use of its arguments `names` and `dtype` to read in the column names from the header and choosing the data type on its own as needed. You should get 6 columns * `CAT`, `NUMBER`: These two combined give you the name of the galaxy. * `R`: distance in Mpc * `V`: radial velocity in km/s * `RA`, `DEC`: equatorial coordinates of the galaxy Make a scatter plot of V vs R. Don't forget labels and units...
###Code
# load file into variable `data` ...
# make scatter plot
###Output
_____no_output_____
###Markdown
Exercise 2:
Use your knowledge of linear fitting to determine the slope $H_0$ of the line $V=H_0 R$. This is a linear model **with no intercept**. For that, reshape $R$ as a $N\times1$ design matrix and solve for 1 unknown parameter. Then, update the plot by adding the best-fit line. Can you guess the cause for the scatter with respect to the best-fit curve? Is it fair or even appropriate to fit only for the slope and not also for the intercept? How would $H_0$ change if you include an intercept in the fit?

Correcting for motion of the sun
$V$ as given in the table is a combination of any assumed cosmic expansion and the motion of the sun with respect to that cosmic frame. So, we need to generalize the model to $V=H_0 R + V_s$, where the solar velocity is given by $V_s = X \cos(RA)\cos(DEC) + Y\sin(RA)\cos(DEC)+Z\sin(DEC)$. We'll use `astropy` to read in the RA/DEC coordinate strings and properly convert them to degrees (and then radians):
###Code
import astropy.coordinates as coord
import astropy.units as u
pos = coord.SkyCoord(ra=data['RA'].astype('U8'), dec=data['DEC'].astype('U9'), unit=(u.hourangle,u.deg),frame='fk5')
ra_ = pos.ra.to(u.deg).value * np.pi/180
dec_ = pos.dec.to(u.deg).value * np.pi/180
###Output
_____no_output_____
###Markdown
Expansion velocity of the universeIn 1929, Edwin Hubble published a [paper](http://www.pnas.org/content/pnas/15/3/168.full.pdf) in which he compared the radial velocity of objects with their distance. The former can be done pretty precisely with spectroscopy, the latter is much more uncertain. His original data are [here](table1.txt).He saw that the velocity increases with distance and speculated that this could be the sign of a cosmological expansion. Let's find out what he did.First, the usual python imports:
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Exercise 1:Load the data into an array with `numpy.genfromtxt`, make use of its arguments `names` and `dtype` to read in the column names from the header and choosing the data type on its own as needed. You should get 6 columns * `CAT`, `NUMBER`: These two combined give you the name of the galaxy. * `R`: distance in Mpc * `V`: radial velocity in km/s * `RA`, `DEC`: equatorial coordinates of the galaxy Make a scatter plot of V vs R. Don't forget labels and units...
###Code
data = np.genfromtxt("table1.txt", dtype = ('S8', 'S8', float, float, 'S8', 'S8'), skip_header = 0, names = True)
print(np.shape(data))
distance = data['R']
velocity = data['V']
plt.scatter(distance, velocity)
plt.xlabel("Distance (Mpc)")
plt.ylabel("Radial Velocity (km/s)")
###Output
(24,)
###Markdown
Exercise 2:
Use your knowledge of linear fitting to determine the slope $H_0$ of the line $V=H_0 R$. This is a linear model **with no intercept**. For that, reshape $R$ as a $N\times1$ design matrix and solve for 1 unknown parameter. Then, update the plot by adding the best-fit line.
###Code
R = distance[:, np.newaxis]
V = velocity[:, np.newaxis]
pars_Cov = np.linalg.inv(np.matmul(R.T, R))
best_pars = np.matmul(np.matmul(pars_Cov, R.T), V)
print(best_pars)
plt.scatter(distance, velocity)
plt.xlabel("Distance (Mpc)")
plt.ylabel("Radial Velocity (km/s)")
plt.plot(R, best_pars*R)
###Output
[[423.93732323]]
###Markdown
Can you guess the cause for the scatter with respect to the best-fit curve? Is it fair or even appropriate to fit only for the slope and not also for the intercept? How would $H_0$ change if you include an intercept in the fit? Correcting for motion of the sun$V$ as given in the table is a combination of any assumed cosmic expansion and the motion of the sun with respect to that cosmic frame. So, we need to generalize the model to $V=H_0 R + V_s$, where the solar velocity is given by $V_s = X \cos(RA)\cos(DEC) + Y\sin(RA)\cos(DEC)+Z\sin(DEC)$. We'll use `astropy` to read in the RA/DEC coordinate strings and properly convert them to degrees (and then radians):
###Code
import astropy.coordinates as coord
import astropy.units as u
pos = coord.SkyCoord(ra=data['RA'].astype('U8'), dec=data['DEC'].astype('U9'), unit=(u.hourangle,u.deg),frame='fk5')
ra = pos.ra.to(u.deg).value * np.pi/180
dec = pos.dec.to(u.deg).value * np.pi/180
###Output
_____no_output_____
###Markdown
Exercise 3:
Construct a new $N\times4$ design matrix for the four unknown parameters $H_0$, $X$, $Y$, $Z$ to account for the solar motion. The resulting $H_0$ is Hubble's own version of the "Hubble constant". What do you get?

Make a scatter plot of $V-V_S$ vs $R$. How is it different from the previous one without the correction for solar velocity? Add the best-fit linear regression line.
###Code
vs1 = np.cos(ra)*np.cos(dec)
vs2 = np.sin(ra)*np.cos(dec)
vs3 = np.sin(dec)
X = np.vstack((distance, vs1, vs2, vs3)).T
pars_Cov = np.linalg.inv(np.matmul(X.T, X))
best_pars = np.matmul(np.matmul(pars_Cov, X.T), V)
print(best_pars)
H_0 = best_pars[0]
Vs = X[:,1:] @ best_pars[1:]
plt.scatter(R, np.subtract(V,Vs))
plt.plot(R, best_pars[0]*R)
###Output
[[ 465.18113259]
[ -67.83696743]
[ 236.14419034]
[-199.58718077]]
###Markdown
Exercise 4:Using `astropy.units`, can you estimate the age of the universe from $H_0$? Does it make sense?
###Code
H = float(H_0) * u.km / u.s / u.Mpc
age = 1/H
age.to(u.yr)
###Output
_____no_output_____
###Markdown
Measurement errorsSo far we have not incorporated any measurement uncertainties. Exercise 5:Can you guess or estimate them from the scatter with respect to the best-fit line? You may want to look at the residuals with respect to the best-fit model.With this error estimate, construct a covariance matrix $\Sigma$ and repeat the linear regression, this time with errors, to get a new estimate of $H_0$. Has it changed?
###Code
residuals = np.subtract(np.subtract(V,Vs), H_0*R)
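# caution: this uses the raw signed residuals as the diagonal entries of Sigma;
# squared residuals (or a single variance estimate) would be the more standard choice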
sigma = np.diag(residuals[:,0])
sigma_inv = np.linalg.inv(sigma)
pars_Cov = np.linalg.inv(np.matmul(np.matmul(X.T, sigma_inv), X))
best_pars = np.matmul(np.matmul(np.matmul(pars_Cov, X.T),sigma_inv), np.subtract(V,Vs))
Ho_2 = best_pars[0]
print(Ho_2)
###Output
[783.40330493]
###Markdown
Exercise 6:Compute the parameter covariance matrix and read off the variance of $H_0$. Update your plot to illustrate that uncertainty.How large is the relative error? Would that help with the problematic age estimate above? If not, what do you think is going on?
###Code
pars_Cov = np.linalg.inv(np.matmul(np.matmul(X.T, sigma_inv), X))
H_var = pars_Cov[0][0]
plt.scatter(R, np.subtract(V,Vs))
plt.plot(R, Ho_2*R)
plt.plot(R, (Ho_2+H_var)*R)
plt.plot(R, (Ho_2-H_var)*R)
###Output
_____no_output_____ |
feature-extraction_v1.0.ipynb | ###Markdown
Extracting from `morph.tf` all morphological categories as separate Features for the Tischendorf TF-App
Cody Kingham, MA, MA, PhD cand ([University of Cambridge](https://www.cam.ac.uk/)) has stored the morphological analysis of the Tischendorf text (provided by Ulrik Sandborg-Petersen: https://github.com/morphgnt/tischendorf-data/) in the feature list `morph.tf` (https://github.com/codykingham/tischendorf_tf). I have (1) opened that list within Excel, (2) deleted the first lines containing the TF feature information, and (3) stored the morphology as morph_quite-orig.xlsx ("quite-orig" because it does not contain the original TF feature information). This is the feature information I deleted:

After that the following procedure was applied:
1. Identifying the different morphological features contained in the morph code by comparing the Tisch TF-app with the Tischendorf text in [Logos](https://ref.ly/logosres/tischnt?ref=BibleTISCH.Mt1). Below you find the coding for the first words of Matthew 1:1-2:
2. Extracting the different morphology features and adding them with new tagging as additional columns in the pandas df.
3. Exporting the completed df as `tischendorffeatures_v1.0.xlsx`
4. Copy pasting from the exported spreadsheet the new tf files as separate morphological features:
 - sp (part of speech)
 - nu (number)
 - ps (person)
 - vt (verbal tense)
 - voice
 - mood
 - case
 - gn (gender)
 - nountype
 - prntyp (pronoun type)
 - originterrdiff (the original morphology distinguished between two different interrogative pronouns; that distinction is found in originterrdiff)
###Code
import sys, os, collections
import pandas as pd
import numpy as np
import re
###Output
_____no_output_____
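###Markdown
Before processing the whole column, a small illustrative sanity check (not part of the original analysis) of the regex logic on a single sample morph code, `V-PAI3S` (a present active indicative 3rd person singular verb, e.g. λέγει). The asserts mirror the patterns used in the functions below.
###Code
sample = 'V-PAI3S'
assert re.search(r'^V-.*', sample)              # sp: verb
assert re.search(r'V-P.*', sample)              # vt: present
assert re.search(r'V-[A-Z][A].*', sample)       # voice: active
assert re.search(r'V-[A-Z][A-Z][I].*', sample)  # mood: indicative
assert re.search(r'-.*3[SP]', sample)           # ps: p3
assert re.search(r'-.*S$', sample)              # nu: sg
###Output
_____no_output_____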
###Markdown
Extraction process Loading original `morph.tf` as df
###Code
featureprep=pd.read_excel('d:/OneDrive/1200_AUS-research/Fabric-TEXT/Tischendorf-feature-project/morph_quite-orig.xlsx',delimiter='\t',encoding='utf-16')
pd.set_option('display.max_columns', 50)
featureprep.head(20)
featureprep.dtypes
###Output
_____no_output_____
###Markdown
Lets change the orig columns to strings Adding Part of Speech
###Code
def spconditions(row):
# single-letter categories are anchored on the dash ('A-', 'C-', ...); otherwise
# e.g. ADV/ARAM would be swallowed by '^A.*' and COND/CONJ by '^C.*'
if re.search('^A-.*', str(row)):
return 'adjective'
if re.search('^ADV.*', str(row)):
return 'adverb'
if re.search('^ARAM.*', str(row)):
return 'aramaic-form-indeclinable'
if re.search('^C-.*', str(row)):
return 'pronoun'
if re.search('^COND.*', str(row)):
return 'conjunction-cond'
if re.search('^CONJ.*', str(row)):
return 'conjunction'
if re.search('^D-.*', str(row)):
return 'pronoun'
if re.search('^F-.*', str(row)):
return 'pronoun'
if re.search('^HEB.*', str(row)):
return 'hebrew-form-indeclinable'
if re.search('^I-.*', str(row)):
return 'pronoun'
if re.search('^INJ.*', str(row)):
return 'interjection'
if re.search('^K-.*', str(row)):
return 'pronoun'
if re.search('^N-.*', str(row)):
return 'noun'
if re.search('^P-.*', str(row)):
return 'pronoun'
if re.search('^PREP$', str(row)):
return 'preposition'
if re.search('^PRT.*', str(row)):
return 'particle'
if re.search('^Q-.*', str(row)):
return 'pronoun'
if re.search('^R-.*', str(row)):
return 'pronoun'
if re.search('^S-.*', str(row)):
return 'pronoun'
if re.search('^T-.*', str(row)):
return 'article'
if re.search('^V-.*', str(row)):
return 'verb'
if re.search('^X-.*', str(row)):
return 'pronoun'
else:
return ''
featureprep['sp']=featureprep['origcode'].apply(spconditions)
featureprep.head(50)
featureprep['sp'].value_counts()
###Output
_____no_output_____
###Markdown
Adding Gender
###Code
def gender(row):
if re.search('.*F$', str(row)):
return 'f'
if re.search('.*M$', str(row)):
return 'm'
if re.search('.*N$', str(row)):
return 'n'
else:
return ''
featureprep['gn']=featureprep['origcode'].apply(gender)
featureprep.head(20)
###Output
_____no_output_____
###Markdown
Adding Number
###Code
def number(row):
if re.search('-.*S$', str(row)):
return 'sg'
if re.search('-.*S[MFN]$', str(row)):
return 'sg'
if re.search('-.*P$', str(row)):
return 'pl'
if re.search('-.*P[MFN]$', str(row)):
return 'pl'
else:
return ''
featureprep['nu']=featureprep['origcode'].apply(number)
featureprep.head(50)
###Output
_____no_output_____
###Markdown
Adding Person
###Code
def person(row):
if re.search('-.*1[SP]', str(row)):
return 'p1'
if re.search('-1[NGDASP]', str(row)):
return 'p1'
if re.search('-.*2[SP]', str(row)):
return 'p2'
if re.search('-2[NGDASP]', str(row)):
return 'p2'
if re.search('-.*3[SP]', str(row)):
return 'p3'
if re.search('-3[NGDASP]', str(row)):
return 'p3'
else:
return ''
featureprep['ps']=featureprep['origcode'].apply(person)
featureprep.head(20)
###Output
_____no_output_____
###Markdown
Adding Case
###Code
def case(row):
if re.search('-N[SP].*', str(row)):
return 'nominative'
if re.search('-[123]N[SP].*', str(row)):
return 'nominative'
if re.search('-G[SP].*', str(row)):
return 'genitive'
if re.search('-[123]G[SP].*', str(row)):
return 'genitive'
if re.search('-D[SP].*', str(row)):
return 'dative'
if re.search('-[123]D[SP].*', str(row)):
return 'dative'
if re.search('-A[SP].*', str(row)):
return 'accusative'
if re.search('-[123]A[SP].*', str(row)):
return 'accusative'
if re.search('-V[SP].*', str(row)):
return 'vocative'
if re.search('-[123]V[SP].*', str(row)):
return 'vocative'
if re.search('-PRI$', str(row)):
return 'indeclinable'
else:
return ''
featureprep['case']=featureprep['origcode'].apply(case)
featureprep.head(20)
###Output
_____no_output_____
###Markdown
Adding Tense
###Code
def tense(row):
if re.search('V-A.*', str(row)):
return 'aorist'
if re.search('V-[0-9][A].*', str(row)):
return 'aorist'
if re.search('V-P.*', str(row)):
return 'present'
if re.search('V-[0-9][P].*', str(row)):
return 'present'
if re.search('V-F.*', str(row)):
return 'future-I'
if re.search('V-[0-9][F].*', str(row)):
return 'future-I'
if re.search('V-I.*', str(row)):
return 'imperfect'
if re.search('V-[0-9][I].*', str(row)):
return 'imperfect'
if re.search('V-L.*', str(row)):
return 'plusquamperfect'
if re.search('V-[0-9][L].*', str(row)):
return 'plusquamperfect'
if re.search('V-R.*', str(row)):
return 'perfect'
if re.search('V-[0-9][R].*', str(row)):
return 'perfect'
if re.search('V-T.*', str(row)):
return 'future-II'
if re.search('V-[0-9][T].*', str(row)):
return 'future-II'
else:
return ''
featureprep['vt']=featureprep['origcode'].apply(tense)
featureprep.head(20)
###Output
_____no_output_____
###Markdown
Adding Voice
###Code
def voice(row):
# some codes carry a leading digit (e.g. second aorist V-2AAI3S), as the tense
# and mood functions already assume; allow an optional digit here as well
if re.search('V-[0-9]?[A-Z][A].*', str(row)):
return 'active'
if re.search('V-[0-9]?[A-Z][O].*', str(row)):
return 'passive'
if re.search('V-[0-9]?[A-Z][P].*', str(row)):
return 'passive'
if re.search('V-[0-9]?[A-Z][D].*', str(row)):
return 'medium'
if re.search('V-[0-9]?[A-Z][N].*', str(row)):
return 'mediumorpassive'
else:
return ''
featureprep['voice']=featureprep['origcode'].apply(voice)
featureprep.head(20)
###Output
_____no_output_____
###Markdown
Adding Mood
###Code
def mood(row):
if re.search('V-[A-Z][A-Z][I].*', str(row)):
return 'indicative'
if re.search('V-[0-9][A-Z][A-Z][I].*', str(row)):
return 'indicative'
if re.search('V-[A-Z][A-Z][M].*', str(row)):
return 'imperative'
if re.search('V-[0-9][A-Z][A-Z][M].*', str(row)):
return 'imperative'
if re.search('V-[A-Z][A-Z][N].*', str(row)):
return 'infinitive'
if re.search('V-[0-9][A-Z][A-Z][N].*', str(row)):
return 'infinitive'
if re.search('V-[A-Z][A-Z][O].*', str(row)):
return 'optative'
if re.search('V-[0-9][A-Z][A-Z][O].*', str(row)):
return 'optative'
if re.search('V-[A-Z][A-Z][P].*', str(row)):
return 'participle'
if re.search('V-[0-9][A-Z][A-Z][P].*', str(row)):
return 'participle'
if re.search('V-[A-Z][A-Z][S].*', str(row)):
return 'subjunctive'
if re.search('V-[0-9][A-Z][A-Z][S].*', str(row)):
return 'subjunctive'
else:
return ''
featureprep['mood']=featureprep['origcode'].apply(mood)
featureprep.head(50)
###Output
_____no_output_____
###Markdown
Adding Pronoun type
###Code
def pronountype(row):
if re.search('^P-', str(row)):
return 'pers'
if re.search('^D-', str(row)):
return 'demo'
if re.search('^I-', str(row)):
return 'interr'
if re.search('^X-', str(row)):
return 'indef'
if re.search('^F-', str(row)):
return 'reflex'
if re.search('^K-', str(row)):
return 'correl'
if re.search('^R-', str(row)):
return 'relativ'
if re.search('^S-', str(row)):
return 'posses'
if re.search('^C-', str(row)):
return 'recip'
if re.search('^Q-', str(row)):
return 'interr'
else:
return ''
featureprep['prntyp']=featureprep['origcode'].apply(pronountype)
featureprep.head(50)
###Output
_____no_output_____
###Markdown
Adding Noun Type
###Code
def propernoun(row):
if re.search('-PRI$', str(row)):
return 'nmpr'
else:
return ''
featureprep['nountype']=featureprep['origcode'].apply(propernoun)
featureprep.head(50)
###Output
_____no_output_____
###Markdown
Adding Orig Interrogative differential
###Code
def originterrdiff(row):
if re.search('^I.*', str(row)):
return 'I'
if re.search('^Q.*', str(row)):
return 'Q'
else:
return ''
featureprep['originterrdiff']=featureprep['origcode'].apply(originterrdiff)
featureprep['originterrdiff'].value_counts()
###Output
_____no_output_____
###Markdown
Exporting proces
###Code
featureprep.head()
###Output
_____no_output_____
###Markdown
Reorder: sorting first...
###Code
featureprep.sort_values(['origorder'], ascending=True).head(10)
###Output
_____no_output_____
###Markdown
to excel spreadsheet...
###Code
featureprep.to_excel('d:/OneDrive/1200_AUS-research/Fabric-TEXT/Tischendorf-feature-project/tischendorffeatures_v1.0.xlsx')
###Output
_____no_output_____
###Markdown
Misc Features to txt files...
###Code
# tischendorffeatures[['origorder', 'nu']].to_csv('d:/OneDrive/1200_AUS-research/Fabric-TEXT/Tischendorf-feature-project/TF_features_gn.csv')
###Output
_____no_output_____ |
_build/html/_sources/content/Section_02/Visual_diagnostics.ipynb | ###Markdown
Visual diagnostics
We will discuss:
* Trace plots
* Autocorrelation plots
* Rank plots

Trace plots az.plot_trace()
MCMC samples should not be sensitive to the starting point, so if you sample more than one chain (starting from different places) you should get basically the _same_ distribution, up to some small error. As we already discussed in the numerical diagnostics section, MCMC samples should also have the lowest possible autocorrelation. Trace plots can help diagnose:
* Bad initialization
* Difficult topologies (such as Neal's funnel)
* Multimodal posteriors

Pathological traces
The following figure shows examples of problematic samples. On the first row we see that the MCMC chains have large autocorrelation; the trace (left column) shows long regions of monotonicity (lines parallel to the x-axis). This could be a consequence of a multimodal posterior with barriers of very low probability between modes, so that the sampler has trouble moving freely from mode to mode. Another explanation could be high correlation between parameters, which can also be problematic for some samplers, especially Metropolis. In such cases the multimodality could be _apparent_ and not a real feature of our posterior.
On the second row we see two chains that started from two very different positions and eventually converge to the same distribution. The first $\approx$ 25 samples could bias our results, so we can just remove them (*burn-in*).
On the last row, we see two chains exploring two different regions of the parameter space. From the trace it seems they are in fact approaching each other at a slow rate and may eventually reach the same stationary distribution. (A minimal synthetic trace-plot sketch appears just below.)

Autocorrelation plot az.plot_autocorr()
As we discussed in the Numerical Diagnostics section, we can see autocorrelation as a factor that decreases the actual amount of information contained in a sample, so we want to reduce autocorrelation as much as possible.
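Before the autocorrelation examples, here is the promised trace-plot sketch; the two chains are synthetic stand-ins rather than draws from a real model:
###Code
import numpy as np
import arviz as az

rng = np.random.RandomState(0)
# one well-mixed chain and one slowly drifting chain, shaped (chain, draw)
well_mixed = rng.normal(size=(1, 500))
drifting = np.cumsum(rng.normal(size=(1, 500)), axis=-1) / 25
az.plot_trace(np.vstack([well_mixed, drifting]))
###Output
_____no_output_____
###Markdown
Now a deliberately pathological pair of chains for the autocorrelation plot: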
###Code
bad_chains = np.linspace(0, 1, 1000).reshape(2, -1)
az.plot_autocorr(bad_chains)
###Output
_____no_output_____
###Markdown
The autocorrelation plot shows the _degree of autocorrelation_; by default it uses a maximum window of 100. The previous figure, corresponding to `bad_chains`, shows very high autocorrelation, while the next figure, corresponding to `good_chains`, shows very low autocorrelation.
###Code
good_chains = stats.uniform.rvs(0, 1, size=(2, 500))
az.plot_autocorr(good_chains)
###Output
_____no_output_____
###Markdown
Rank plot az.plot_rank()
Rank plots are histograms of the ranked posterior draws, ranked over all chains and then plotted separately for each chain. The idea behind this plot is that if all of the chains are targeting the same posterior, we expect the ranks in each chain to be uniform. Additionally, if rank plots of all chains look similar, this indicates good mixing of the chains. This is a [recently](https://arxiv.org/abs/1903.08008) proposed visual test, whose authors argue it is superior to trace plots:
###Code
az.plot_rank(good_chains);
###Output
_____no_output_____
###Markdown
We can see that for the `good_chains` the histogram of the ranks is more or less uniform (uniformity improves with the sample size), and we can also see that both chains look similar, with no distinctive pattern. This is in clear contrast to the results for the `bad_chains`: while they are uniform, the two chains are exploring two separate sets of values. Notice how this is consistent with the way we created `bad_chains`: 1000 ordered numbers from 0 to 1 split into two halves.
###Code
az.plot_rank(bad_chains);
###Output
_____no_output_____
###Markdown
The following is a snippet so you can get a better intuition of how to interpret rank plots. Notice that `az.plot_rank` is doing a more involved computation, but to get intuition this block of code should be enough. Here the histogram of the ranks (right panel) is rotated with respect to the previous histogram to match the cumulative distribution on the left panel. So you can see that the bottom bar on the right contains the first 100 values from the cumulative distribution on the left, the second bar the second 100 values, and so on. You can see a rank plot as a device for detecting an excess of any given number; try, for example, uncommenting the commented line in the snippet and see how an excess of zeros affects the rank plot.
###Code
original_data = np.random.beta(2, 2, 1000)
#original_data[:100] = 0
ranked_data = stats.rankdata(original_data)
_, ax = plt.subplots(1, 2, figsize=(12, 4), sharey=True)
ax[0].plot(original_data, ranked_data, 'b.')
ax[0].set_xlabel('data values')
ax[0].set_ylabel('rank')
ax[0].set_xlim([0, 1])
ax[1].hist(ranked_data, bins=10, rwidth=0.9, orientation='horizontal')
ax[1].set_xlabel('frequency');
###Output
_____no_output_____ |
notebooks/b2p_comments_data_extraction.ipynb | ###Markdown
Overview
This dataset comes from Bridges to Prosperity, an NGO that builds bridges to help people in need get easier access to schools, hospitals, and markets. This notebook is used to extract specific columns from a paragraph of data in the `Comments` column of our dataset. The delimiters, casing, and wording sometimes vary between and within columns, so we used regular expressions for their versatility.

Imports and reading in dataset
###Code
import re
import pandas as pd
bridges_df = pd.read_excel('B2P Dataset_2020.10.xlsx')
bridges_df.info()
# Display all columns, without truncation
pd.set_option('display.max_columns', None)
bridges_df
###Output
_____no_output_____
###Markdown
Test Regex Matches
###Code
# Example of paragraph of data
test_string_2 = bridges_df['Bridge Opportunity: Comments'][2]
test_string_2
comments = bridges_df['Bridge Opportunity: Comments']
non_nan_comments = comments.notna()
comments
comments[1] > 0 or comments[1] <= 0
comments[1] == comments[1]
pd.notna(comments[1])
people_directly_served_matches = 0
people_directly_served_non_matches = set()
for i in range(len(comments)):
if non_nan_comments[i]:
people_directly_served = re.search(
pattern=r'([\d-]+) people directly served',
string=comments[i])
if people_directly_served:
people_directly_served_matches += 1
else:
people_directly_served_non_matches.add(i)
people_directly_served_matches
# All indexes that aren't a paragraph (and for which these regex don't apply)
non_formatted_comment_indexes = people_directly_served_non_matches
def count_regex_matches(
regex: str,
comments: pd.Series,
ignore: set
) -> int:
match_count = 0
non_match_indexes = set()
match_indexes = set()
for index, comment in enumerate(comments):
if pd.notna(comment):
if re.search(regex, comment):
match_count += 1
match_indexes.add(index)
else:
if index not in ignore:
non_match_indexes.add(index)
print('Match count:', match_count)
print('Match indexes:', match_indexes)
print('Non-match indexes:', non_match_indexes)
count_regex_matches('([\w\s]+) injur', comments, ignore=non_formatted_comment_indexes)
comments[388]
# Matches range of people directly served, e.g. 6000-10000
# Case insensitive (?i)
people_directly_served = re.search(
pattern=r'(?i)([\d-]+) people directly served',
string=test_string_2
)
if people_directly_served: print(people_directly_served.group(1))
# Matches elevation in meters
# Note: Change column name to be Elevation (meters) so each column value can be an integer
# Then cast this string as int
elevation = re.search(
pattern=r'Elevation:(\d+)',
string=test_string_2
)
if elevation: print(elevation.group(1))
# Matches the 'cell' of the bridge site, optional space after Cell, optional dash between
# word characters
cell = re.search(
pattern=r'Cell:(\w+\-?\w*)',
string=test_string_2
)
if cell: print(cell.group(1))
bridge_connection = re.search(
pattern=r'Bridge (?:[\s\w]+)?connect(?:\w*) ([\w\s-]+)',
string=test_string_2
)
if bridge_connection: print(bridge_connection.group(1)) # None
injuries = re.search(
pattern=r'([\w\s]+) injur',
string=test_string_2
)
if injuries: print(injuries.group(1))
deaths = re.search(
pattern=r'([\w\s]+) die',
string=test_string_2
)
if deaths: print(deaths.group(1))
crossing_frequency = re.search(
pattern=r'(?i)Cross river on a normal day-?([>\d-]+)',
string=test_string_2
)
if crossing_frequency: print(crossing_frequency.group(1))
nearby_city_centers = re.search(
pattern=r'Nearby city centers--?([a-zA-Z -]*)',
string=test_string_2
)
if nearby_city_centers: print(nearby_city_centers.group(1).replace(' -', ', '))
current_crossing_method = re.search(
pattern=r'(?i)Crossing River now-([\w\s]+)',
string=test_string_2
)
if current_crossing_method: print(current_crossing_method.group(1))
river_crossing_difficulty = re.search(
pattern=r'(?i)Impossible/Dangerous to cross the river-([\w\s\-?]+)',
string=test_string_2
)
if river_crossing_difficulty: print(river_crossing_difficulty.group(1))
hours_to_nearest_crossing = re.search(
pattern=r'(?i)Travel to nearest safe bridge/river crossing-([>\w\s]+)',
string=test_string_2)
if hours_to_nearest_crossing: print(hours_to_nearest_crossing.group(1))
hours_to_hospital = re.search(
pattern=r'(?i)Hours walking to reach the Hospital-([.\d-]+)',
string=test_string_2
)
if hours_to_hospital: print(hours_to_hospital.group(1))
hours_to_health_center = re.search(
pattern=r'(?i)Hours walking to reach the Health Center-([.\d-]+)',
string=test_string_2
)
if hours_to_health_center: print(hours_to_health_center.group(1))
# Rename to Hours walking (no of), like the rest of the columns
hours_to_market = re.search(
pattern=r'(?i)Hours of walking to reach the market-([.\d-]+)',
string=test_string_2
)
if hours_to_market: print(hours_to_market.group(1))
hours_to_primary_school = re.search(
pattern=r'(?i)Hours walking to reach Primary School-([.\d-]+)',
string=test_string_2
)
if hours_to_primary_school: print(hours_to_primary_school.group(1))
hours_to_secondary_school = re.search(
pattern=r'(?i)Hours walking to reach Secondary School-([.\d-]+)',
string=test_string_2)
if hours_to_secondary_school: print(hours_to_secondary_school.group(1))
hours_to_church = re.search(
pattern=r'(?i)Hours walking to reach the Church-([.\d-]+)',
string=test_string_2
)
if hours_to_church: print(hours_to_church.group(1))
land_by_river_bank = re.search(
pattern=r'(?i)Land within 50m of river bank-([\w\s()]+)',
string=test_string_2
)
if land_by_river_bank: print(land_by_river_bank.group(1))
# Sometimes next column is separated with comma
soil = re.search(
pattern=r'Soil-([\w\s]+),Sand',
string=test_string_2
)
if soil: print(soil.group(1))
sand_availability = re.search(
pattern=r'Sand-([\w\s]+)',
string=test_string_2
)
if sand_availability: print(sand_availability.group(1))
gravel_availability = re.search(
pattern=r'Gravel-([\w\s,]+)',
string=test_string_2
)
if gravel_availability: print(gravel_availability.group(1))
stone_availability = re.search(
pattern=r'Stone-([\w\s,]+) ?/',
string=test_string_2
)
if stone_availability: print(stone_availability.group(1))
timber_availability = re.search(
pattern=r'Timber-([\w\s,]+)',
string=test_string_2
)
if timber_availability: print(timber_availability.group(1))
stone_provided_by = re.search(
pattern=r'Stone provided by-([\w\s,]+)',
string=test_string_2
)
if stone_provided_by: print(stone_provided_by.group(1))
sand_provided_by = re.search(
pattern=r'Sand Provided by-([\w\s,]+)',
string=test_string_2
)
if sand_provided_by: print(sand_provided_by.group(1))
gravel_provided_by = re.search(
pattern=r'Gravel provided by-([\w\s,]+)',
string=test_string_2
)
if gravel_provided_by: print(gravel_provided_by.group(1))
timber_provided_by = re.search(
pattern=r'Timber provided by-([\w\s,]+)',
string=test_string_2
)
if timber_provided_by: print(timber_provided_by.group(1))
cement_provided_by = re.search(
pattern=r'Cement provided by-([\w\s,]+)',
string=test_string_2
)
if cement_provided_by: print(cement_provided_by.group(1))
reinforcement_steel_provided_by = re.search(
pattern=r'Reinforcement steel provided by-([\w\s,]+)',
string=test_string_2
)
if reinforcement_steel_provided_by: print(reinforcement_steel_provided_by.group(1))
land_ownership = re.search(
pattern=r'Land ownership-([\w\s,]+)',
string=test_string_2
)
if land_ownership: print(land_ownership.group(1))
land_ownership_permission = re.search(
pattern=r'Land ownership permission-([\w\s,]+)',
string=test_string_2
)
if land_ownership_permission: print(land_ownership_permission.group(1))
proposed_bridge_location = re.search(
pattern=r'proposed bridge location is ([\w\s,]+).?-',
string=test_string_2
)
if proposed_bridge_location: print(proposed_bridge_location.group(1))
# Strip m from values, put m in column name
proposed_bridge_span = re.search(
pattern=r'proposed bridge span is (?:approximately )?(\d+)\w*.?-',
string=test_string_2
)
if proposed_bridge_span: print(proposed_bridge_span.group(1))
# Also in meters
level_difference_between_two_banks = re.search(
pattern=r'level difference between two banks is ([\d.]+)',
string=test_string_2
)
if level_difference_between_two_banks: print(level_difference_between_two_banks.group(1))
space_for_foundation = re.search(
pattern=r'space for foundation is (\w+\s*)',
string=test_string_2
)
if space_for_foundation: print(space_for_foundation.group(1))
free_board = re.search(
pattern=r'free board between the lowest point of the proposed bridge and the highest flood level(?: is)? (\w+\s*)',
string=test_string_2
)
if free_board: print(free_board.group(1))
river_bed_status = re.search(
pattern=r'river bed at the site is ([\w\s,]+)',
string=test_string_2
)
if river_bed_status: print(river_bed_status.group(1))
river_bank_status = re.search(
r'river bank of the site is ([\w\s,]+)',
string=test_string_2
)
if river_bank_status: print(river_bank_status.group(1))
soil_from_site = re.search(
pattern=r'soil from the site is ([\w\s]+)',
string=test_string_2
)
if soil_from_site: print(soil_from_site.group(1))
confluence_area_near_place = re.search(
r'([\w\s]+\bconfluence\b[\w\s]+)',
string=test_string_2
)
if confluence_area_near_place: print(confluence_area_near_place.group(1))
###Output
_____no_output_____
###Markdown
Editing Features
Now that we've found regular expressions to match data in the comments column, we will add them to the dataset by either engineering new features or editing existing ones. Since the comments data is from 2013 while the other column data is from 2018, if data already exists in a separate column, we will keep it. We will only add old data to an existing column if that column's row is null.

Columns in comments data that correspond to existing columns:
1. `People directly served` (corresponds to `Bridge Opportunity: Individuals Directly Served`)
2. `Injuries` (roughly corresponds to `River crossing injuries in last 3 years`)
3. `Deaths` (roughly corresponds to `River crossing deaths in last 3 years`)
4. `Nearby city centers` (corresponds to `Name of nearest city`)
5. `Crossing River now` (corresponds to `Current crossing method`)

First, we'll map extracted data to their corresponding 5 existing columns wherever the values in those columns are null.
###Code
comments = bridges_df['Bridge Opportunity: Comments']
extracted = bridges_df.copy()
# Boolean array used to check whether there is data to extract
non_nan_comments = bridges_df['Bridge Opportunity: Comments'].notna()
# Boolean arrays used to check if values in existing columns are null (in which
# case they will be filled from the comments, else they will be left the same);
# note: isna, not notna, since the stated policy is to fill only missing values
individuals_served_nans = bridges_df['Bridge Opportunity: Individuals Directly Served'].isna()
injuries_nans = bridges_df['River crossing injuries in last 3 years'].isna()
deaths_nans = bridges_df['River crossing deaths in last 3 years'].isna()
nearest_city_nans = bridges_df['Name of nearest city'].isna()
river_crossing_nans = bridges_df['Current crossing method'].isna()
comment_count = 0
for i in range(len(bridges_df)):
if non_nan_comments[i]:
if individuals_served_nans[i]:
people_directly_served = re.search(
pattern=r'(?i)([\d-]+) people directly served',
string=comments[i])
if people_directly_served:
# This is a range separated by -. Take its midpoint.
nums = [int(num) for num in people_directly_served.group(1).split('-')]
avg = sum(nums) / len(nums)
extracted.loc[i, 'Bridge Opportunity: Individuals Directly Served'] = avg
if injuries_nans[i]:
injuries = re.search(pattern=r'([\w\s]+) injur', string=comments[i])
if injuries:
extracted.loc[i, 'River crossing injuries in last 3 years'] = injuries.group(1)
if deaths_nans[i]:
deaths = re.search(pattern=r'([\w\s]+) die', string=comments[i])
if deaths:
extracted.loc[i, 'River crossing deaths in last 3 years'] = deaths.group(1)
if nearest_city_nans[i]:
nearest_city = re.search(
pattern=r'Nearby city centers--?([a-zA-Z -]*)',
string=comments[i])
if nearest_city:
extracted.loc[i, 'Name of nearest city'] = nearest_city.group(1).replace(' -', ', ')
if river_crossing_nans[i]:
river_crossing = re.search(
pattern=r'(?i)Crossing River now-([\w\s]+)',
string=comments[i])
if river_crossing:
extracted.loc[i, 'Current crossing method'] = river_crossing.group(1)
comment_count += 1
assert comment_count == sum(bridges_df['Bridge Opportunity: Comments'].notna())
extracted.to_csv('b2p_cleaned.csv')
###Output
_____no_output_____
###Markdown
Engineering FeaturesNow, we'll make new columns for the rest of the values extracted from the comments column.
###Code
new_columns = ['Elevation', 'Cell', 'Average Number of Daily Crossings',
'How long is it impossible/dangerous to cross river',
'Hours to nearest safe bridge/river crossing', 'Hours walking to Hospital',
'Hours walking to Health Center', 'Hours walking to market',
'Hours walking to Primary School']
new_regex = []
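# A possible continuation (not part of the original notebook): pair each new
# column with the corresponding regex tested above; the pairing and the use of
# str.extract below are illustrative assumptions, not B2P's final code.
new_regex = [r'Elevation:(\d+)',
r'Cell:(\w+\-?\w*)',
r'(?i)Cross river on a normal day-?([>\d-]+)',
r'(?i)Impossible/Dangerous to cross the river-([\w\s\-?]+)',
r'(?i)Travel to nearest safe bridge/river crossing-([>\w\s]+)',
r'(?i)Hours walking to reach the Hospital-([.\d-]+)',
r'(?i)Hours walking to reach the Health Center-([.\d-]+)',
r'(?i)Hours of walking to reach the market-([.\d-]+)',
r'(?i)Hours walking to reach Primary School-([.\d-]+)']
for col, regex in zip(new_columns, new_regex):
    # str.extract returns NaN where a comment is missing or the pattern fails
    extracted[col] = comments.str.extract(regex, expand=False)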
###Output
_____no_output_____ |
dev/20_interpret.ipynb | ###Markdown
Interpretation> Classes to build objects to better interpret predictions of a model
###Code
#export
@typedispatch
def plot_top_losses(x, y, *args, **kwargs):
raise Exception(f"plot_top_losses is not implemented for {type(x)},{type(y)}")
#export
_all_ = ["plot_top_losses"]
#export
class Interpretation():
"Interpretation base class, can be inherited for task specific Interpretation classes"
def __init__(self, dl, inputs, preds, targs, decoded, losses):
store_attr(self, "dl,inputs,preds,targs,decoded,losses")
@classmethod
def from_learner(cls, learn, ds_idx=1, dl=None, act=None):
"Construct interpretatio object from a learner"
if dl is None: dl = learn.dbunch.dls[ds_idx]
return cls(dl, *learn.get_preds(dl=dl, with_input=True, with_loss=True, with_decoded=True, act=None))
def top_losses(self, k=None, largest=True):
"`k` largest(/smallest) losses and indexes, defaulting to all losses (sorted by `largest`)."
return self.losses.topk(ifnone(k, len(self.losses)), largest=largest)
def plot_top_losses(self, k, largest=True, **kwargs):
losses,idx = self.top_losses(k, largest)
if not isinstance(self.inputs, tuple): self.inputs = (self.inputs,)
if isinstance(self.inputs[0], Tensor): inps = tuple(o[idx] for o in self.inputs)
else: inps = self.dl.create_batch(self.dl.before_batch([tuple(o[i] for o in self.inputs) for i in idx]))
b = inps + tuple(o[idx] for o in (self.targs if is_listy(self.targs) else (self.targs,)))
x,y,its = self.dl._pre_show_batch(b, max_n=k)
b_out = inps + tuple(o[idx] for o in (self.decoded if is_listy(self.decoded) else (self.decoded,)))
x1,y1,outs = self.dl._pre_show_batch(b_out, max_n=k)
if its is not None:
plot_top_losses(x, y, its, outs.itemgot(slice(len(inps), None)), self.preds[idx], losses, **kwargs)
#TODO: figure out if this is needed
#its None means that a batch knos how to show itself as a whole, so we pass x, x1
#else: show_results(x, x1, its, ctxs=ctxs, max_n=max_n, **kwargs)
learn = synth_learner()
interp = Interpretation.from_learner(learn)
x,y = learn.dbunch.valid_ds.tensors
test_eq(interp.inputs, x)
test_eq(interp.targs, y)
out = learn.model.a * x + learn.model.b
test_eq(interp.preds, out)
test_eq(interp.losses, (out-y)[:,0]**2)
#export
class ClassificationInterpretation(Interpretation):
"Interpretation methods for classification models."
def __init__(self, dl, inputs, preds, targs, decoded, losses):
super().__init__(dl, inputs, preds, targs, decoded, losses)
self.vocab = self.dl.vocab
if is_listy(self.vocab): self.vocab = self.vocab[-1]
def confusion_matrix(self):
"Confusion matrix as an `np.ndarray`."
x = torch.arange(0, len(self.vocab))
cm = ((self.decoded==x[:,None]) & (self.targs==x[:,None,None])).sum(2)
return to_np(cm)
def plot_confusion_matrix(self, normalize=False, title='Confusion matrix', cmap="Blues", norm_dec=2,
plot_txt=True, **kwargs):
"Plot the confusion matrix, with `title` and using `cmap`."
# This function is mainly copied from the sklearn docs
cm = self.confusion_matrix()
if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig = plt.figure(**kwargs)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
tick_marks = np.arange(len(self.vocab))
plt.xticks(tick_marks, self.vocab, rotation=90)
plt.yticks(tick_marks, self.vocab, rotation=0)
if plot_txt:
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
coeff = f'{cm[i, j]:.{norm_dec}f}' if normalize else f'{cm[i, j]}'
plt.text(j, i, coeff, horizontalalignment="center", verticalalignment="center", color="white" if cm[i, j] > thresh else "black")
ax = fig.gca()
ax.set_ylim(len(self.vocab)-.5,-.5)
plt.tight_layout()
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.grid(False)
def most_confused(self, min_val=1):
"Sorted descending list of largest non-diagonal entries of confusion matrix, presented as actual, predicted, number of occurrences."
cm = self.confusion_matrix()
np.fill_diagonal(cm, 0)
res = [(self.vocab[i],self.vocab[j],cm[i,j])
for i,j in zip(*np.where(cm>=min_val))]
return sorted(res, key=itemgetter(2), reverse=True)
###Output
_____no_output_____
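###Markdown
A hypothetical usage sketch (commented out, since this notebook only builds a synthetic regression learner above; `ClassificationInterpretation.from_learner` needs a classification `Learner` whose data exposes a `vocab`):
###Code
# interp = ClassificationInterpretation.from_learner(learn)
# interp.plot_confusion_matrix(figsize=(6, 6))
# interp.most_confused(min_val=2)
###Output
_____no_output_____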
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core_foundation.ipynb.
Converted 01a_core_utils.ipynb.
Converted 01b_core_dispatch.ipynb.
Converted 01c_core_transform.ipynb.
Converted 02_core_script.ipynb.
Converted 03_torchcore.ipynb.
Converted 03a_layers.ipynb.
Converted 04_data_load.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 09a_vision_data.ipynb.
Converted 09b_vision_utils.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 70_callback_wandb.ipynb.
Converted 71_callback_tensorboard.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
Converted xse_resnext.ipynb.
###Markdown
Interpretation> Classes to build objects to better interpret predictions of a model
###Code
#export
@typedispatch
def plot_top_losses(x, y, *args, **kwargs):
raise Exception(f"plot_top_losses is not implemented for {type(x)},{type(y)}")
#export
_all_ = ["plot_top_losses"]
#export
class Interpretation():
"Interpretation base class, can be inherited for task specific Interpretation classes"
def __init__(self, dl, inputs, preds, targs, decoded, losses):
store_attr(self, "dl,inputs,preds,targs,decoded,losses")
@classmethod
def from_learner(cls, learn, ds_idx=1, dl=None, act=None):
"Construct interpretatio object from a learner"
if dl is None: dl = learn.dbunch.dls[ds_idx]
        return cls(dl, *learn.get_preds(dl=dl, with_input=True, with_loss=True, with_decoded=True, act=act))
def top_losses(self, k=None, largest=True):
"`k` largest(/smallest) losses and indexes, defaulting to all losses (sorted by `largest`)."
return self.losses.topk(ifnone(k, len(self.losses)), largest=largest)
def plot_top_losses(self, k, largest=True, **kwargs):
losses,idx = self.top_losses(k, largest)
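        # `idx` indexes the k worst samples; it is used below to gather the matching
        # inputs, targets, decoded predictions and raw predictions.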
if not isinstance(self.inputs, tuple): self.inputs = (self.inputs,)
if isinstance(self.inputs[0], Tensor): inps = tuple(o[idx] for o in self.inputs)
else: inps = self.dl.create_batch(self.dl.before_batch([tuple(o[i] for o in self.inputs) for i in idx]))
b = inps + tuple(o[idx] for o in (self.targs if is_listy(self.targs) else (self.targs,)))
x,y,its = self.dl._pre_show_batch(b, max_n=k)
b_out = inps + tuple(o[idx] for o in (self.decoded if is_listy(self.decoded) else (self.decoded,)))
x1,y1,outs = self.dl._pre_show_batch(b_out, max_n=k)
if its is not None:
plot_top_losses(x, y, its, outs.itemgot(slice(len(inps), None)), self.preds[idx], losses, **kwargs)
#TODO: figure out if this is needed
        #its None means that a batch knows how to show itself as a whole, so we pass x, x1
#else: show_results(x, x1, its, ctxs=ctxs, max_n=max_n, **kwargs)
learn = synth_learner()
interp = Interpretation.from_learner(learn)
x,y = learn.dbunch.valid_ds.tensors
test_eq(interp.inputs, x)
test_eq(interp.targs, y)
out = learn.model.a * x + learn.model.b
test_eq(interp.preds, out)
test_eq(interp.losses, (out-y)[:,0]**2)
#export
class ClassificationInterpretation(Interpretation):
"Interpretation methods for classification models."
def __init__(self, dl, inputs, preds, targs, decoded, losses):
super().__init__(dl, inputs, preds, targs, decoded, losses)
self.vocab = self.dl.vocab
if is_listy(self.vocab): self.vocab = self.vocab[-1]
def confusion_matrix(self):
"Confusion matrix as an `np.ndarray`."
x = torch.arange(0, len(self.vocab))
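        # Broadcast decoded predictions and targets against every class index:
        # cm[i, j] counts samples whose target is class i and whose decoded
        # prediction is class j (rows = actual, columns = predicted).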
cm = ((self.decoded==x[:,None]) & (self.targs==x[:,None,None])).sum(2)
return to_np(cm)
def plot_confusion_matrix(self, normalize=False, title='Confusion matrix', cmap="Blues", norm_dec=2,
plot_txt=True, **kwargs):
"Plot the confusion matrix, with `title` and using `cmap`."
# This function is mainly copied from the sklearn docs
cm = self.confusion_matrix()
if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig = plt.figure(**kwargs)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
tick_marks = np.arange(len(self.vocab))
plt.xticks(tick_marks, self.vocab, rotation=90)
plt.yticks(tick_marks, self.vocab, rotation=0)
if plot_txt:
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
coeff = f'{cm[i, j]:.{norm_dec}f}' if normalize else f'{cm[i, j]}'
plt.text(j, i, coeff, horizontalalignment="center", verticalalignment="center", color="white" if cm[i, j] > thresh else "black")
ax = fig.gca()
ax.set_ylim(len(self.vocab)-.5,-.5)
plt.tight_layout()
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.grid(False)
def most_confused(self, min_val=1):
"Sorted descending list of largest non-diagonal entries of confusion matrix, presented as actual, predicted, number of occurrences."
cm = self.confusion_matrix()
np.fill_diagonal(cm, 0)
res = [(self.vocab[i],self.vocab[j],cm[i,j])
for i,j in zip(*np.where(cm>=min_val))]
return sorted(res, key=itemgetter(2), reverse=True)
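# Typical usage (a sketch, assuming a trained classification `learn`):
# interp = ClassificationInterpretation.from_learner(learn)
# interp.plot_confusion_matrix(figsize=(6, 6))
# interp.most_confused(min_val=2)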
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_test.ipynb.
Converted 01_core_foundation.ipynb.
Converted 01a_core_utils.ipynb.
Converted 01b_core_dispatch.ipynb.
Converted 01c_core_transform.ipynb.
Converted 02_core_script.ipynb.
Converted 03_torchcore.ipynb.
Converted 03a_layers.ipynb.
Converted 04_data_load.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 09a_vision_data.ipynb.
Converted 09b_vision_utils.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 70_callback_wandb.ipynb.
Converted 71_callback_tensorboard.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
Converted xse_resnext.ipynb.
###Markdown
Interpretation> Classes to build objects to better interpret predictions of a model
###Code
#export
@typedispatch
def plot_top_losses(x, y, *args, **kwargs):
raise Exception(f"plot_top_losses is not implemented for {type(x)},{type(y)}")
#export
_all_ = ["plot_top_losses"]
#export
class Interpretation():
"Interpretation base class, can be inherited for task specific Interpretation classes"
def __init__(self, dl, inputs, preds, targs, decoded, losses):
store_attr(self, "dl,inputs,preds,targs,decoded,losses")
@classmethod
def from_learner(cls, learn, ds_idx=1, dl=None, act=None):
"Construct interpretatio object from a learner"
if dl is None: dl = learn.dbunch.dls[ds_idx]
        return cls(dl, *learn.get_preds(dl=dl, with_input=True, with_loss=True, with_decoded=True, act=act))
def top_losses(self, k=None, largest=True):
"`k` largest(/smallest) losses and indexes, defaulting to all losses (sorted by `largest`)."
return self.losses.topk(ifnone(k, len(self.losses)), largest=largest)
def plot_top_losses(self, k, largest=True, **kwargs):
losses,idx = self.top_losses(k, largest)
if isinstance(self.inputs[0], Tensor): inps = tuple(o[idx] for o in self.inputs)
else: inps = self.dl.create_batch(self.dl.before_batch([tuple(o[i] for o in self.inputs) for i in idx]))
b = inps + tuple(o[idx] for o in (self.targs if is_listy(self.targs) else (self.targs,)))
x,y,its = self.dl._pre_show_batch(b, max_n=k)
b_out = inps + tuple(o[idx] for o in (self.decoded if is_listy(self.decoded) else (self.decoded,)))
x1,y1,outs = self.dl._pre_show_batch(b_out, max_n=k)
if its is not None:
plot_top_losses(x, y, its, outs.itemgot(slice(len(self.inputs), None)), self.preds[idx], losses, **kwargs)
#TODO: figure out if this is needed
        #its None means that a batch knows how to show itself as a whole, so we pass x, x1
#else: show_results(x, x1, its, ctxs=ctxs, max_n=max_n, **kwargs)
learn = synth_learner()
interp = Interpretation.from_learner(learn)
x,y = learn.dbunch.valid_ds.tensors
test_eq(*interp.inputs, x)
test_eq(interp.targs, y)
out = learn.model.a * x + learn.model.b
test_eq(interp.preds, out)
test_eq(interp.losses, (out-y)[:,0]**2)
#export
class ClassificationInterpretation(Interpretation):
"Interpretation methods for classification models."
def __init__(self, dl, inputs, preds, targs, decoded, losses):
super().__init__(dl, inputs, preds, targs, decoded, losses)
self.vocab = self.dl.vocab
if is_listy(self.vocab): self.vocab = self.vocab[-1]
def confusion_matrix(self):
"Confusion matrix as an `np.ndarray`."
x = torch.arange(0, len(self.vocab))
cm = ((self.decoded==x[:,None]) & (self.targs==x[:,None,None])).sum(2)
return to_np(cm)
def plot_confusion_matrix(self, normalize=False, title='Confusion matrix', cmap="Blues", norm_dec=2,
plot_txt=True, **kwargs):
"Plot the confusion matrix, with `title` and using `cmap`."
# This function is mainly copied from the sklearn docs
cm = self.confusion_matrix()
if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig = plt.figure(**kwargs)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
tick_marks = np.arange(len(self.vocab))
plt.xticks(tick_marks, self.vocab, rotation=90)
plt.yticks(tick_marks, self.vocab, rotation=0)
if plot_txt:
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
coeff = f'{cm[i, j]:.{norm_dec}f}' if normalize else f'{cm[i, j]}'
plt.text(j, i, coeff, horizontalalignment="center", verticalalignment="center", color="white" if cm[i, j] > thresh else "black")
ax = fig.gca()
ax.set_ylim(len(self.vocab)-.5,-.5)
plt.tight_layout()
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.grid(False)
def most_confused(self, min_val=1):
"Sorted descending list of largest non-diagonal entries of confusion matrix, presented as actual, predicted, number of occurrences."
cm = self.confusion_matrix()
np.fill_diagonal(cm, 0)
res = [(self.vocab[i],self.vocab[j],cm[i,j])
for i,j in zip(*np.where(cm>=min_val))]
return sorted(res, key=itemgetter(2), reverse=True)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_utils.ipynb.
Converted 01b_dispatch.ipynb.
Converted 01c_transform.ipynb.
Converted 02_script.ipynb.
Converted 03_torch_core.ipynb.
Converted 03a_layers.ipynb.
Converted 04_dataloader.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Interpretation> Classes to build objects to better interpret predictions of a model
###Code
#export
@typedispatch
def plot_top_losses(x, y, *args, **kwargs):
raise Exception(f"plot_top_losses is not implemented for {type(x)},{type(y)}")
#export
_all_ = ["plot_top_losses"]
#export
class Interpretation():
"Interpretation base class, can be inherited for task specific Interpretation classes"
def __init__(self, dl, inputs, preds, targs, decoded, losses):
store_attr(self, "dl,inputs,preds,targs,decoded,losses")
@classmethod
def from_learner(cls, learn, ds_idx=1, dl=None, act=None):
"Construct interpretatio object from a learner"
if dl is None: dl = learn.dbunch.dls[ds_idx]
        return cls(dl, *learn.get_preds(dl=dl, with_input=True, with_loss=True, with_decoded=True, act=act))
def top_losses(self, k=None, largest=True):
"`k` largest(/smallest) losses and indexes, defaulting to all losses (sorted by `largest`)."
return self.losses.topk(ifnone(k, len(self.losses)), largest=largest)
def plot_top_losses(self, k, largest=True, **kwargs):
losses,idx = self.top_losses(k, largest)
if isinstance(self.inputs[0], Tensor): inps = tuple(o[idx] for o in self.inputs)
else: inps = self.dl.create_batch(self.dl.before_batch([tuple(o[i] for o in self.inputs) for i in idx]))
b = inps + tuple(o[idx] for o in (self.targs if is_listy(self.targs) else (self.targs,)))
x,y,its = self.dl._pre_show_batch(b, max_n=k)
b_out = inps + tuple(o[idx] for o in (self.decoded if is_listy(self.decoded) else (self.decoded,)))
x1,y1,outs = self.dl._pre_show_batch(b_out, max_n=k)
if its is not None:
plot_top_losses(x, y, its, outs.itemgot(slice(len(self.inputs), None)), self.preds[idx], losses, **kwargs)
#TODO: figure out if this is needed
        #its None means that a batch knows how to show itself as a whole, so we pass x, x1
#else: show_results(x, x1, its, ctxs=ctxs, max_n=max_n, **kwargs)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_utils.ipynb.
Converted 01b_dispatch.ipynb.
Converted 01c_transform.ipynb.
Converted 02_script.ipynb.
Converted 03_torch_core.ipynb.
Converted 03a_layers.ipynb.
Converted 04_dataloader.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
This cell doesn't have an export destination and was ignored:
e
This cell doesn't have an export destination and was ignored:
e
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
|
notebook_de_apresentacao.ipynb | ###Markdown
Problem statementThe problem tackled in this notebook is a challenge proposed in a hiring process. The challenge is described as follows: the company "Ponto Quente" is facing a problem in its database. Somehow, its products were categorized incorrectly and this needs to be fixed. The company's data intelligence team managed to assemble a dataset containing some products with the correct classification. In this dataset we find several pieces of information about each product, including customer reviews.The company "Ponto Quente" wants a machine learning model capable of automatically categorizing its products based on the information in the customer reviews. More specifically, the company wants a program that receives the trained model together with a dataframe to be categorized. This dataframe must not contain the `product_category` column, which is the column that provides the product category information. The program to be developed must return the same input dataframe, now with the `product_category` column holding the classifications predicted by the model.To solve this problem, the Multinomial variant of the [Naive Bayes](https://www.analyticsvidhya.com/blog/2017/09/naive-bayes-explained/) algorithm was used, reaching an accuracy of approximately 73.24%.The data cleaning, preparation, model training and validation were all based on these two notebooks:+ https://github.com/AarohiSingla/Multinomial-Naive-Bayes/blob/master/news_classifier_unseen_input.ipynb+ https://github.com/AarohiSingla/Multinomial-Naive-Bayes/blob/master/youtube_multinomial_naive_bayes.ipynbOther references were the documentation of the libraries used and, regarding emoji removal, this [stackoverflow question](https://stackoverflow.com/questions/33404752/removing-emojis-from-a-string-in-python). importing librariesIn the next cell we import the libraries and methods used to solve the challenge. Each of them is also described below.
###Code
import pandas as pd
import string, re, nltk
from nltk.corpus import stopwords
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
nltk.download('stopwords') # download the stopwords list
###Output
[nltk_data] Downloading package stopwords to /root/nltk_data...
[nltk_data] Package stopwords is already up-to-date!
###Markdown
- pandas - dataframe manipulation- nltk.corpus.stopwords - list of words considered "stopwords"- string - string manipulation- re - python library for string operations with encodings; here we work with the encoding of the emojis.- from the sklearn library: - MultinomialNB - the multinomial Naive Bayes algorithm, the most suitable for multi-class classification - CountVectorizer - turns text into token-count vectors - train_test_split - splits the data into train and test sets - accuracy_score - method used to validate the model functions used in this notebookTo make the notebook easier to read and keep code lines to a minimum during the presentation, the functions used throughout are declared here at the start, including the `validate` function requested by the challenge. How these functions work will be explained along the presentation.
###Code
def tratamento_reviews(df):
    # concatenate the contents of the two review text columns
chars = [(df['review_headline'].iloc[i] + ' ' + df['review_body'].iloc[i])
for i in df.index.to_list()]
df['review_total'] = chars
    # compile a pattern of emoji ranges to be removed
emoji_pattern = re.compile("["
u"\U0001F300-\U0001F5FF" # símbolos
u"\U0001F680-\U0001F6FF" # transporte e símbolos de mapa
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U0000231A-\U000023F3" # relógios e setas
u"\U000026A1-\U000026BE" # relâmpago, cores e bolas de esportes
u"\U00002753-\U00002757" # pontuação
u"\U00002B50" # estrela
u"\U0001F32D-\U0001F37F" # comidas
u"\U0001F3A0-\U0001F3D3" # esportes
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F910-\U0001F93E" # mais emoticons e emojis de esportes
"]+", flags=re.UNICODE)
    # prepare to remove unwanted characters
    col_corrigida = [] # list to store the cleaned text that will replace the review_total column
    char_excluir = string.punctuation + string.digits # characters to remove: punctuation and digits
for row in df['review_total']:
        temp = [char for char in row if char not in char_excluir] # drop digits and special characters
text = ''.join(temp).lower()
text = emoji_pattern.sub(r'', text)
        # remove stopwords; note that the original `text.replace(word, '')` call discarded its result
        text = ' '.join(w for w in text.split() if w not in stopwords.words('english'))
col_corrigida.append(text)
df['review_total'] = col_corrigida
return df
def tratamento_categorias(df):
df['num_category'] = df['product_category'].map({'Digital_Ebook_Purchase':0,
'Music':1, 'Video DVD':2, 'Mobile_Apps':3, 'Books':4, 'Electronics':5,
'Toys':6, 'Video Games':7, 'Digital_Video_Download':8, 'Digital_Music_Purchase':9,
'PC':10, 'Camera':11, 'Baby':12, 'Wireless':13, 'Home Entertainment':14, 'Sports':15,
'Musical Instruments':16, 'Lawn and Garden':17, 'Home Improvement':18, 'Home':19,
'Watches':20, 'Video':21, 'Shoes':22, 'Office Products':23, 'Automotive':24,
'Health & Personal Care':25, 'Personal_Care_Appliances':26, 'Software':27,
'Kitchen':28, 'Luggage':29, 'Pet Products':30, 'Beauty':31})
return df
def split_and_vect(df_ML, seed):
x_train, x_test, y_train, y_test = train_test_split(df_ML['review_total'], df_ML['num_category'], random_state=seed)
vect = CountVectorizer(ngram_range=(2,2))
X_train = vect.fit_transform(x_train)
X_test = vect.transform(x_test)
return X_train, X_test, y_train, y_test, vect
def validate(modelo, vect, df_teste):
    df_tratado = tratamento_reviews(df_teste) # clean the review texts
texto_vetorizado = vect.transform(df_tratado['review_total'])
    df_tratado['product_category'] = modelo.predict(texto_vetorizado) # run the model prediction and store it in a new dataframe column
    # finally, map the numeric tokens back to the original class names
df_tratado['product_category'] = df_tratado['product_category'].map({0:'Digital_Ebook_Purchase',
1:'Music', 2:'Video DVD', 3:'Mobile_Apps', 4:'Books', 5:'Electronics',
6:'Toys', 7:'Video Games', 8:'Digital_Video_Download', 9:'Digital_Music_Purchase',
10:'PC', 11:'Camera', 12:'Baby', 13:'Wireless', 14:'Home Entertainment', 15:'Sports',
16:'Musical Instruments', 17:'Lawn and Garden', 18:'Home Improvement', 19:'Home',
20:'Watches', 21:'Video', 22:'Shoes', 23:'Office Products', 24:'Automotive',
25:'Health & Personal Care', 26:'Personal_Care_Appliances', 27:'Software',
28:'Kitchen', 29:'Luggage', 30:'Pet Products', 31:'Beauty'})
return df_tratado.drop('review_total', axis=1)
###Output
_____no_output_____
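###Markdown
 As a quick sanity check, here is what the cleaning function produces on a tiny, hypothetical dataframe (made-up rows, just to illustrate `tratamento_reviews`; it assumes the function defined above has been run):
###Code
import pandas as pd
demo = pd.DataFrame({'review_headline': ['Great!!!'],
                     'review_body': ['I bought 2 and they work well :)']})
tratamento_reviews(demo)['review_total'] # punctuation, digits and stopwords are gone
###Output
_____no_output_____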
###Markdown
data acquisition and cleaningThe data was provided by SOLVIMM, the company that proposed the challenge. It corresponds to a database of products registered by the company "Ponto Quente".
###Code
arq = 'https://github.com/matheus97eng/desafio_solvimm/blob/main/data/reviews.tsv?raw=true' # github repository
df_original = pd.read_csv(arq, sep='\t')
print(df_original.shape)
df_original.head()
###Output
(170583, 16)
###Markdown
Feature information, null-data removal and model rationale:Let's get an overview of the features at hand.
###Code
display(df_original.info())
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 170583 entries, 0 to 170582
Data columns (total 16 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0 170583 non-null int64
1 marketplace 170583 non-null object
2 customer_id 170583 non-null int64
3 review_id 170583 non-null object
4 product_id 170583 non-null object
5 product_parent 170583 non-null int64
6 product_title 170583 non-null object
7 star_rating 170583 non-null int64
8 helpful_votes 170583 non-null int64
9 total_votes 170583 non-null int64
10 vine 170583 non-null object
11 verified_purchase 170583 non-null object
12 review_headline 170583 non-null object
13 review_body 170582 non-null object
14 review_date 170578 non-null object
15 product_category 170583 non-null object
dtypes: int64(6), object(10)
memory usage: 20.8+ MB
###Markdown
The challenge asks us to classify the products based only on the customer reviews. That settled, we do not need to worry about any features other than `review_headline`, the review title, `review_body`, the review itself, and finally `product_category`, the corrected categorization of the product. Here we will not consider the review posting date or the review ID as relevant.Given that the features `review_headline` and `review_body` are text (character type), and since this is a classification problem (identifying which class the product belongs to), we need a model that uses NLP (Natural Language Processing). The [Naive Bayes](https://www.analyticsvidhya.com/blog/2017/09/naive-bayes-explained/) algorithm will be chosen, since it is easy to apply with python and sklearn, responds well to a small amount of data and is not a heavy algorithm. Explaining informally, the Naive Bayes algorithm works by computing the probability that a product belongs to class "X" given that certain "keywords" appear in that product's review. Once trained, the model can identify, for example, that if the word "ball" appears in the review, the product is more likely to be from the "sports" category. With the information in the database, the algorithm will be trained and validated to classify other products outside the dataset prepared by the "Ponto Quente" data team. A drawback of this model is that it does not consider the semantics of the text. Consider, for example, the sentence "my toes got really tight when I tested them on a run". Looking at the context, we could identify that the product is probably a running shoe. But Naive Bayes analyzes each word separately, which would make that identification harder in this example.Before modeling our problem, however, we need to clean our data.Running `df_original.info()` we already see that the `review_body` feature has one row with a null value, since that column shows 170582 non-null entries while our database has 170583 rows. We will choose to drop the entire row containing this null value, since it would get in the way of the model development and concerns a single product.
###Code
excluir = df_original[df_original['review_body'].isnull()].index
df_tratado = df_original.drop(excluir).reset_index(drop=True)
df_tratado.head()
###Output
_____no_output_____
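###Markdown
 As an aside, here is a minimal, hypothetical sketch (toy sentences, not challenge data) of the Naive Bayes intuition described above: word counts are turned into per-class probabilities.
###Code
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

toy_reviews = ['great ball for playing soccer', 'great novel with a touching story',
               'the ball bounces well', 'could not put the story down']
toy_labels = ['Sports', 'Books', 'Sports', 'Books']
toy_vect = CountVectorizer()
toy_counts = toy_vect.fit_transform(toy_reviews) # word-count matrix
toy_model = MultinomialNB().fit(toy_counts, toy_labels)
print(toy_model.predict(toy_vect.transform(['a great story']))) # -> ['Books'], since 'story' is a Books keyword
###Output
_____no_output_____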
###Markdown
Customer review featuresThere are two columns in the dataframe with customer review content: `review_headline`, which holds the review title, and `review_body`, which holds the review body. What will be done in this challenge is to merge all the words from these two features into a single column, since the context of the text does not matter for the Naive Bayes model. The column containing the merged text will be named `review_total`.Besides that, we need to clean this text. All special characters, digits and a list of emojis will be removed. Also removed are the so-called **"stop-words"**, which are basically words that do not give us much information when analyzed in isolation, words like "I, yourself, the...". The `nltk` library has a list of these words, which will be used as the base here. With the cleaning done, we ensure the model works at its best, analyzing only keywords in the data.**note:** not every possible emoji will be removed. The full list of encoded emojis is very large and could make the program take too long to run. The emojis chosen are the ones most likely to appear in product reviews. The list of emojis to be removed can be edited in the `tratamento_reviews` function itself, which performs the cleaning of the review texts. Classification typesIt is also important to look at the product classification types the company has. In the provided dataset, 32 classes were identified. It is essential to understand that the model to be trained **will not identify product classes that are not in the database**. So, if the company wanted to identify whether a product belongs to a "car" class or not, it would have to add several products of the "car" category to the database.The machine learning model cannot interpret string data. Therefore, we need to transform the data of the target column `product_category` with a simple tokenization, in other words, replacing the words with numbers. The substituted values will be stored in a column named `num_category` running the dataframe cleaning functionsAll the processing described above will be done by two functions: `tratamento_reviews`, which cleans all the content of the customer reviews, and `tratamento_categorias`, which handles the content of the `product_category` column. While the first function returns the dataframe extended with the `review_total` column (a column created by the function itself), the second function returns the dataframe with the data prepared for developing the model. This dataframe will be called `df_ML` and will contain the `review_total` column, which will be the model's x variable, and the `num_category` column, which will be the y variable.
###Code
df_tratado = tratamento_reviews(df_tratado)
df_ML = tratamento_categorias(df_tratado)
###Output
_____no_output_____
###Markdown
Applying machine learning splitting the data into train and test / training the modelAfter the cleaning steps, let's split the data into train and test sets for the model. One more transformation, however, must be applied to the `review_total` column. We need to tokenize (vectorize) the words and turn the column into a sparse matrix, which is the input accepted by the `fit` method of the `MultinomialNB` model. For that we use the `CountVectorizer` class from the `sklearn` library. Here, the data from the vectorization and sparse-matrix transformation will be stored in the variables `X_train` and `X_test` (uppercase X). The transformation is applied on top of `x_train` and `x_test` (lowercase x), variables prepared through the `train_test_split` method, also from the `sklearn` library. These variables are just a split of the `df_ML` data.This whole splitting and vectorizing process is carried out by the `split_and_vect` function, which returns the sparse matrices `X_train` and `X_test` and the arrays `y_train` and `y_test` (the model itself is then trained right below with `MultinomialNB`). Besides these variables, the function also returns the `vect` instance, which will be used in the `validate` function to tokenize the words of the texts. The parameters this function receives are the dataframe (which must have been processed by the two cleaning functions) and the `seed` number, which guarantees the reproducibility of the model. Here we will run the function with seed = 50.
###Code
X_train, X_test, y_train, y_test, vect = split_and_vect(df_ML, 50)
modelo = MultinomialNB(alpha=0.2)
modelo.fit(X_train,y_train)
result = modelo.predict(X_test)
print(result)
###Output
[2 2 3 ... 2 4 1]
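###Markdown
 To make the bigram vectorization above concrete, here is a small, hypothetical sketch (toy sentences, not challenge data) of what `CountVectorizer(ngram_range=(2,2))` produces:
###Code
from sklearn.feature_extraction.text import CountVectorizer

toy = ['great product works well', 'product works as expected']
demo_vect = CountVectorizer(ngram_range=(2, 2)) # count pairs of consecutive words (bigrams)
demo_X = demo_vect.fit_transform(toy)
print(sorted(demo_vect.vocabulary_)) # the bigram vocabulary
print(demo_X.toarray())              # bigram counts per sentence
###Output
_____no_output_____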
###Markdown
model validationTo validate the model, we use what the Solvimm challenge proposes, which is to compute the accuracy. This is calculated through the `sklearn` library.The model developed here shows an accuracy of approximately 73.24%. In other words, with the data split made here and this trained model, we get the classification right for practically 3 out of every 4 products. This accuracy is higher than the minimum expected in the challenge.
###Code
accuracy_score(result,y_test)
###Output
_____no_output_____
###Markdown
Applying the model: the validate functionFinally, after cleaning the data and training and validating the model, it remains to develop a function that applies our model to a dataframe in order to classify the products it contains. For that we use the `validate` function, which receives the trained model, the `vect` instance and a dataframe like the one provided by the "Ponto Quente" team, but without the `product_category` column. The function must return the same input dataframe, now with a `product_category` column holding the predicted categories for each product.It makes no sense to run this function over the database provided by "Ponto Quente" to prepare the model, since the product classifications were already corrected by the company's team. Nevertheless, we will use the same database just to verify that the function works, given that the model is already validated.It is important to say that, before actually running the model prediction, the dataframe received by the `validate` function needs to be cleaned, just as we cleaned the review texts before training the model. More specifically, the dataframe must first go through the `tratamento_reviews` function, which takes up a good part of `validate`'s execution time. Moreover, the word tokenization must be done inside the function, which is why the `vect` parameter must be supplied; without this parameter, there would be no way for "Ponto Quente" to use the `validate` function without first running the whole training pipeline. The dataframe that will be used as the execution parameter is `df_tratado`, not `df_original`, the one taken directly from the provided database. This is because **the dataframe received by `validate` cannot contain null values in the reviews**. No code will be implemented to handle null data, because that would be better settled in conversation with the company. The reason is easy to understand: say the company applies the algorithm to a dataframe with many null values. Would it want us to simply ignore the products that had no reviews, or would it want some other kind of treatment in the algorithm? It is up to the company to decide.
###Code
df_sem_categoria = df_tratado.drop(['product_category', 'review_total'], axis=1).sample(100).reset_index(drop=True)
df_categorizado = validate(modelo, vect, df_sem_categoria)
df_categorizado.head()
###Output
_____no_output_____ |
notebooks/06.00-Widget_Styling.ipynb | ###Markdown
Layout and Styling of Jupyter widgetsThis notebook presents how to layout and style Jupyter interactive widgets to build rich and *reactive* widget-based applications. The `layout` attribute.Jupyter interactive widgets have a `layout` attribute exposing a number of CSS properties that impact how widgets are laid out. Exposed CSS propertiesThe following properties map to the values of the CSS properties of the same name (underscores being replaced with dashes), applied to the top DOM elements of the corresponding widget. Sizes- `height`- `width`- `max_height`- `max_width`- `min_height`- `min_width` Display- `visibility`- `display`- `overflow`- `overflow_x`- `overflow_y` Box model- `border` - `margin`- `padding` Positioning- `top`- `left`- `bottom`- `right` Flexbox- `order`- `flex_flow`- `align_items`- `flex`- `align_self`- `align_content`- `justify_content` Grid layout- `grid_auto_columns`- `grid_auto_flow`- `grid_auto_rows`- `grid_gap`- `grid_template`- `grid_row`- `grid_column` Shorthand CSS propertiesYou may have noticed that certain CSS properties such as `margin-[top/right/bottom/left]` seem to be missing. The same holds for `padding-[top/right/bottom/left]` etc.In fact, you can atomically specify `[top/right/bottom/left]` margins via the `margin` attribute alone by passing the string `'100px 150px 100px 80px'` for, respectively, `top`, `right`, `bottom` and `left` margins of `100`, `150`, `100` and `80` pixels.Similarly, the `flex` attribute can hold values for `flex-grow`, `flex-shrink` and `flex-basis`. The `border` attribute is a shorthand property for `border-width`, `border-style (required)`, and `border-color`. Simple examples The following example shows how to resize a `Button` so that its views have a height of `80px` and a width of `50%` of the available space. It also includes an example of setting a CSS property that requires multiple values (a border, in this case):
###Code
from ipywidgets import Button, Layout
b = Button(description='(50% width, 80px height) button',
layout=Layout(width='50%', height='80px', border='2px dotted blue'))
b
###Output
_____no_output_____
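###Markdown
 As a quick illustration of the shorthand properties described above, `margin` and `flex` each accept a single string bundling several CSS values (a small sketch; the pixel values are arbitrary):
###Code
from ipywidgets import Button, Layout
Button(description='Shorthand margin',
       layout=Layout(margin='100px 150px 100px 80px', # top right bottom left
                     flex='0 1 auto'))                # flex-grow flex-shrink flex-basis
###Output
_____no_output_____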
###Markdown
The `layout` property can be shared between multiple widgets and assigned directly.
###Code
Button(description='Another button with the same layout', layout=b.layout)
###Output
_____no_output_____
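###Markdown
 Because both buttons reference the same `Layout` object, mutating one of its attributes updates every widget sharing it (a small sketch of that behaviour):
###Code
b.layout.width = '30%' # both buttons above shrink together
###Output
_____no_output_____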
###Markdown
Description You may have noticed that long descriptions are truncated. This is because the description length is, by default, fixed.
###Code
from ipywidgets import IntSlider
IntSlider(description='A too long description')
###Output
_____no_output_____
###Markdown
If you need more flexibility to lay out widgets and descriptions, you can use Label widgets directly.
###Code
from ipywidgets import HBox, Label
HBox([Label('A too long description'), IntSlider()])
###Output
_____no_output_____
###Markdown
**Spoiler alert**: You can change the length of the description to fit the description text. However, this will make the widget itself shorter. You can change both by adjusting the description width and the widget width using the widget's style.
###Code
style = {'description_width': 'initial'}
IntSlider(description='A too long description', style=style)
###Output
_____no_output_____
###Markdown
Natural sizes, and arrangements using HBox and VBoxMost of the core-widgets have default heights and widths that tile well together. This allows simple layouts based on the `HBox` and `VBox` helper functions to align naturally:
###Code
from ipywidgets import Button, HBox, VBox
words = ['correct', 'horse', 'battery', 'staple']
items = [Button(description=w) for w in words]
left_box = VBox([items[0], items[1]])
right_box = VBox([items[2], items[3]])
HBox([left_box, right_box])
###Output
_____no_output_____
###Markdown
LaTeX Widgets such as sliders and text inputs have a description attribute that can render Latex Equations. The `Label` widget also renders Latex equations.
###Code
from ipywidgets import IntSlider, Label
IntSlider(description=r'\(\int_0^t f\)')
Label(value=r'\(e=mc^2\)')
###Output
_____no_output_____
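###Markdown
 The next section mentions that slider readouts can be formatted; as a small upfront sketch (using the standard `readout_format` trait of `FloatSlider`):
###Code
from ipywidgets import FloatSlider
FloatSlider(value=7.5, min=0, max=10.0, readout_format='.4f') # fixed-point readout with 4 decimals
###Output
_____no_output_____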
###Markdown
Number formattingSliders have a readout field which can be formatted using Python's [Format Specification Mini-Language](https://docs.python.org/3/library/string.html#format-specification-mini-language). If the space available for the readout is too narrow for the string representation of the slider value, a different styling is applied to show that not all digits are visible. The Flexbox layoutThe `HBox` and `VBox` classes above are special cases of the `Box` widget.The `Box` widget enables the entire CSS flexbox spec as well as the Grid layout spec, enabling rich reactive layouts in the Jupyter notebook. It aims at providing an efficient way to lay out, align and distribute space among items in a container.Again, the whole flexbox spec is exposed via the `layout` attribute of the container widget (`Box`) and the contained items. One may share the same `layout` attribute among all the contained items. AcknowledgementThe following flexbox tutorial on the flexbox layout follows the lines of the article [A Complete Guide to Flexbox](https://css-tricks.com/snippets/css/a-guide-to-flexbox/) by Chris Coyier, and uses text and various images from the article [with permission](https://css-tricks.com/license/). Basics and terminologySince flexbox is a whole module and not a single property, it involves a lot of things including its whole set of properties. Some of them are meant to be set on the container (parent element, known as "flex container") whereas the others are meant to be set on the children (known as "flex items").If regular layout is based on both block and inline flow directions, the flex layout is based on "flex-flow directions". Please have a look at this figure from the specification, explaining the main idea behind the flex layout.Basically, items will be laid out following either the `main axis` (from `main-start` to `main-end`) or the `cross axis` (from `cross-start` to `cross-end`).- `main axis` - The main axis of a flex container is the primary axis along which flex items are laid out. Beware, it is not necessarily horizontal; it depends on the flex-direction property (see below).- `main-start | main-end` - The flex items are placed within the container starting from main-start and going to main-end.- `main size` - A flex item's width or height, whichever is in the main dimension, is the item's main size. The flex item's main size property is either the ‘width’ or ‘height’ property, whichever is in the main dimension.- `cross axis` - The axis perpendicular to the main axis is called the cross axis. Its direction depends on the main axis direction.- `cross-start | cross-end` - Flex lines are filled with items and placed into the container starting on the cross-start side of the flex container and going toward the cross-end side.- `cross size` - The width or height of a flex item, whichever is in the cross dimension, is the item's cross size. The cross size property is whichever of ‘width’ or ‘height’ that is in the cross dimension. Properties of the parent display`display` can be `flex` or `inline-flex`. This defines a flex container (block or inline). flex-flow`flex-flow` is a shorthand for the `flex-direction` and `flex-wrap` properties, which together define the flex container's main and cross axes. Default is `row nowrap`.- `flex-direction` (column-reverse | column | row | row-reverse ) This establishes the main-axis, thus defining the direction flex items are placed in the flex container. Flexbox is (aside from optional wrapping) a single-direction layout concept. 
Think of flex items as primarily laying out either in horizontal rows or vertical columns.- `flex-wrap` (nowrap | wrap | wrap-reverse) By default, flex items will all try to fit onto one line. You can change that and allow the items to wrap as needed with this property. Direction also plays a role here, determining the direction new lines are stacked in. justify-content`justify-content` can be one of `flex-start`, `flex-end`, `center`, `space-between`, `space-around`. This defines the alignment along the main axis. It helps distribute extra free space left over when either all the flex items on a line are inflexible, or are flexible but have reached their maximum size. It also exerts some control over the alignment of items when they overflow the line.  align-items`align-items` can be one of `flex-start`, `flex-end`, `center`, `baseline`, `stretch`. This defines the default behaviour for how flex items are laid out along the cross axis on the current line. Think of it as the justify-content version for the cross-axis (perpendicular to the main-axis).  align-content`align-content` can be one of `flex-start`, `flex-end`, `center`, `baseline`, `stretch`. This aligns a flex container's lines within when there is extra space in the cross-axis, similar to how justify-content aligns individual items within the main-axis.**Note**: this property has no effect when there is only one line of flex items. Properties of the itemsThe flexbox-related CSS properties of the items have no impact if the parent element is not a flexbox container (i.e. has a `display` attribute equal to `flex` or `inline-flex`). orderBy default, flex items are laid out in the source order. However, the `order` property controls the order in which they appear in the flex container. flex`flex` is shorthand for three properties, `flex-grow`, `flex-shrink` and `flex-basis` combined. The second and third parameters (`flex-shrink` and `flex-basis`) are optional. Default is `0 1 auto`. - `flex-grow` This defines the ability for a flex item to grow if necessary. It accepts a unitless value that serves as a proportion. It dictates what amount of the available space inside the flex container the item should take up. If all items have flex-grow set to 1, the remaining space in the container will be distributed equally to all children. If one of the children has a value of 2, the remaining space would take up twice as much space as the others (or it will try to, at least).  - `flex-shrink` This defines the ability for a flex item to shrink if necessary. - `flex-basis` This defines the default size of an element before the remaining space is distributed. It can be a length (e.g. `20%`, `5rem`, etc.) or a keyword. The `auto` keyword means *"look at my width or height property"*. align-self`align-self` allows the default alignment (or the one specified by align-items) to be overridden for individual flex items. The VBox and HBox helpersThe `VBox` and `HBox` helper classes provide simple defaults to arrange child widgets in vertical and horizontal boxes. 
They are roughly equivalent to:
```Python
def VBox(*pargs, **kwargs):
    """Displays multiple widgets vertically using the flexible box model."""
    box = Box(*pargs, **kwargs)
    box.layout.display = 'flex'
    box.layout.flex_flow = 'column'
    box.layout.align_items = 'stretch'
    return box

def HBox(*pargs, **kwargs):
    """Displays multiple widgets horizontally using the flexible box model."""
    box = Box(*pargs, **kwargs)
    box.layout.display = 'flex'
    box.layout.align_items = 'stretch'
    return box
```
Examples **Four buttons in a VBox. Items stretch to the maximum width, in a vertical box taking `50%` of the available space.**
###Code
from ipywidgets import Layout, Button, Box
items_layout = Layout( width='auto') # override the default width of the button to 'auto' to let the button grow
box_layout = Layout(display='flex',
flex_flow='column',
align_items='stretch',
border='solid',
width='50%')
words = ['correct', 'horse', 'battery', 'staple']
items = [Button(description=word, layout=items_layout, button_style='danger') for word in words]
box = Box(children=items, layout=box_layout)
box
###Output
_____no_output_____
###Markdown
**Three buttons in an HBox. Items flex proportionally to their weight.**
###Code
from ipywidgets import Layout, Button, Box, VBox
# Items flex proportionally to the weight and the left over space around the text
items_auto = [
Button(description='weight=1; auto', layout=Layout(flex='1 1 auto', width='auto'), button_style='danger'),
Button(description='weight=3; auto', layout=Layout(flex='3 1 auto', width='auto'), button_style='danger'),
Button(description='weight=1; auto', layout=Layout(flex='1 1 auto', width='auto'), button_style='danger'),
]
# Items flex proportionally to the weight
items_0 = [
Button(description='weight=1; 0%', layout=Layout(flex='1 1 0%', width='auto'), button_style='danger'),
Button(description='weight=3; 0%', layout=Layout(flex='3 1 0%', width='auto'), button_style='danger'),
Button(description='weight=1; 0%', layout=Layout(flex='1 1 0%', width='auto'), button_style='danger'),
]
box_layout = Layout(display='flex',
flex_flow='row',
align_items='stretch',
width='70%')
box_auto = Box(children=items_auto, layout=box_layout)
box_0 = Box(children=items_0, layout=box_layout)
VBox([box_auto, box_0])
###Output
_____no_output_____
###Markdown
**A more advanced example: a reactive form.**The form is a `VBox` of width '50%'. Each row in the VBox is an HBox, that justifies the content with space between..
###Code
from ipywidgets import Layout, Button, Box, FloatText, Textarea, Dropdown, Label, IntSlider
form_item_layout = Layout(
display='flex',
flex_flow='row',
justify_content='space-between'
)
form_items = [
Box([Label(value='Age of the captain'), IntSlider(min=40, max=60)], layout=form_item_layout),
Box([Label(value='Egg style'),
Dropdown(options=['Scrambled', 'Sunny side up', 'Over easy'])], layout=form_item_layout),
Box([Label(value='Ship size'),
FloatText()], layout=form_item_layout),
Box([Label(value='Information'),
Textarea()], layout=form_item_layout)
]
form = Box(form_items, layout=Layout(
display='flex',
flex_flow='column',
border='solid 2px',
align_items='stretch',
width='50%'
))
form
###Output
_____no_output_____
###Markdown
**A more advanced example: a carousel.**
###Code
from ipywidgets import Layout, Button, Box, Label
item_layout = Layout(height='100px', min_width='40px')
items = [Button(layout=item_layout, description=str(i), button_style='warning') for i in range(40)]
box_layout = Layout(overflow_x='scroll',
border='3px solid black',
width='500px',
height='',
flex_flow='row',
display='flex')
carousel = Box(children=items, layout=box_layout)
VBox([Label('Scroll horizontally:'), carousel])
###Output
_____no_output_____
###Markdown
A widget for exploring layout optionsThe widget below was written by ipywidgets user [Doug Redden (@DougRzz)](https://github.com/DougRzz). If you want to look through the source code to see how it works, take a look at this [notebook he contributed](cssJupyterWidgetStyling-UI.ipynb).Use the dropdowns and sliders in the widget to change the layout of the box containing the five colored buttons. Many of the CSS layout options described above are available, and the Python code to generate a `Layout` object reflecting the settings is in a `TextArea` in the widget.
###Code
from layout_preview import layout
layout
###Output
_____no_output_____
###Markdown
Predefined stylesIf you wish the styling of widgets to make use of colors and styles defined by the environment (to be consistent with e.g. a notebook theme), many widgets enable choosing in a list of pre-defined styles.For example, the `Button` widget has a `button_style` attribute that may take 5 different values: - `'primary'` - `'success'` - `'info'` - `'warning'` - `'danger'`besides the default empty string ''.
###Code
from ipywidgets import Button
Button(description='Danger Button', button_style='danger')
###Output
_____no_output_____
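###Markdown
 To compare all the predefined styles side by side, one can build one button per value (a small sketch reusing the `HBox` helper from earlier):
###Code
from ipywidgets import Button, HBox
HBox([Button(description=s if s else 'default', button_style=s)
      for s in ['', 'primary', 'success', 'info', 'warning', 'danger']])
###Output
_____no_output_____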
###Markdown
The `style` attributeWhile the `layout` attribute only exposes layout-related CSS properties for the top-level DOM element of widgets, the `style` attribute is used to expose non-layout related styling attributes of widgets.However, the properties of the `style` attribute are specific to each widget type.
###Code
b1 = Button(description='Custom color')
b1.style.button_color = 'lightgreen'
b1
###Output
_____no_output_____
###Markdown
You can get a list of the style attributes for a widget with the `keys` property.
###Code
b1.style.keys
###Output
_____no_output_____
###Markdown
Just like the `layout` attribute, widget styles can be assigned to other widgets.
###Code
b2 = Button()
b2.style = b1.style
b2
###Output
_____no_output_____
###Markdown
Widget styling attributes are specific to each widget type.
###Code
s1 = IntSlider(description='Blue handle')
s1.style.handle_color = 'lightblue'
s1
###Output
_____no_output_____
###Markdown
Layout and Styling of Jupyter widgetsThis notebook presents how to layout and style Jupyter interactive widgets to build rich and *reactive* widget-based applications. The `layout` attribute.Jupyter interactive widgets have a `layout` attribute exposing a number of CSS properties that impact how widgets are laid out. Exposed CSS propertiesThe following properties map to the values of the CSS properties of the same name (underscores being replaced with dashes), applied to the top DOM elements of the corresponding widget. Sizes- `height`- `width`- `max_height`- `max_width`- `min_height`- `min_width` Display- `visibility`- `display`- `overflow`- `overflow_x` (deprecated in `7.5`, use `overflow` instead)- `overflow_y` (deprecated in `7.5`, use `overflow` instead) Box model- `border` - `margin`- `padding` Positioning- `top`- `left`- `bottom`- `right` Flexbox- `order`- `flex_flow`- `align_items`- `flex`- `align_self`- `align_content`- `justify_content` Grid layout- `grid_auto_columns`- `grid_auto_flow`- `grid_auto_rows`- `grid_gap`- `grid_template`- `grid_row`- `grid_column` Shorthand CSS propertiesYou may have noticed that certain CSS properties such as `margin-[top/right/bottom/left]` seem to be missing. The same holds for `padding-[top/right/bottom/left]` etc.In fact, you can atomically specify `[top/right/bottom/left]` margins via the `margin` attribute alone by passing the string `'100px 150px 100px 80px'` for, respectively, `top`, `right`, `bottom` and `left` margins of `100`, `150`, `100` and `80` pixels.Similarly, the `flex` attribute can hold values for `flex-grow`, `flex-shrink` and `flex-basis`. The `border` attribute is a shorthand property for `border-width`, `border-style (required)`, and `border-color`. Simple examples The following example shows how to resize a `Button` so that its views have a height of `80px` and a width of `50%` of the available space. It also includes an example of setting a CSS property that requires multiple values (a border, in this case):
###Code
from ipywidgets import Button, Layout
b = Button(description='(50% width, 80px height) button',
layout=Layout(width='50%', height='80px', border='2px dotted blue'))
b
###Output
_____no_output_____
###Markdown
The `layout` property can be shared between multiple widgets and assigned directly.
###Code
Button(description='Another button with the same layout', layout=b.layout)
###Output
_____no_output_____
###Markdown
Description You may have noticed that long descriptions are truncated. This is because the description length is, by default, fixed.
###Code
from ipywidgets import IntSlider
IntSlider(description='A too long description')
###Output
_____no_output_____
###Markdown
If you need more flexibility to lay out widgets and descriptions, you can use Label widgets directly.
###Code
from ipywidgets import HBox, Label
HBox([Label('A too long description'), IntSlider()])
###Output
_____no_output_____
###Markdown
You can change the length of the description to fit the description text. However, this will make the widget itself shorter. You can change both by adjusting the description width and the widget width using the widget's style.
###Code
style = {'description_width': 'initial'}
IntSlider(description='A too long description', style=style)
###Output
_____no_output_____
###Markdown
Natural sizes, and arrangements using HBox and VBoxMost of the core-widgets have default heights and widths that tile well together. This allows simple layouts based on the `HBox` and `VBox` helper functions to align naturally:
###Code
from ipywidgets import Button, HBox, VBox
words = ['correct', 'horse', 'battery', 'staple']
items = [Button(description=w) for w in words]
left_box = VBox([items[0], items[1]])
right_box = VBox([items[2], items[3]])
HBox([left_box, right_box])
###Output
_____no_output_____
###Markdown
LaTeX Widgets such as sliders and text inputs have a description attribute that can render Latex Equations. The `Label` widget also renders Latex equations.
###Code
from ipywidgets import IntSlider, Label
IntSlider(description=r'\(\int_0^t f\)')
Label(value=r'\(e=mc^2\)')
###Output
_____no_output_____
###Markdown
Number formattingSliders have a readout field which can be formatted using Python's [Format Specification Mini-Language](https://docs.python.org/3/library/string.html#format-specification-mini-language). If the space available for the readout is too narrow for the string representation of the slider value, a different styling is applied to show that not all digits are visible. The Flexbox layoutThe `HBox` and `VBox` classes above are special cases of the `Box` widget.The `Box` widget enables the entire CSS flexbox spec as well as the Grid layout spec, enabling rich reactive layouts in the Jupyter notebook. It aims at providing an efficient way to lay out, align and distribute space among items in a container.Again, the whole flexbox spec is exposed via the `layout` attribute of the container widget (`Box`) and the contained items. One may share the same `layout` attribute among all the contained items. AcknowledgementThe following flexbox tutorial on the flexbox layout follows the lines of the article [A Complete Guide to Flexbox](https://css-tricks.com/snippets/css/a-guide-to-flexbox/) by Chris Coyier, and uses text and various images from the article [with permission](https://css-tricks.com/license/). Basics and terminologySince flexbox is a whole module and not a single property, it involves a lot of things including its whole set of properties. Some of them are meant to be set on the container (parent element, known as "flex container") whereas the others are meant to be set on the children (known as "flex items").If regular layout is based on both block and inline flow directions, the flex layout is based on "flex-flow directions". Please have a look at this figure from the specification, explaining the main idea behind the flex layout.Basically, items will be laid out following either the `main axis` (from `main-start` to `main-end`) or the `cross axis` (from `cross-start` to `cross-end`).- `main axis` - The main axis of a flex container is the primary axis along which flex items are laid out. Beware, it is not necessarily horizontal; it depends on the flex-direction property (see below).- `main-start | main-end` - The flex items are placed within the container starting from main-start and going to main-end.- `main size` - A flex item's width or height, whichever is in the main dimension, is the item's main size. The flex item's main size property is either the ‘width’ or ‘height’ property, whichever is in the main dimension.- `cross axis` - The axis perpendicular to the main axis is called the cross axis. Its direction depends on the main axis direction.- `cross-start | cross-end` - Flex lines are filled with items and placed into the container starting on the cross-start side of the flex container and going toward the cross-end side.- `cross size` - The width or height of a flex item, whichever is in the cross dimension, is the item's cross size. The cross size property is whichever of ‘width’ or ‘height’ that is in the cross dimension. Properties of the parent display`display` can be `flex` or `inline-flex`. This defines a flex container (block or inline). flex-flow`flex-flow` is a shorthand for the `flex-direction` and `flex-wrap` properties, which together define the flex container's main and cross axes. Default is `row nowrap`.- `flex-direction` (column-reverse | column | row | row-reverse ) This establishes the main-axis, thus defining the direction flex items are placed in the flex container. Flexbox is (aside from optional wrapping) a single-direction layout concept. 
Think of flex items as primarily laying out either in horizontal rows or vertical columns.- `flex-wrap` (nowrap | wrap | wrap-reverse) By default, flex items will all try to fit onto one line. You can change that and allow the items to wrap as needed with this property. Direction also plays a role here, determining the direction new lines are stacked in. justify-content`justify-content` can be one of `flex-start`, `flex-end`, `center`, `space-between`, `space-around`. This defines the alignment along the main axis. It helps distribute extra free space left over when either all the flex items on a line are inflexible, or are flexible but have reached their maximum size. It also exerts some control over the alignment of items when they overflow the line. align-items`align-items` can be one of `flex-start`, `flex-end`, `center`, `baseline`, `stretch`. This defines the default behaviour for how flex items are laid out along the cross axis on the current line. Think of it as the justify-content version for the cross-axis (perpendicular to the main-axis). align-content`align-content` can be one of `flex-start`, `flex-end`, `center`, `baseline`, `stretch`. This aligns a flex container's lines within when there is extra space in the cross-axis, similar to how justify-content aligns individual items within the main-axis.**Note**: this property has no effect when there is only one line of flex items. Properties of the itemsThe flexbox-related CSS properties of the items have no impact if the parent element is not a flexbox container (i.e. has a `display` attribute equal to `flex` or `inline-flex`). orderBy default, flex items are laid out in the source order. However, the `order` property controls the order in which they appear in the flex container. flex`flex` is shorthand for three properties, `flex-grow`, `flex-shrink` and `flex-basis` combined. The second and third parameters (`flex-shrink` and `flex-basis`) are optional. Default is `0 1 auto`. - `flex-grow` This defines the ability for a flex item to grow if necessary. It accepts a unitless value that serves as a proportion. It dictates what amount of the available space inside the flex container the item should take up. If all items have flex-grow set to 1, the remaining space in the container will be distributed equally to all children. If one of the children has a value of 2, that child would take up twice as much of the remaining space as the others (or it will try to, at least). - `flex-shrink` This defines the ability for a flex item to shrink if necessary. - `flex-basis` This defines the default size of an element before the remaining space is distributed. It can be a length (e.g. `20%`, `5rem`, etc.) or a keyword. The `auto` keyword means *"look at my width or height property"*. align-self`align-self` allows the default alignment (or the one specified by align-items) to be overridden for individual flex items. The VBox and HBox helpersThe `VBox` and `HBox` helper classes provide simple defaults to arrange child widgets in vertical and horizontal boxes. 
They are roughly equivalent to:
```Python
def VBox(*pargs, **kwargs):
    """Displays multiple widgets vertically using the flexible box model."""
    box = Box(*pargs, **kwargs)
    box.layout.display = 'flex'
    box.layout.flex_flow = 'column'
    box.layout.align_items = 'stretch'
    return box

def HBox(*pargs, **kwargs):
    """Displays multiple widgets horizontally using the flexible box model."""
    box = Box(*pargs, **kwargs)
    box.layout.display = 'flex'
    box.layout.align_items = 'stretch'
    return box
```
Examples **Four buttons in a VBox. Items stretch to the maximum width, in a vertical box taking `50%` of the available space.**
###Code
from ipywidgets import Layout, Button, Box
items_layout = Layout( width='auto') # override the default width of the button to 'auto' to let the button grow
box_layout = Layout(display='flex',
flex_flow='column',
align_items='stretch',
border='solid',
width='50%')
words = ['correct', 'horse', 'battery', 'staple']
items = [Button(description=word, layout=items_layout, button_style='danger') for word in words]
box = Box(children=items, layout=box_layout)
box
###Output
_____no_output_____
###Markdown
**Three buttons in an HBox. Items flex proportionally to their weight.**
###Code
from ipywidgets import Layout, Button, Box, VBox
# Items flex proportionally to the weight and the left over space around the text
items_auto = [
Button(description='weight=1; auto', layout=Layout(flex='1 1 auto', width='auto'), button_style='danger'),
Button(description='weight=3; auto', layout=Layout(flex='3 1 auto', width='auto'), button_style='danger'),
Button(description='weight=1; auto', layout=Layout(flex='1 1 auto', width='auto'), button_style='danger'),
]
# Items flex proportionally to the weight
items_0 = [
Button(description='weight=1; 0%', layout=Layout(flex='1 1 0%', width='auto'), button_style='danger'),
Button(description='weight=3; 0%', layout=Layout(flex='3 1 0%', width='auto'), button_style='danger'),
Button(description='weight=1; 0%', layout=Layout(flex='1 1 0%', width='auto'), button_style='danger'),
]
box_layout = Layout(display='flex',
flex_flow='row',
align_items='stretch',
width='70%')
box_auto = Box(children=items_auto, layout=box_layout)
box_0 = Box(children=items_0, layout=box_layout)
VBox([box_auto, box_0])
###Output
_____no_output_____
###Markdown
**A more advanced example: a reactive form.** The form is a `VBox` of width '50%'. Each row in the VBox is an HBox that justifies the content with space between.
###Code
from ipywidgets import Layout, Button, Box, FloatText, Textarea, Dropdown, Label, IntSlider
form_item_layout = Layout(
display='flex',
flex_flow='row',
justify_content='space-between'
)
form_items = [
Box([Label(value='Age of the captain'), IntSlider(min=40, max=60)], layout=form_item_layout),
Box([Label(value='Egg style'),
Dropdown(options=['Scrambled', 'Sunny side up', 'Over easy'])], layout=form_item_layout),
Box([Label(value='Ship size'),
FloatText()], layout=form_item_layout),
Box([Label(value='Information'),
Textarea()], layout=form_item_layout)
]
form = Box(form_items, layout=Layout(
display='flex',
flex_flow='column',
border='solid 2px',
align_items='stretch',
width='50%'
))
form
###Output
_____no_output_____
###Markdown
**A more advanced example: a carousel.**
###Code
from ipywidgets import Layout, Button, Box, Label, VBox
item_layout = Layout(height='100px', min_width='40px')
items = [Button(layout=item_layout, description=str(i), button_style='warning') for i in range(40)]
box_layout = Layout(overflow_x='scroll',
border='3px solid black',
width='500px',
height='',
flex_flow='row',
display='flex')
carousel = Box(children=items, layout=box_layout)
VBox([Label('Scroll horizontally:'), carousel])
###Output
_____no_output_____
###Markdown
*Compatibility note*The `overflow_x` and `overflow_y` options are deprecated in ipywidgets `7.5`. Instead, use the shorthand property `overflow='scroll hidden'`. The first part specifies the overflow in `x`, the second the overflow in `y`; a short sketch is shown below. A widget for exploring layout optionsThe widget below was written by ipywidgets user [Doug Redden (@DougRzz)](https://github.com/DougRzz). If you want to look through the source code to see how it works, take a look at this [notebook he contributed](cssJupyterWidgetStyling-UI.ipynb).Use the dropdowns and sliders in the widget to change the layout of the box containing the five colored buttons. Many of the CSS layout options described above are available, and the Python code to generate a `Layout` object reflecting the settings is in a `TextArea` in the widget.
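Rewriting the carousel layout above with the shorthand would look roughly like this (a sketch, assuming ipywidgets 7.5 or later; only the layout changes):
```python
from ipywidgets import Layout

box_layout = Layout(overflow='scroll hidden',  # scroll in x, hide overflow in y
                    border='3px solid black',
                    width='500px',
                    flex_flow='row',
                    display='flex')
```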
###Code
from layout_preview import layout
layout
###Output
_____no_output_____
###Markdown
Predefined stylesIf you wish the styling of widgets to make use of colors and styles defined by the environment (to be consistent with e.g. a notebook theme), some widgets enable choosing in a list of pre-defined styles.For example, the `Button` widget has a `button_style` attribute that may take 5 different values: - `'primary'` - `'success'` - `'info'` - `'warning'` - `'danger'`besides the default empty string ''.
###Code
from ipywidgets import Button
Button(description='Danger Button', button_style='danger')
###Output
_____no_output_____
###Markdown
The `style` attributeWhile the `layout` attribute only exposes layout-related CSS properties for the top-level DOM element of widgets, the `style` attribute is used to expose non-layout related styling attributes of widgets.However, the properties of the `style` attribute are specific to each widget type.
###Code
b1 = Button(description='Custom color')
b1.style.button_color = 'lightgreen'
b1
###Output
_____no_output_____
###Markdown
You can get a list of the style attributes for a widget with the `keys` property.
###Code
b1.style.keys
###Output
_____no_output_____
###Markdown
Just like the `layout` attribute, widget styles can be assigned to other widgets.
###Code
b2 = Button()
b2.style = b1.style
b2
###Output
_____no_output_____
###Markdown
Widget styling attributes are specific to each widget type.
###Code
s1 = IntSlider(description='Blue handle')
s1.style.handle_color = 'lightblue'
s1
###Output
_____no_output_____
###Markdown
There is a [list of all style keys](Table%20of%20widget%20keys%20and%20style%20keys.ipynb#Style-keys). The Grid layoutThe `GridBox` class is a special case of the `Box` widget.The `Box` widget enables the entire CSS flexbox spec, enabling rich reactive layouts in the Jupyter notebook. It aims at providing an efficient way to lay out, align and distribute space among items in a container.Again, the whole grid layout spec is exposed via the `layout` attribute of the container widget (`Box`) and the contained items. One may share the same `layout` attribute among all the contained items.The following tutorial on the grid layout follows the lines of the article [A Complete Guide to Grid](https://css-tricks.com/snippets/css/complete-guide-grid/) by Chris House, and uses text and various images from the article [with permission](https://css-tricks.com/license/). Basics and browser supportTo get started you have to define a container element as a grid with display: grid, set the column and row sizes with grid-template-rows, grid-template-columns, and grid-template-areas, and then place its child elements into the grid with grid-column and grid-row. Similarly to flexbox, the source order of the grid items doesn't matter. Your CSS can place them in any order, which makes it super easy to rearrange your grid with media queries. Imagine defining the layout of your entire page, and then completely rearranging it to accommodate a different screen width all with only a couple lines of CSS. Grid is one of the most powerful CSS modules ever introduced.As of March 2017, most browsers shipped native, unprefixed support for CSS Grid: Chrome (including on Android), Firefox, Safari (including on iOS), and Opera. Internet Explorer 10 and 11 on the other hand support it, but it's an old implementation with an outdated syntax. The time to build with grid is now! Important terminologyBefore diving into the concepts of Grid it's important to understand the terminology. Since the terms involved here are all kinda conceptually similar, it's easy to confuse them with one another if you don't first memorize their meanings defined by the Grid specification. But don't worry, there aren't many of them.**Grid Container**The element on which `display: grid` is applied. It's the direct parent of all the grid items. In this example `container` is the grid container.
```html
<div class="container">
  <div class="item item-1"></div>
  <div class="item item-2"></div>
  <div class="item item-3"></div>
</div>
```
**Grid Item**The children (e.g. direct descendants) of the grid container. Here the `item` elements are grid items, but `sub-item` isn't.
```html
<div class="container">
  <div class="item"></div>
  <div class="item">
    <p class="sub-item"></p>
  </div>
  <div class="item"></div>
</div>
```
**Grid Line**The dividing lines that make up the structure of the grid. They can be either vertical ("column grid lines") or horizontal ("row grid lines") and reside on either side of a row or column. Here the yellow line is an example of a column grid line.**Grid Track**The space between two adjacent grid lines. You can think of them like the columns or rows of the grid. Here's the grid track between the second and third row grid lines.**Grid Cell**The space between two adjacent row and two adjacent column grid lines. It's a single "unit" of the grid. Here's the grid cell between row grid lines 1 and 2, and column grid lines 2 and 3.**Grid Area**The total space surrounded by four grid lines. A grid area may be comprised of any number of grid cells. Here's the grid area between row grid lines 1 and 3, and column grid lines 1 and 3. Properties of the parent**grid-template-rows, grid-template-columns**Defines the columns and rows of the grid with a space-separated list of values. 
The values represent the track size, and the space between them represents the grid line.Values:- `<track-size>` - can be a length, a percentage, or a fraction of the free space in the grid (using the `fr` unit)- `<line-name>` - an arbitrary name of your choosing**grid-template-areas** Defines a grid template by referencing the names of the grid areas which are specified with the grid-area property. Repeating the name of a grid area causes the content to span those cells. A period signifies an empty cell. The syntax itself provides a visualization of the structure of the grid.Values:- `<grid-area-name>` - the name of a grid area specified with `grid-area`- `.` - a period signifies an empty grid cell- `none` - no grid areas are defined**grid-gap** A shorthand for `grid-row-gap` and `grid-column-gap`Values:- `<grid-row-gap>` `<grid-column-gap>` - length values, where `grid-row-gap` and `grid-column-gap` specify the sizes of the grid lines. You can think of it like setting the width of the gutters between the columns / rows.- `<line-size>` - a length value*Note: The `grid-` prefix will be removed and `grid-gap` renamed to `gap`. The unprefixed property is already supported in Chrome 68+, Safari 11.2 Release 50+ and Opera 54+.***align-items**Aligns grid items along the block (column) axis (as opposed to justify-items which aligns along the inline (row) axis). This value applies to all grid items inside the container.Values:- `start` - aligns items to be flush with the start edge of their cell- `end` - aligns items to be flush with the end edge of their cell- `center` - aligns items in the center of their cell- `stretch` - fills the whole height of the cell (this is the default)**justify-items**Aligns grid items along the inline (row) axis (as opposed to `align-items` which aligns along the block (column) axis). This value applies to all grid items inside the container.Values:- `start` - aligns items to be flush with the start edge of their cell- `end` - aligns items to be flush with the end edge of their cell- `center` - aligns items in the center of their cell- `stretch` - fills the whole width of the cell (this is the default)**align-content**Sometimes the total size of your grid might be less than the size of its grid container. This could happen if all of your grid items are sized with non-flexible units like `px`. In this case you can set the alignment of the grid within the grid container. This property aligns the grid along the block (column) axis (as opposed to justify-content which aligns the grid along the inline (row) axis).Values:- `start` - aligns the grid to be flush with the start edge of the grid container- `end` - aligns the grid to be flush with the end edge of the grid container- `center` - aligns the grid in the center of the grid container- `stretch` - resizes the grid items to allow the grid to fill the full height of the grid container- `space-around` - places an even amount of space between each grid item, with half-sized spaces on the far ends- `space-between` - places an even amount of space between each grid item, with no space at the far ends- `space-evenly` - places an even amount of space between each grid item, including the far ends**justify-content**Sometimes the total size of your grid might be less than the size of its grid container. This could happen if all of your grid items are sized with non-flexible units like `px`. In this case you can set the alignment of the grid within the grid container. 
This property aligns the grid along the inline (row) axis (as opposed to align-content which aligns the grid along the block (column) axis).Values:- `start` - aligns the grid to be flush with the start edge of the grid container- `end` - aligns the grid to be flush with the end edge of the grid container- `center` - aligns the grid in the center of the grid container- `stretch` - resizes the grid items to allow the grid to fill the full width of the grid container- `space-around` - places an even amount of space between each grid item, with half-sized spaces on the far ends- `space-between` - places an even amount of space between each grid item, with no space at the far ends- `space-evenly` - places an even amount of space between each grid item, including the far ends**grid-auto-columns, grid-auto-rows**Specifies the size of any auto-generated grid tracks (aka implicit grid tracks). Implicit tracks get created when there are more grid items than cells in the grid or when a grid item is placed outside of the explicit grid. (see The Difference Between Explicit and Implicit Grids)Values:- `<track-size>` - can be a length, a percentage, or a fraction of the free space in the grid (using the `fr` unit) Properties of the items*Note: `float`, `display: inline-block`, `display: table-cell`, `vertical-align` and `column-*` properties have no effect on a grid item.***grid-column, grid-row**Determines a grid item's location within the grid by referring to specific grid lines. `grid-column-start`/`grid-row-start` is the line where the item begins, and `grid-column-end`/`grid-row-end` is the line where the item ends.Values:- `<line>` - can be a number to refer to a numbered grid line, or a name to refer to a named grid line- `span <number>` - the item will span across the provided number of grid tracks- `span <name>` - the item will span across until it hits the next line with the provided name- `auto` - indicates auto-placement, an automatic span, or a default span of one
```css
.item {
  grid-column: <start-line> / <end-line> | <start-line> / span <value>;
  grid-row: <start-line> / <end-line> | <start-line> / span <value>;
}
```
Examples:
```css
.item-a {
  grid-column: 2 / five;
  grid-row: row1-start / 3;
}
```
```css
.item-b {
  grid-column: 1 / span col4-start;
  grid-row: 2 / span 2;
}
```
If no `grid-column` / `grid-row` is declared, the item will span 1 track by default.Items can overlap each other. You can use `z-index` to control their stacking order.**grid-area**Gives an item a name so that it can be referenced by a template created with the `grid-template-areas` property. Alternatively, this property can be used as an even shorter shorthand for `grid-row-start` + `grid-column-start` + `grid-row-end` + `grid-column-end`.Values:- `<name>` - a name of your choosing- `<row-start> / <column-start> / <row-end> / <column-end>` - can be numbers or named lines
```css
.item {
  grid-area: <name> | <row-start> / <column-start> / <row-end> / <column-end>;
}
```
Examples:As a way to assign a name to the item:
```css
.item-d {
  grid-area: header;
}
```
As the short-shorthand for `grid-row-start` + `grid-column-start` + `grid-row-end` + `grid-column-end`:
```css
.item-d {
  grid-area: 1 / col4-start / last-line / 6;
}
```
**justify-self**Aligns a grid item inside a cell along the inline (row) axis (as opposed to `align-self` which aligns along the block (column) axis). 
This value applies to a grid item inside a single cell.Values:- `start` - aligns the grid item to be flush with the start edge of the cell- `end` - aligns the grid item to be flush with the end edge of the cell- `center` - aligns the grid item in the center of the cell- `stretch` - fills the whole width of the cell (this is the default)
```css
.item {
  justify-self: start | end | center | stretch;
}
```
Examples:
```css
.item-a { justify-self: start; }
```
```css
.item-a { justify-self: end; }
```
```css
.item-a { justify-self: center; }
```
```css
.item-a { justify-self: stretch; }
```
To set alignment for *all* the items in a grid, this behavior can also be set on the grid container via the `justify-items` property.
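Before the named-area examples that follow, here is a hedged sketch of line-based placement driven from Python (it assumes an ipywidgets version whose `Layout` exposes the `grid_row`/`grid_column` properties, i.e. 7.3+; the widget names are illustrative):
```python
from ipywidgets import Button, GridBox, Layout

# one item spanning two columns, one spanning two rows, on a 3x2 track template
wide = Button(description='span 2 columns',
              layout=Layout(width='auto', grid_column='1 / span 2'))
tall = Button(description='span 2 rows',
              layout=Layout(width='auto', height='auto', grid_row='1 / span 2'))
GridBox(children=[wide, tall],
        layout=Layout(grid_template_columns='repeat(3, 100px)',
                      grid_template_rows='repeat(2, 60px)',
                      grid_gap='4px'))
```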
###Code
from ipywidgets import Button, GridBox, Layout, ButtonStyle
###Output
_____no_output_____
###Markdown
Placing items by name:
###Code
header = Button(description='Header',
layout=Layout(width='auto', grid_area='header'),
style=ButtonStyle(button_color='lightblue'))
main = Button(description='Main',
layout=Layout(width='auto', grid_area='main'),
style=ButtonStyle(button_color='moccasin'))
sidebar = Button(description='Sidebar',
layout=Layout(width='auto', grid_area='sidebar'),
style=ButtonStyle(button_color='salmon'))
footer = Button(description='Footer',
layout=Layout(width='auto', grid_area='footer'),
style=ButtonStyle(button_color='olive'))
GridBox(children=[header, main, sidebar, footer],
layout=Layout(
width='50%',
grid_template_rows='auto auto auto',
grid_template_columns='25% 25% 25% 25%',
grid_template_areas='''
"header header header header"
"main main . sidebar "
"footer footer footer footer"
''')
)
###Output
_____no_output_____
###Markdown
Setting up row and column template and gap
###Code
GridBox(children=[Button(layout=Layout(width='auto', height='auto'),
style=ButtonStyle(button_color='darkseagreen')) for i in range(9)
],
layout=Layout(
width='50%',
grid_template_columns='100px 50px 100px',
grid_template_rows='80px auto 80px',
grid_gap='5px 10px')
)
###Output
_____no_output_____
###Markdown
Layout and Styling of Jupyter widgetsThis notebook presents how to lay out and style Jupyter interactive widgets to build rich and *reactive* widget-based applications. The `layout` attribute.Jupyter interactive widgets have a `layout` attribute exposing a number of CSS properties that impact how widgets are laid out. Exposed CSS propertiesThe following properties map to the values of the CSS properties of the same name (underscores being replaced with dashes), applied to the top DOM elements of the corresponding widget.** Sizes **- `height`- `width`- `max_height`- `max_width`- `min_height`- `min_width`** Display **- `visibility`- `display`- `overflow`- `overflow_x`- `overflow_y`** Box model **- `border` - `margin`- `padding`** Positioning **- `top`- `left`- `bottom`- `right`** Flexbox **- `order`- `flex_flow`- `align_items`- `flex`- `align_self`- `align_content`- `justify_content` Shorthand CSS propertiesYou may have noticed that certain CSS properties such as `margin-[top/right/bottom/left]` seem to be missing. The same holds for `padding-[top/right/bottom/left]` etc.In fact, you can atomically specify `[top/right/bottom/left]` margins via the `margin` attribute alone by passing the string```margin: 100px 150px 100px 80px;```for respectively `top`, `right`, `bottom` and `left` margins of `100`, `150`, `100` and `80` pixels.Similarly, the `flex` attribute can hold values for `flex-grow`, `flex-shrink` and `flex-basis`. The `border` attribute is a shorthand property for `border-width`, `border-style (required)`, and `border-color`. Simple examples The following example shows how to resize a `Button` so that its views have a height of `80px` and a width of `50%` of the available space. It also includes an example of setting a CSS property that requires multiple values (a border, in this case):
###Code
from ipywidgets import Button, Layout
b = Button(description='(50% width, 80px height) button',
layout=Layout(width='50%', height='80px', border='2px dotted blue'))
b
###Output
_____no_output_____
###Markdown
The `layout` property can be shared between multiple widgets and assigned directly.
###Code
Button(description='Another button with the same layout', layout=b.layout)
###Output
_____no_output_____
###Markdown
Description You may have noticed that long descriptions are truncated. This is because the description length is, by default, fixed.
###Code
from ipywidgets import IntSlider
IntSlider(description='A too long description')
###Output
_____no_output_____
###Markdown
If you need more flexibility to lay out widgets and descriptions, you can use Label widgets directly.
###Code
from ipywidgets import HBox, Label
HBox([Label('A too long description'), IntSlider()])
###Output
_____no_output_____
###Markdown
**Spoiler alert**: You can change the length of the description to fit the description text. However, this will make the widget itself shorter. You can change both by adjusting the description width and the widget width using the widget's style.
###Code
style = {'description_width': 'initial'}
IntSlider(description='A too long description', style=style)
###Output
_____no_output_____
###Markdown
Natural sizes, and arrangements using HBox and VBoxMost of the core-widgets have - a natural width that is a multiple of `148` pixels- a natural height of `32` pixels or a multiple of that number.- a default margin of `2` pixelswhich will be the ones used when it is not specified in the `layout` attribute.This allows simple layouts based on the `HBox` and `VBox` helper functions to align naturally:
###Code
from ipywidgets import Button, HBox, VBox
words = ['correct', 'horse', 'battery', 'staple']
items = [Button(description=w) for w in words]
left_box = VBox([items[0], items[1]])
right_box = VBox([items[2], items[3]])
HBox([left_box, right_box])
###Output
_____no_output_____
###Markdown
LaTeX Widgets such as sliders and text inputs have a description attribute that can render $\LaTeX$ Equations. The `Label` widget also renders $\LaTeX$ equations.
###Code
from ipywidgets import IntSlider, Label
IntSlider(description=r'\(\int_0^t f\)')
Label(value=r'\(e=mc^2\)')
###Output
_____no_output_____
###Markdown
Number formattingSliders have a readout field which can be formatted using Python's *[Format Specification Mini-Language](https://docs.python.org/3/library/string.html#format-specification-mini-language)*. If the space available for the readout is too narrow for the string representation of the slider value, a different styling is applied to show that not all digits are visible. The Flexbox layoutIn fact, the `HBox` and `VBox` helpers used above are functions returning instances of the `Box` widget with specific options.The `Box` widget enables the entire CSS Flexbox spec, enabling rich reactive layouts in the Jupyter notebook. It aims at providing an efficient way to lay out, align and distribute space among items in a container.Again, the whole Flexbox spec is exposed via the `layout` attribute of the container widget (`Box`) and the contained items. One may share the same `layout` attribute among all the contained items. AcknowledgementThe following tutorial on the Flexbox layout follows the lines of the article *[A Complete Guide to Flexbox](https://css-tricks.com/snippets/css/a-guide-to-flexbox/)* by Chris Coyier. Basics and terminologySince flexbox is a whole module and not a single property, it involves a lot of things including its whole set of properties. Some of them are meant to be set on the container (parent element, known as "flex container") whereas the others are meant to be set on the children (known as "flex items").If regular layout is based on both block and inline flow directions, the flex layout is based on "flex-flow directions". Please have a look at this figure from the specification, explaining the main idea behind the flex layout.Basically, items will be laid out following either the `main axis` (from `main-start` to `main-end`) or the `cross axis` (from `cross-start` to `cross-end`).- `main axis` - The main axis of a flex container is the primary axis along which flex items are laid out. Beware, it is not necessarily horizontal; it depends on the flex-direction property (see below).- `main-start | main-end` - The flex items are placed within the container starting from main-start and going to main-end.- `main size` - A flex item's width or height, whichever is in the main dimension, is the item's main size. The flex item's main size property is either the ‘width’ or ‘height’ property, whichever is in the main dimension.- `cross axis` - The axis perpendicular to the main axis is called the cross axis. Its direction depends on the main axis direction.- `cross-start | cross-end` - Flex lines are filled with items and placed into the container starting on the cross-start side of the flex container and going toward the cross-end side.- `cross size` - The width or height of a flex item, whichever is in the cross dimension, is the item's cross size. The cross size property is whichever of ‘width’ or ‘height’ that is in the cross dimension. Properties of the parent- `display` (must be equal to 'flex' or 'inline-flex') This defines a flex container (inline or block).- `flex-flow` **(shorthand for two properties)** This is a shorthand for the `flex-direction` and `flex-wrap` properties, which together define the flex container's main and cross axes. Default is `row nowrap`. - `flex-direction` (column-reverse | column | row | row-reverse) This establishes the main-axis, thus defining the direction flex items are placed in the flex container. Flexbox is (aside from optional wrapping) a single-direction layout concept. 
Think of flex items as primarily laying out either in horizontal rows or vertical columns. - `flex-wrap` (nowrap | wrap | wrap-reverse) By default, flex items will all try to fit onto one line. You can change that and allow the items to wrap as needed with this property. Direction also plays a role here, determining the direction new lines are stacked in. - `justify-content` (flex-start | flex-end | center | space-between | space-around) This defines the alignment along the main axis. It helps distribute extra free space left over when either all the flex items on a line are inflexible, or are flexible but have reached their maximum size. It also exerts some control over the alignment of items when they overflow the line. - `align-items` (flex-start | flex-end | center | baseline | stretch) This defines the default behaviour for how flex items are laid out along the cross axis on the current line. Think of it as the justify-content version for the cross-axis (perpendicular to the main-axis). - `align-content` (flex-start | flex-end | center | baseline | stretch) This aligns a flex container's lines within when there is extra space in the cross-axis, similar to how justify-content aligns individual items within the main-axis. **Note**: this property has no effect when there is only one line of flex items. Properties of the itemsThe flexbox-related CSS properties of the items have no impact if the parent element is not a flexbox container (i.e. has a `display` attribute equal to `flex` or `inline-flex`).- `order` By default, flex items are laid out in the source order. However, the order property controls the order in which they appear in the flex container. - `flex` **(shorthand for three properties)** This is the shorthand for flex-grow, flex-shrink and flex-basis combined. The second and third parameters (flex-shrink and flex-basis) are optional. Default is `0 1 auto`. - `flex-grow` This defines the ability for a flex item to grow if necessary. It accepts a unitless value that serves as a proportion. It dictates what amount of the available space inside the flex container the item should take up. If all items have flex-grow set to 1, the remaining space in the container will be distributed equally to all children. If one of the children has a value of 2, that child would take up twice as much of the remaining space as the others (or it will try to, at least). - `flex-shrink` This defines the ability for a flex item to shrink if necessary. - `flex-basis` This defines the default size of an element before the remaining space is distributed. It can be a length (e.g. `20%`, `5rem`, etc.) or a keyword. The `auto` keyword means *"look at my width or height property"*. - `align-self` This allows the default alignment (or the one specified by align-items) to be overridden for individual flex items. The VBox and HBox helpersThe `VBox` and `HBox` helper classes provide simple defaults to arrange child widgets in vertical and horizontal boxes. They are roughly equivalent to:
```Python
def VBox(*pargs, **kwargs):
    """Displays multiple widgets vertically using the flexible box model."""
    box = Box(*pargs, **kwargs)
    box.layout.display = 'flex'
    box.layout.flex_flow = 'column'
    box.layout.align_items = 'stretch'
    return box

def HBox(*pargs, **kwargs):
    """Displays multiple widgets horizontally using the flexible box model."""
    box = Box(*pargs, **kwargs)
    box.layout.display = 'flex'
    box.layout.align_items = 'stretch'
    return box
```
Examples **Four buttons in a VBox. 
Items stretch to the maximum width, in a vertical box taking `50%` of the available space.**
###Code
from ipywidgets import Layout, Button, Box
items_layout = Layout(flex='1 1 auto',
width='auto') # override the default width of the button to 'auto' to let the button grow
box_layout = Layout(display='flex',
flex_flow='column',
align_items='stretch',
border='solid',
width='50%')
words = ['correct', 'horse', 'battery', 'staple']
items = [Button(description=w, layout=items_layout, button_style='danger') for w in words]
box = Box(children=items, layout=box_layout)
box
###Output
_____no_output_____
###Markdown
**Three buttons in an HBox. Items flex proportionally to their weight.**
###Code
from ipywidgets import Layout, Button, Box
items = [
Button(description='weight=1'),
Button(description='weight=2', layout=Layout(flex='2 1 auto', width='auto')),
Button(description='weight=1'),
]
box_layout = Layout(display='flex',
flex_flow='row',
align_items='stretch',
border='solid',
width='50%')
box = Box(children=items, layout=box_layout)
box
###Output
_____no_output_____
###Markdown
**A more advanced example: a reactive form.** The form is a `VBox` of width '50%'. Each row in the VBox is an HBox that justifies the content with space between.
###Code
from ipywidgets import Layout, Button, Box, FloatText, Textarea, Dropdown, Label, IntSlider
form_item_layout = Layout(
display='flex',
flex_flow='row',
justify_content='space-between'
)
form_items = [
Box([Label(value='Age of the captain'), IntSlider(min=40, max=60)], layout=form_item_layout),
Box([Label(value='Egg style'),
Dropdown(options=['Scrambled', 'Sunny side up', 'Over easy'])], layout=form_item_layout),
Box([Label(value='Ship size'),
FloatText()], layout=form_item_layout),
Box([Label(value='Information'),
Textarea()], layout=form_item_layout)
]
form = Box(form_items, layout=Layout(
display='flex',
flex_flow='column',
border='solid 2px',
align_items='stretch',
width='50%'
))
form
###Output
_____no_output_____
###Markdown
**A more advanced example: a carousel.**
###Code
from ipywidgets import Layout, Button, Box, Label, VBox
item_layout = Layout(height='100px', min_width='40px')
items = [Button(layout=item_layout, description=str(i), button_style='warning') for i in range(40)]
box_layout = Layout(overflow_x='scroll',
border='3px solid black',
width='500px',
height='',
                    flex_flow='row',  # Layout exposes flex_flow, not flex_direction
display='flex')
carousel = Box(children=items, layout=box_layout)
VBox([Label('Scroll horizontally:'), carousel])
###Output
_____no_output_____
###Markdown
A widget for exploring layout optionsThe widget below was written by ipywidgets user [Doug Redden (@DougRzz)](https://github.com/DougRzz). If you want to look through the source code to see how it works, take a look at this [notebook he contributed](cssJupyterWidgetStyling-UI.ipynb).Use the dropdowns and sliders in the widget to change the layout of the box containing the five colored buttons. Many of the CSS layout options described above are available, and the Python code to generate a `Layout` object reflecting the settings is in a `TextArea` in the widget.
###Code
from layout_preview import layout
layout
###Output
_____no_output_____
###Markdown
Predefined stylesIf you wish the styling of widgets to make use of colors and styles defined by the environment (to be consistent with e.g. a notebook theme), many widgets enable choosing in a list of pre-defined styles.For example, the `Button` widget has a `button_style` attribute that may take 5 different values: - `'primary'` - `'success'` - `'info'` - `'warning'` - `'danger'`besides the default empty string ''.
###Code
from ipywidgets import Button
Button(description='Danger Button', button_style='danger')
###Output
_____no_output_____
###Markdown
The `style` attributeWhile the `layout` attribute only exposes layout-related CSS properties for the top-level DOM element of widgets, the `style` attribute is used to expose non-layout related styling attributes of widgets.However, the properties of the `style` attribute are specific to each widget type.
###Code
b1 = Button(description='Custom color')
b1.style.button_color = 'lightgreen'
b1
###Output
_____no_output_____
###Markdown
You can get a list of the style attributes for a widget with the `keys` property.
###Code
b1.style.keys
###Output
_____no_output_____
###Markdown
Just like the `layout` attribute, widget styles can be assigned to other widgets.
###Code
b2 = Button()
b2.style = b1.style
b2
###Output
_____no_output_____
###Markdown
Widget styling attributes are specific to each widget type.
###Code
s1 = IntSlider(description='Blue handle')
s1.style.handle_color = 'lightblue'
s1
###Output
_____no_output_____ |
SARIMAX/hourly-weather-wind_direction.ipynb | ###Markdown
Seasonal Autoregressive Integrated Moving Average with Explanatory Variable (SARIMAX)The ARIMA model is a generalisation of an ARMA model that can be applied to non-stationary time series.The SARIMAX model is a modified and extended version of ARIMA that accounts for seasonality in the time series and includes independent predictor variables.
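In the usual notation, a SARIMAX$(p,d,q)\times(P,D,Q)_s$ model with exogenous regressors $x_t$ can be sketched as
$$\phi_p(B)\,\Phi_P(B^s)\,(1-B)^d(1-B^s)^D\big(y_t - \beta^\top x_t\big) = \theta_q(B)\,\Theta_Q(B^s)\,\varepsilon_t,$$
where $B$ is the backshift operator and $\varepsilon_t$ is white noise. (This is the standard textbook form; statsmodels, used below, treats the exogenous variables as a regression component with SARIMA errors.)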
###Code
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from time import time
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller
matplotlib.rcParams['figure.figsize'] = (16, 9)
pd.options.display.max_columns = 999
###Output
_____no_output_____
###Markdown
Load Dataset
###Code
df = pd.read_csv('../datasets/hourly-weather-wind_direction.csv', parse_dates=[0], index_col='DateTime')
print(df.shape)
df.head()
###Output
(5000, 36)
###Markdown
Define ParametersMake predictions for a 24-hour period using a training period of four weeks.
###Code
dataset_name = 'Hourly Weather Wind Direction'
dataset_abbr = 'HWD'
model_name = 'SARIMAX'
context_length = 24*7*4 # Four weeks
prediction_length = 24
###Output
_____no_output_____
###Markdown
Define Error MetricThe seasonal variant of the mean absolute scaled error (MASE) will be used to evaluate the forecasts.
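Concretely, for seasonality $m$ and a test horizon of $H$ points, the function below computes
$$\text{sMASE} = \frac{\frac{1}{H}\sum_{t=1}^{H}\lvert y_t - \hat{y}_t\rvert}{\frac{1}{T-m}\sum_{t=m+1}^{T}\lvert y_t - y_{t-m}\rvert},$$
i.e. the mean absolute error of the forecast, scaled by the in-sample mean absolute error of a seasonal naive forecast on the training series.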
###Code
def calc_sMASE(training_series, testing_series, prediction_series, seasonality=prediction_length):
a = training_series.iloc[seasonality:].values
b = training_series.iloc[:-seasonality].values
d = np.sum(np.abs(a-b)) / len(a)
errors = np.abs(testing_series - prediction_series)
return np.mean(errors) / d
###Output
_____no_output_____
###Markdown
Example SARIMAX ModelExploration of how SARIMA models work using a single example time series.
###Code
ts_ex = 'ts10'
df_ex = df.loc[:, ts_ex]
# Plot data from first five days
df_ex.iloc[:24*5].plot();
###Output
_____no_output_____
###Markdown
Time Series DecompositionDecompose the example time series into trend, seasonal, and residual components.
###Code
fig = seasonal_decompose(df_ex.iloc[-500:], model='additive').plot()
###Output
_____no_output_____
###Markdown
There doesn't appear to be a consistent trend. We can run an augmented Dickey-Fuller test to confirm stationarity.
###Code
dftest = adfuller(df_ex.iloc[-500:], autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key,value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
dfoutput
###Output
_____no_output_____
###Markdown
The very low p-value confirms that the data is stationary. We can see that there is daily seasonality which we will capture in our SARIMAX model. Plot ACF and PACFThe Autocorrelation Function (ACF) is the correlation of a signal with a delayed copy of itself as a function of delay.The Partial Autocorrelation Function (PACF) is the partial correlation of a signal with a delayed copy of itself, controlling for the values of the time series at all shorter delays, as a function of delay.
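As a reminder of the standard definitions: for a stationary series, the ACF at lag $k$ is
$$\rho(k) = \frac{\operatorname{Cov}(y_t,\, y_{t-k})}{\operatorname{Var}(y_t)},$$
while the PACF at lag $k$ is the correlation between $y_t$ and $y_{t-k}$ after removing the linear influence of the intermediate lags $y_{t-1},\dots,y_{t-k+1}$.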
###Code
fig, ax = plt.subplots(2)
ax[0] = sm.graphics.tsa.plot_acf(df_ex, lags=50, ax=ax[0])
ax[1] = sm.graphics.tsa.plot_pacf(df_ex, lags=50, ax=ax[1])
###Output
_____no_output_____
###Markdown
There is clearly daily seasonality. A seasonality of 24 hours will be used for the SARIMAX model. Differencing by 24 hours helps remove the seasonality:
###Code
fig, ax = plt.subplots(2)
ax[0] = sm.graphics.tsa.plot_acf(df_ex.diff(24).dropna(), lags=50, ax=ax[0])
ax[1] = sm.graphics.tsa.plot_pacf(df_ex.diff(24).dropna(), lags=50, ax=ax[1])
fig = seasonal_decompose(df_ex.diff(24).dropna(), model='additive').plot()
###Output
_____no_output_____
###Markdown
Prepare Data
###Code
df_ex = pd.DataFrame(df_ex)
days = df_ex.index.dayofweek
dummy_days = pd.get_dummies(days)
dummy_days.columns = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
dummy_days.index = df_ex.index
df_ex = pd.concat([df_ex, dummy_days], axis=1)
df_ex.head()
###Output
_____no_output_____
###Markdown
Build ModelAs SARIMA models can be slow to train, a SARIMAX(1,1,1)(1,1,1)24 model will be used, as this should provide reasonable performance across the time series. Optimised forecasts could be obtained by using a grid search methodology to derive the best-performing parameters, as demonstrated in the ARIMA and ARIMAX notebooks, but this would be at the expense of much greater training times (a sketch of such a search follows below).
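A minimal grid-search sketch (illustrative only: the candidate ranges, the AIC criterion, and the error handling are assumptions, and it reuses `sm` and `np` as imported above):
```python
import itertools

def sarimax_grid_search(endog, exog, s=24):
    """Return the (order, seasonal_order) pair with the lowest AIC. Sketch only."""
    best_aic, best_orders = np.inf, None
    candidates = list(itertools.product([0, 1], repeat=3))
    for order in candidates:
        for seasonal in candidates:
            try:
                fit = sm.tsa.SARIMAX(endog, exog=exog, order=order,
                                     seasonal_order=seasonal + (s,),
                                     enforce_stationarity=False,
                                     enforce_invertibility=False).fit(disp=False)
            except Exception:
                continue  # some combinations may fail to converge
            if fit.aic < best_aic:
                best_aic, best_orders = fit.aic, (order, seasonal + (s,))
    return best_orders, best_aic
```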
###Code
def runSARIMAX(time_series, test_length=prediction_length, train_length=context_length):
ts = time_series.iloc[-(test_length+train_length):]
ts_train = ts.iloc[:-test_length]
ts_test = ts.iloc[-test_length:]
sarimax = sm.tsa.SARIMAX(endog=ts_train.iloc[:, 0],
exog=ts_train.iloc[:, 1:],
order=(1,1,1),
seasonal_order=(1,1,1,24),
enforce_stationarity=False,
enforce_invertibility=False).fit()
summary = sarimax.summary()
fcst = sarimax.predict(start=ts.index[2], end=ts.index[-1],
exog=ts_test.iloc[:, 1:])
fcst = np.concatenate([np.array([0, 0]), fcst])
fcst = pd.DataFrame(data=fcst, index=ts.index, columns=['pred%s' % ts.columns[0][2:]])
return fcst, summary
import warnings
warnings.filterwarnings('ignore')
%%time
fcst, summary = runSARIMAX(df_ex)
df_ex = pd.concat([df_ex, fcst], axis=1)
print(summary)
# Example forecast
fcst0 = df_ex.copy()
fcst0['pred%s' % ts_ex[2:]][fcst0['pred%s' % ts_ex[2:]] < 0] = 0
fcst0.iloc[-4*prediction_length:, 0].plot(label='Actual', c='k', alpha=0.5)
fcst0.iloc[-4*prediction_length:, -1].plot(label='SARIMAX(1,1,1)(1,1,1)24', c='b', alpha=0.5)
plt.axvline(x=fcst0.index[-prediction_length], linestyle=':', linewidth=2, color='r', label='Start of test data')
plt.legend()
plt.title(ts_ex);
###Output
_____no_output_____
###Markdown
Evaluating SARIMAXTo evaluate SARIMAX, we will generate forecasts for each time series using the SARIMAX(1,1,1)(1,1,1)24 approach shown above. The sMASE will be calculated for each individual time series, and the mean of all these scores will be used as the overall accuracy metric for SARIMAX on this dataset.
###Code
results = df.iloc[-(prediction_length+context_length):].copy()
tic = time()
for i, col in enumerate(df.columns):
if i % 10 == 0:
toc = time()
print("Running predictions for {}. Cumulative time: {:.1f} minutes.".format(col, (toc-tic)/60))
# Prepare DataFrame for selected column
dft = df.loc[:, col]
dft = pd.DataFrame(dft)
days = dft.index.dayofweek
dummy_days = pd.get_dummies(days)
dummy_days.columns = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
dummy_days.index = dft.index
dft = pd.concat([dft, dummy_days], axis=1)
# Find best model
fcst, summary = runSARIMAX(dft)
# Add predictions to results DataFrame
results['pred%s' % col[2:]] = fcst.values
toc = time()
print("Finished! Total run time: {:.1f} minutes.".format((toc-tic)/60))
results0 = results.copy()
results0[results0 < 0] = 0
results0.head()
sMASEs = []
for i, col in enumerate(df.columns):
sMASEs.append(calc_sMASE(results0[col].iloc[-(context_length + prediction_length):-prediction_length],
results0[col].iloc[-prediction_length:],
results0['pred%s' % str(i+1)].iloc[-prediction_length:]))
fig, ax = plt.subplots()
ax.hist(sMASEs, bins=20)
ax.set_title('Distributions of sMASEs for {} dataset'.format(dataset_name))
ax.set_xlabel('sMASE')
ax.set_ylabel('Count');
sMASE = np.mean(sMASEs)
print("Overall sMASE: {:.4f}".format(sMASE))
###Output
Overall sMASE: 0.7314
###Markdown
Show some example forecasts.
###Code
fig, ax = plt.subplots(5, 2, sharex=True)
ax = ax.ravel()
for col in range(1, 11):
ax[col-1].plot(results0.index[-prediction_length:], results0['ts%s' % col].iloc[-prediction_length:],
label='Actual', c='k', linestyle='--', linewidth=1)
ax[col-1].plot(results0.index[-prediction_length:], results0['pred%s' % col].iloc[-prediction_length:],
label='SARIMAX(1,1,1)(1,1,1)24', c='b')
ax[9].legend()
fig.suptitle('{} Predictions'.format(dataset_name));
###Output
_____no_output_____
###Markdown
Store the predictions and accuracy score for the SARIMAX models.
###Code
import pickle
with open('{}-sMASE.pkl'.format(dataset_abbr), 'wb') as f:
pickle.dump(sMASE, f)
with open('../_results/{}/{}-results.pkl'.format(model_name, dataset_abbr), 'wb') as f:
pickle.dump(results.iloc[-prediction_length:], f)
###Output
_____no_output_____ |
01-titanic/pandas/pandas.ipynb | ###Markdown
1. How many men and women were aboard the ship? Give two numbers separated by a space as your answer.
###Code
sex_counts = df['Sex'].value_counts()
print('{} {}'.format(sex_counts['male'], sex_counts['female']))
###Output
577 314
###Markdown
2. What fraction of the passengers survived? Compute the share of surviving passengers. Give the answer as a percentage (a number between 0 and 100, without the percent sign), rounded to two decimal places.
###Code
survived_df = df['Survived']
count_of_survived = survived_df.value_counts()[1]
survived_percentage = 100.0 * count_of_survived / survived_df.value_counts().sum()
print("{:0.2f}".format(survived_percentage))
###Output
38.38
###Markdown
3. What share of all passengers travelled in first class? Give the answer as a percentage (a number between 0 and 100, without the percent sign), rounded to two decimal places.
###Code
pclass_df = df['Pclass']
count_of_first_class_passengers = pclass_df.value_counts()[1]
first_class_percentage = 100.0 * count_of_first_class_passengers / survived_df.value_counts().sum()
print("{:0.2f}".format(first_class_percentage))
###Output
24.24
###Markdown
4. How old were the passengers? Compute the mean and the median of the passengers' ages. Give two numbers separated by a space as your answer.
###Code
ages = df['Age'].dropna()
print("{:0.2f} {:0.2f}".format(ages.mean(), ages.median()))
###Output
29.70 28.00
###Markdown
5. Does the number of siblings/spouses correlate with the number of parents/children? Compute the Pearson correlation between the SibSp and Parch features.
###Code
correlation = df['SibSp'].corr(df['Parch'])
print("{:0.2f}".format(correlation))
###Output
0.41
###Markdown
6. What is the most popular female first name on the ship? Extract each passenger's first name from the full name (the Name column). This task is a typical example of what a data analyst faces: the data is heterogeneous and noisy, yet the required information has to be extracted from it. Try parsing a few values of the Name column by hand to work out a rule for extracting the first names and for telling female names from male ones.
###Code
def clean_name(name):
# First word before comma is a surname
s = re.search('^[^,]+, (.*)', name)
if s:
name = s.group(1)
# get name from braces (if in braces)
s = re.search('\(([^)]+)\)', name)
if s:
name = s.group(1)
# Removing appeal
name = re.sub('(Miss\. |Mrs\. |Ms\. )', '', name)
# Get first left word and removing quotes
name = name.split(' ')[0].replace('"', '')
return name
names = df[df['Sex'] == 'female']['Name'].map(clean_name)
name_counts = names.value_counts()
name_counts.head()
print(name_counts.head(1).index.values[0])
###Output
_____no_output_____ |
examples/metrics_multi.ipynb | ###Markdown
Logistic Regression - Ridge
###Code
lr = LogisticRegression(penalty='l2')
lr = train_model(lr, X_train, y_train)
print('Test score = ',lr.score(X_test,y_test))
prob, pred, label = get_data(lr, X_test, y_test)
lr_metrics = MultiClassMetrics(prob, pred, label, method='micro')
lr_metrics.give_threshold()
make_plots(label, pred, lr_metrics.fpr, lr_metrics.tpr,lr_metrics.threshold,
lr_metrics.recall, lr_metrics.precision)
###Output
Confusion matrix, without normalization
[[16 0 0]
[ 0 20 0]
[ 0 14 0]]
###Markdown
Logistic regression - Lasso
###Code
lr = LogisticRegression(penalty='l1', solver='liblinear')  # liblinear supports the L1 penalty
lr = train_model(lr, X_train, y_train)
print('Test score = ',lr.score(X_test,y_test))
prob, pred, label = get_data(lr, X_test, y_test)
lr_metrics = MultiClassMetrics(prob, pred, label, method='micro')
lr_metrics.give_threshold()
make_plots(label, pred, lr_metrics.fpr, lr_metrics.tpr,lr_metrics.threshold,
lr_metrics.recall, lr_metrics.precision)
###Output
Training score = 0.97
Test score = 0.9
Confusion matrix, without normalization
[[16 0 0]
[ 0 20 0]
[ 0 14 0]]
###Markdown
RF
###Code
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, GradientBoostingClassifier
rf = RandomForestClassifier()
rf = train_model(rf, X_train, y_train)
print('Test score = ',rf.score(X_test,y_test))
prob, pred, label = get_data(rf, X_test, y_test)
rf_metrics = MultiClassMetrics(prob, pred, label, method='micro')
rf_metrics.give_threshold()
make_plots(label, pred, rf_metrics.fpr, rf_metrics.tpr,rf_metrics.threshold,
rf_metrics.recall, rf_metrics.precision)
###Output
Training score = 0.99
Test score = 0.96
Confusion matrix, without normalization
[[16 0 0]
[ 0 13 0]
[ 0 21 0]]
###Markdown
ExtraTrees
###Code
rf = ExtraTreesClassifier()
rf = train_model(rf, X_train, y_train)
print('Test score = ',rf.score(X_test,y_test))
prob, pred, label = get_data(rf, X_test, y_test)
rf_metrics = MultiClassMetrics(prob, pred, label, method='micro')
rf_metrics.give_threshold()
make_plots(label, pred, rf_metrics.fpr, rf_metrics.tpr,rf_metrics.threshold,
rf_metrics.recall, rf_metrics.precision)
###Output
Training score = 1.0
Test score = 0.98
Confusion matrix, without normalization
[[16 0 0]
[ 0 14 0]
[ 0 20 0]]
###Markdown
GBT
###Code
rf = GradientBoostingClassifier()
rf = train_model(rf, X_train, y_train)
print('Test score = ',rf.score(X_test,y_test))
prob, pred, label = get_data(rf, X_test, y_test)
rf_metrics = MultiClassMetrics(prob, pred, label, method='micro')
rf_metrics.give_threshold()
make_plots(label, pred, rf_metrics.fpr, rf_metrics.tpr,rf_metrics.threshold,
rf_metrics.recall, rf_metrics.precision)
###Output
Training score = 1.0
Test score = 0.96
Confusion matrix, without normalization
[[16 0 0]
[ 0 15 0]
[ 0 19 0]]
|
hw1/hw1-folder/code/HW-1.ipynb | ###Markdown
B.1 d.
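The experiment below estimates the bias-variance tradeoff of a piecewise-constant estimator (a regressogram) with bin size $m$ on the fixed design $x_i = i/n$. The quantities the code computes are
$$\bar{f}_j = \frac{1}{m}\sum_{i=(j-1)m+1}^{jm} f(i/n), \qquad \text{bias}^2 = \frac{1}{n}\sum_{j=1}^{n/m}\sum_{i=(j-1)m+1}^{jm}\big(\bar{f}_j - f(i/n)\big)^2, \qquad \text{variance} = \frac{\sigma^2}{m}.$$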
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
np.random.seed(10)
n = 256
sigma2 = 1
mean = 0
m_list = [1,2,4,8,16,32]
xList = np.array(range(1,n+1))/n
def f(x):
return 4 * np.sin(np.pi * x) * np.cos(6 * np.pi * x ** 2)
def y_i(x):
return f(x) + np.random.normal(0,1)
def cj(m,j):
sum = 0.0
for i in range((j-1) * m + 1, j * m + 1):
sum += y_i(i/n)
return sum/m
def f_hat(x, m):
sum = 0.0
for j in range(1, int(n/m) + 1):
if ((x > (j -1) *1.0* m /n) and (x <= j*m*1.0/n)):
sum += cj(m,j)
return sum
def f_bar(j,m):
sum = 0.0
for i in range((j-1)*m+1, j*m + 1):
sum += f(i/n)
return sum/m
def bias(m,n):
output = 0.0
for j in range(1, int(n/m)+1):
for i in range((j-1)*m, j*m):
output += (f_bar(j, m) - f(i/n))**2
return output/n
def variance(sigma2, m):
return sigma2 / m
#Initialize
empirical_error = []
bias_list = []
bias_sum = 0.0
variance_list = []
total_error = []
#Start iteration
for m in m_list:
empirical_error_sum = 0.0
# empirical_error
for i in range(1,n+1):
empirical_error_sum += (f_hat(i/n, m) - f(i/n))**2
empirical_error.append(empirical_error_sum/n)
    # bias
    bias_list.append(bias(m,n))
    # variance
    variance_list.append(variance(sigma2, m))
#total error
total_error = np.array(bias_list) + np.array(variance_list)
plt.plot(m_list,variance_list, label="Average Variance")
plt.plot(m_list,bias_list, label="Average Bias")
plt.plot(m_list,total_error, label="Total Error")
plt.plot(m_list,empirical_error, label="Average Empirical Error")
plt.xlabel("Step Size")
plt.ylabel("Average Error")
plt.legend()
# plt.savefig('pic_1.png')
###Output
_____no_output_____
###Markdown
B.1 e.
###Code
def polyfeatures(X, degree):
"""
Expands the given X into an n * d array of polynomial features of
degree d.
Returns:
A n-by-d numpy array, with each row comprising of
X, X * X, X ** 3, ... up to the dth power of X.
Note that the returned matrix will not include the zero-th power.
Arguments:
X is an n-by-1 column numpy array
degree is a positive integer
"""
outputX = X[:]
for i in range(2, degree + 1):
outputX = np.hstack((outputX,X**i))
return outputX
X = polyfeatures(X, 8)
X
X[:]
print(np.mean(X, axis=0))
np.std(X, axis=0)
(X - np.mean(X, axis=0)) / np.std(X, axis=0)
np.c_[np.ones([len(X), 1]), X]
filePath = "data/polydata.dat"
file = open(filePath,'r')
allData = np.loadtxt(file, delimiter=',')
X = allData[:, [0]]
y = allData[:, [1]]
X
class PolynomialRegression:
def __init__(self, degree=1, reg_lambda=1E-8):
"""
Constructor
"""
self.theta = None
self.regLambda = reg_lambda
self.degree = degree
def polyfeatures(self, X, degree):
"""
Expands the given X into an n * d array of polynomial features of
degree d.
Returns:
A n-by-d numpy array, with each row comprising of
X, X * X, X ** 3, ... up to the dth power of X.
Note that the returned matrix will not include the zero-th power.
Arguments:
X is an n-by-1 column numpy array
degree is a positive integer
"""
outputX = X[:]
for i in range(2, degree + 1):
outputX = np.hstack((outputX,X**i))
return outputX
    def fit(self, X, y):
        """
        Trains the model
        Arguments:
            X is a n-by-1 array
            y is an n-by-1 array
        Returns:
            No return value
        Note:
            You need to apply polynomial expansion and scaling
            at first
        """
        n = len(X)
        X = self.polyfeatures(X, self.degree)
        # standardization: store the training statistics so that predict()
        # can reuse them instead of re-estimating them on test data
        self.mean = np.mean(X, axis=0)
        self.std = np.std(X, axis=0)
        X = (X - self.mean) / self.std
        # add 1s column
        X = np.c_[np.ones([n, 1]), X]
        n, d = X.shape
        self.theta = np.linalg.solve((X.T @ X) + self.regLambda*np.identity(d), X.T @ y)
    def predict(self, X):
        """
        Use the trained model to predict values for each instance in X
        Arguments:
            X is a n-by-1 numpy array
        Returns:
            an n-by-1 numpy array of the predictions
        """
        n = len(X)
        X = self.polyfeatures(X, self.degree)
        # scale with the statistics stored during fit()
        X = (X - self.mean) / self.std
        # add 1s column
        X_ = np.c_[np.ones([n, 1]), X]
        # predict
        return X_ @ self.theta
    def trainPredicted(self, X):
        # identical to predict(); kept because the evaluation loop below calls it
        return self.predict(X)
from sklearn import model_selection
loo = model_selection.LeaveOneOut()
for train_index, test_index in loo.split(X):
Xtrain, Xtest = X[train_index], X[test_index]
ytrain, ytest = y[train_index], y[test_index]
n = len(Xtrain)
errorTrain = np.zeros(n)
errorTest = np.zeros(n)
for i in range(1, 1 + 1):
model = PolynomialRegression(8, 0)
        model.fit(Xtrain[:i], ytrain[:i])
testPredicted = model.predict(Xtest)
singleErrorFromTrain = np.mean((model.trainPredicted(Xtrain[:i]) - ytrain[:i])**2)
errorTrain = np.append(errorTrain, singleErrorFromTrain)
singleErrorFromTest = np.mean((testPredicted - ytest[:i])**2)
errorTest = np.append(errorTest, singleErrorFromTest)
import mnist
import numpy as np
mndata = mnist.MNIST("./python-mnist/data/")
X_train, labels_train = map(np.array, mndata.load_training())
X_test, labels_test = map(np.array, mndata.load_testing())
X_train = X_train/255.0
X_test = X_test/255.0
# Train function
def train(X,y,lam):
n, d = X.shape
y = np.eye(10)[y] # put y into a one hot encoding matrix
W = np.linalg.pinv(X.T @ X + lam*np.identity(d)) @ (X.T @ y)
return W
def predict(W, X_new):
    return (X_new @ W).argmax(axis=1)  # use the passed-in weights, not the global W_hat
# Compute weights
W_hat = train(X_train, labels_train, lam = 0.0001)
# Computed predicted training label
predicted_train = predict(W=W_hat, X_new=X_train)
# Compute predicted testing label
predicted = predict(W=W_hat, X_new=X_test)
# Compute error rate for both training and testing
train_error_rate = 1 -( sum(predicted_train == labels_train) / len(labels_train))
test_error_rate = 1 - (sum(predicted == labels_test) / len(labels_test))
print("Training Error is: ", train_error_rate)
print("Testing Error Rate is: ", test_error_rate)
#Training Error is: 0.14806666666666668
#Testing Error Rate is: 0.14659999999999995
W_hat
X_train[0] @ W_hat
X_train.shape
np.eye(10)[labels_train][0]
p = 100
n, d = X_train.shape
G = np.random.normal(0, np.sqrt(0.1), p*d).reshape((p, d))
b = np.random.uniform(0, 2*np.pi, p).reshape((p,1))
h = ((G @ X_train.T)+b).T
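# h has shape (n, p): row i holds the p random linear features (G x_i + b) for
# image i; the cosine nonlinearity is applied in a later cell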
h[0]
a= np.random.permutation([[1, 4, 9, 12, 15],
[2,2,2,2,2],
[3,3,3,3,3]])
a
np.cos((((G @ X_train.T)+b).T)[0])
###Output
_____no_output_____
###Markdown
B2.A
###Code
%%time
import numpy as np
import matplotlib.pyplot as plt
import mnist
bestw = None
mndata = mnist.MNIST('./python-mnist/data/')
X_train, labels_train = map(np.array, mndata.load_training())
X_test, labels_test = map(np.array, mndata.load_testing())
X_train = X_train/255.0
X_test = X_test/255.0
# This function trains the model and returns the weights
def train(X, Y):
lambda_ = 0.0001
n, d = np.shape(X)
reg_matrix = lambda_ * np.identity(d)
reg_matrix[0,0] = 0
W = np.linalg.solve(X.T @ X + reg_matrix, X.T @ Y)
return W
# This function does the prediction
def predict(W,X):
return (X @ W).argmax(axis = 1)
# This function applies the random-feature transformation to the data
def h1(X_train, p):
    n, d = X_train.shape
    sigma = np.sqrt(0.1)  # standard deviation; the variance is 0.1
    G = np.random.normal(0, sigma, p * d).reshape(p, d)
    b = np.random.uniform(0, 2 * np.pi, p).reshape(1, p)
    h = np.cos(np.dot(X_train, G.T) + b)  # b is (1, p), so it broadcasts over rows
return h
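# (Background, my framing rather than the assignment's:) features of the form
# cos(Gx + b) with Gaussian G and uniform b are random Fourier features, which
# approximate an RBF kernel (Rahimi & Recht, 2007).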
n, d = X_train.shape
training_error_all = []
validing_error_all = []
W_list = []
# loop from p=500 to p=6000, step=500
for p in list(range(500, 6001, 500)):
h = h1(X_train, p)
# Train test split with 80%-20%
train_index = np.random.choice(np.arange(n), int(X_train.shape[0] * 0.8), replace=False)
valid_index = np.setdiff1d(np.arange(n), train_index)
train_data = h[train_index, :]
valid_data = h[valid_index, :]
y_train = np.eye(10)[labels_train[train_index]]
# Compute weights
W_hat = train(train_data, y_train)
W_list.append(W_hat)
# Compute train predicted
predict_train = predict(W_hat, train_data)
predict_train = labels_train[train_index] - predict_train
train_error_single = np.count_nonzero(predict_train) / len(predict_train) #train_size
training_error_all.append(train_error_single)
# Compute test predicted
predicted_test = predict(W_hat, valid_data)
predicted_test = labels_train[valid_index] - predicted_test
valid_error_single = np.count_nonzero(predicted_test) / len(predicted_test)
validing_error_all.append(valid_error_single)
    print("p: ", p, ", train_err: ", train_error_single, ", valid_err: ", valid_error_single)
%%time
x_index = list(range(500, 6001, 500))
plt.plot(x_index, training_error_all, label="Training Error")
plt.scatter(x_index, training_error_all)
plt.plot(x_index, validing_error_all, label="Validation Error")
plt.scatter(x_index, validing_error_all)
plt.xlabel("P")
plt.ylabel("Prediction Error Rate")
plt.legend()
plt.savefig('pic_2.png')
###Output
CPU times: user 5.96 s, sys: 1.14 s, total: 7.1 s
Wall time: 7.25 s
###Markdown
B2.b
###Code
%%time
# This function trains the model and returns the weights
def train(X, Y):
lambda_ = 0.0001
n, d = np.shape(X)
reg_matrix = lambda_ * np.identity(d)
reg_matrix[0,0] = 0
W = np.linalg.solve(X.T @ X + reg_matrix, X.T @ Y)
return W
# This function does the prediction
def predict(W,X):
return (X @ W).argmax(axis = 1)
# This function applies the random-feature transformation to the data
def h1(X_train, p):
    n, d = X_train.shape
    sigma = np.sqrt(0.1)  # standard deviation; the variance is 0.1
    G = np.random.normal(0, sigma, p * d).reshape(p, d)
    b = np.random.uniform(0, 2 * np.pi, p).reshape(1, p)
    h = np.cos(G @ X_train.T).T
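    # note: b is generated above but never added, so this variant omits the random phase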
return h
n, d = X_train.shape
training_error_all = []
validing_error_all = []
W_list = []
p = 6000  # fix p before computing the features; the loop below only revisits p = 6000
h = h1(X_train, p)
# Train test split with 80%-20%
train_index = np.random.choice(np.arange(n), int(X_train.shape[0] * 0.8), replace=False)
valid_index = np.setdiff1d(np.arange(n), train_index)
train_data = h[train_index, :]
valid_data = h[valid_index, :]
y_train = np.eye(10)[labels_train[train_index]]
# evaluate only the chosen p (p = 6000)
for p in [6000]:
# Compute weights
W_hat = train(train_data, y_train)
W_list.append(W_hat)
# Compute train predicted
predict_train = predict(W_hat, train_data)
predict_train = labels_train[train_index] - predict_train
train_error_single = np.count_nonzero(predict_train) / len(predict_train) #train_size
training_error_all.append(train_error_single)
# Compute test predicted
predicted_test = predict(W_hat, valid_data)
predicted_test = labels_train[valid_index] - predicted_test
valid_error_single = np.count_nonzero(predicted_test) / len(predicted_test)
validing_error_all.append(valid_error_single)
    print("p: ", p, ", train_err: ", train_error_single, ", valid_err: ", valid_error_single)
%%time
mndata = mnist.MNIST('./python-mnist/data/')
X_train, labels_train = map(np.array, mndata.load_training())
X_test, labels_test = map(np.array, mndata.load_testing())
X_train = X_train/255.0
X_test = X_test/255.0
# This function trains the model and returns the weights
def train(X, Y):
lambda_ = 0.0001
n, d = np.shape(X)
X = np.c_[np.ones([n, 1]), X]
reg_matrix = lambda_ * np.identity(d+1)
reg_matrix[0,0] = 0
W = np.linalg.solve(X.T @ X + reg_matrix, X.T @ Y)
return W
# This function does the prediction
def predict(W,X):
n = len(X)
X = np.c_[np.ones([n, 1]), X]
return (X @ W).argmax(axis = 1)
def h1(X, p, train=True):
    n, d = X.shape
    if train:
        # Generate a random matrix G, each entry i.i.d. Gaussian (mean 0, variance 0.1)
        G = np.random.normal(0, np.sqrt(0.1), (p, d))
        # Generate a random vector b, each entry Unif(0, 2*pi)
        b = np.random.uniform(0, 2 * np.pi, (1, p))
    else:
        # this branch has no G or b to use; it is never exercised below
        raise ValueError("train=False requires precomputed G and b")
    # return the transformed X: h(X) = cos(GX' + b)
    return np.cos(np.matmul(G, X.T).T + b)
dt = 0.05  # delta = 0.05
p = 6000  # from the previous question, the best p is 6000
best_W = W_list[-1]  # the W trained when p was 6000
sigma = np.sqrt(0.1)
n, d = X_test.shape
h = h1(X_test, p)  # NOTE: h1 draws a fresh G and b here, not the ones best_W was trained with
predicted = predict(best_W, h)
test_accuracy = sum(predicted == labels_test) / predicted.size  # this is accuracy, not error
H = np.sqrt(np.log(2/dt)/(2*predicted.size))
print(f'The test accuracy is {test_accuracy}')
print(f'Confidence Interval:[{test_accuracy - H} : {test_accuracy + H}]')
# The test accuracy is 0.1197 (chance level, because G and b were re-drawn at test time)
# Confidence Interval:[0.1061189848425938 : 0.1332810151574062]
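# Sanity check of the Hoeffding half-width above (my verification): with m = 10000
# test points and delta = 0.05, H = sqrt(ln(2/delta) / (2m)) = sqrt(ln(40) / 20000).
import math
print(math.sqrt(math.log(2 / 0.05) / (2 * 10000)))  # ~0.013581, matching the interval above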
%%time
# This function trains the model and returns the weights
def train(X, Y, lambda_ = 0.0001):
n, d = np.shape(X)
reg_matrix = lambda_ * np.identity(d)
reg_matrix[0,0] = 0
W = np.linalg.solve(X.T @ X + reg_matrix, X.T @ Y)
return W
# This function does the prediction
def predict(W,X):
return (X @ W).argmax(axis = 1)
n, d = X_train.shape
sigma = np.sqrt(0.1)
p = 6000
G = np.random.normal(0, sigma, p * d).reshape(p, d)
b = np.random.uniform(0, 2 * np.pi, p).reshape(1, p)
# This function applies the random-feature transformation to the data
def h1(X_train, G, b):
    return np.cos(np.dot(X_train, G.T) + b)  # b is (1, p), so it broadcasts over rows
dt = 0.05  # delta = 0.05
p = 6000  # from the previous question, the best p is 6000
# Get W
h_train = h1(X_train, G, b)
y_train = np.eye(10)[labels_train]
best_W = train(h_train, y_train, lambda_= 0.01)
print('train complete')
sigma = np.sqrt(0.1)
n, d = X_test.shape
h_test = h1(X_test, G, b)
predicted = predict(best_W, h_test)
print('predict complete')
test_accuracy = sum(predicted == labels_test) / predicted.size  # accuracy, not error
H = np.sqrt(np.log(2/dt)/(2*predicted.size))
print(f'The test accuracy is {test_accuracy}')
print(f'Confidence Interval:[{test_accuracy - H} : {test_accuracy + H}]')
train_percent = 0.8
ori_train_size = X_train.shape[0]
ori_test_size = X_test.shape[0]
Y_train = np.eye(10)[labels_train]
lam = 0.01
variance = 0.1
p = 6000
d = X_test.shape[1]
G = np.random.normal(0, np.sqrt(variance), size = (p,d))
b = np.random.uniform(low=0, high=2*np.pi, size=(p,1))
def transform(X, p):
    d = X.shape[1]
    G = np.random.normal(0, np.sqrt(variance), size=(p, d))
    b = np.random.uniform(low=0, high=2*np.pi, size=(p, 1))
    return np.cos(np.dot(X, G.T) + b.T)
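# note: transform re-draws G and b on every call, so train and test features would
# not match; the inline transforms below reuse the single G, b drawn above, which is
# likely why the calls to transform are left commented out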
index = np.arange(ori_train_size)
np.random.shuffle(index)
train_index = index[0:int(train_percent * ori_train_size)]
validation_index = index[int(train_percent * ori_train_size) : ]
shuffled_labels_validation = labels_train[validation_index]
shuffled_labels_train = labels_train[train_index]
new_Y_train = Y_train[train_index, :]
transed_X_train= np.cos(np.dot(X_train, G.T) + b.T)
# transed_X_train = transform(X_train, p)
new_X_train = transed_X_train[train_index, :]
new_X_validate = transed_X_train[validation_index, :]
Wp = train(new_X_train, new_Y_train)
traned_X_test = np.cos(np.dot(X_test, G.T) + b.T)
# traned_X_test = transform(X_test, p)
test_pre = predict(Wp, traned_X_test)
test_error = np.mean(test_pre != labels_test)
q = X_test.shape[0]
interval = np.sqrt(np.log(40) / (2*q))  # log(40) = log(2/delta) with delta = 0.05
print(test_error)
test_error
import numpy as np
import matplotlib.pyplot as plt
import mnist
mndata = mnist.MNIST('./python-mnist/data/')
X_train, labels_train = map(np.array, mndata.load_training())
X_test, labels_test = map(np.array, mndata.load_testing())
X_train = X_train/255.0
X_test = X_test/255.0
# This function trains the model and returns the weights
def train(X, Y):
lambda_ = 0.0001
n, d = np.shape(X)
W = np.linalg.solve(X.T @ X + lambda_ * np.identity(d), X.T @ Y)
return W
# This function does the prediction
def predict(W,X):
return (X @ W).argmax(axis = 1)
# This function applies the random-feature transformation to the data
def h1(X_train, X_test, p):
n, d = X_train.shape
sigma = np.sqrt(0.1)
G = np.random.normal(0, sigma, p * d).reshape(p, d)
b = np.random.uniform(0, 2 * np.pi, p).reshape(p, 1)
h_train = np.cos(np.dot(X_train, G.T) + b.T)
h_test = np.cos(np.dot(X_test, G.T) + b.T)
return h_train, h_test, G, b
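# (My note:) returning G and b alongside the features is the key point here: reusing
# the same G, b for train and validation keeps the feature maps consistent, unlike the
# earlier cell that re-drew them at prediction time and saw chance-level accuracy.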
n, d = X_train.shape
training_error_all = []
validing_error_all = []
W_list = []
Gb_list = []
train_index = np.random.choice(np.arange(n), int(X_train.shape[0] * 0.8), replace=False)
valid_index = np.setdiff1d(np.arange(n), train_index)
# loop from p=500 to p=6000, step=500
for p in list(range(500, 6001, 500)):
h_train, h_test, G, b = h1(X_train[train_index, :], X_train[valid_index, :], p)
# h = h1(X_train, p)
# Train test split with 80%-20%
Gb_list.append((G,b))
train_data = h_train
valid_data = h_test
y_train = np.eye(10)[labels_train[train_index]]
# Compute weights
W_hat = train(train_data, y_train)
W_list.append(W_hat)
# Compute train predicted
predict_train = predict(W_hat, train_data)
predict_train = labels_train[train_index] - predict_train
train_error_single = np.count_nonzero(predict_train) / len(predict_train) #train_size
training_error_all.append(train_error_single)
# Compute test predicted
predicted_test = predict(W_hat, valid_data)
predicted_test = labels_train[valid_index] - predicted_test
valid_error_single = np.count_nonzero(predicted_test) / len(predicted_test)
validing_error_all.append(valid_error_single)
    print("p: ", p, ", train_err: ", train_error_single, ", valid_err: ", valid_error_single)
x_index = list(range(500, 6001, 500))
plt.plot(x_index, training_error_all, label="Training Error")
plt.scatter(x_index, training_error_all)
plt.plot(x_index, validing_error_all, label="Validation Error")
plt.scatter(x_index, validing_error_all)
plt.xlabel("P")
plt.ylabel("Prediction Error Rate")
plt.legend()
# Here I use the held-out testing data, which was not touched in the previous
# question, to finalize the error rate.
# The weights are the best weights from the previous question, and the same G and b
# from the previous question are reused to transform the testing data.
mndata = mnist.MNIST('./python-mnist/data/')
X_train, labels_train = map(np.array, mndata.load_training())
X_test, labels_test = map(np.array, mndata.load_testing())
X_train = X_train/255.0
X_test = X_test/255.0
dt = 0.05  # delta = 0.05
p = 6000  # From previous question, the best p is 6000.
W_best = W_list[-1]
sigma = np.sqrt(0.1)
G = Gb_list[-1][0]
b = Gb_list[-1][1]
h_test = np.cos(np.dot(X_test, G.T) + b.T)
y_train = np.eye(10)[labels_train]
print("transform done")
predicted = (h_test @ W_best).argmax(axis=1)
print("predict done")
test_accuracy = sum(predicted == labels_test) / predicted.size  # accuracy, not error
H = np.sqrt(np.log(2/dt)/(2*predicted.size))
print(f'The test accuracy is {test_accuracy}')
print(f'Confidence Interval:[{test_accuracy - H} : {test_accuracy + H}]')
# The test accuracy is 0.9506
# Confidence Interval:[0.9370189848425938 : 0.9641810151574062]
import numpy as np
import matplotlib.pyplot as plt
import mnist
mndata = mnist.MNIST('./python-mnist/data/')
X_train, labels_train = map(np.array, mndata.load_training())
X_test, labels_test = map(np.array, mndata.load_testing())
X_train = X_train/255.0
X_test = X_test/255.0
# This function trains the model and returns the weights
def train(X, Y):
lambda_ = 0.0001
n, d = np.shape(X)
W = np.linalg.solve(X.T @ X + lambda_ * np.identity(d), X.T @ Y)
return W
# This function does the prediction
def predict(W,X):
return (X @ W).argmax(axis = 1)
# This function applies the random-feature transformation to the data
def h1(X_train, X_test, p):
n, d = X_train.shape
sigma = np.sqrt(0.1)
G = np.random.normal(0, sigma, p * d).reshape(p, d)
b = np.random.uniform(0, 2 * np.pi, p).reshape(p, 1)
h_train = np.cos(np.dot(X_train, G.T) + b.T)
h_test = np.cos(np.dot(X_test, G.T) + b.T)
return h_train, h_test, G, b
%%time
n, d = X_train.shape
p = 6000
dt = 0.05
sigma = np.sqrt(0.1)
h_train, h_test, G, b = h1(X_train, X_test, p)
y_train = np.eye(10)[labels_train]
# Compute weights
W_hat = train(h_train, y_train)
# Compute test predictions
predicted_test = predict(W_hat, h_test)
test_error = 1 - (sum(labels_test == predicted_test) / len(labels_test))
H = np.sqrt(np.log(2/dt)/(2*len(labels_test)))
print(f'The test_error is {test_error}')
print(f'Confidence Interval:[{test_error - H} : {test_error + H}]')
# The test_error is 0.04600000000000004
# Confidence Interval:[0.03241898484259385 : 0.059581015157406235]
1 - (sum(labels_test == predicted_test) / len(labels_test))
(h_test @ W_best).argmax(axis = 1)
predict(W_list[-1], h_test)
W_list[-1].shape
predicted = predict(W_list[-1], h_test)
print("predict done")
test_accuracy = sum(predicted == labels_test) / predicted.size  # accuracy, not error rate
test_accuracy
###Output
_____no_output_____